# HG changeset patch
# User coleenp
# Date 1208123022 14400
# Node ID 21d113ecbf6a7d1ea32cf51ed6f5f1f9c58ed2df
# Parent f4edb0d9f109c2375fd5991982d86a72a20abfe3
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
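The serviceability agent decodes a compressed (narrow) oop by reading a heapOopSize-wide integer and widening it against the heap base, as in DebuggerBase.readCompOopAddressValue further down in this patch. A minimal standalone Java sketch of that arithmetic follows; the heap base and alignment shift are illustrative placeholders, not values read from a live VM (a real session obtains them through putHeapConst):

    public class NarrowOopDecodeSketch {
        // Illustrative constants; a live SA session gets these via putHeapConst().
        static final long HEAP_BASE = 0x0000000600000000L;
        static final int LOG_MIN_OBJ_ALIGNMENT_IN_BYTES = 3; // 8-byte object alignment

        // Mirrors readCompOopAddressValue: a zero narrow oop decodes to null (0).
        static long decode(long narrowOop) {
            return narrowOop == 0 ? 0L
                    : HEAP_BASE + (narrowOop << LOG_MIN_OBJ_ALIGNMENT_IN_BYTES);
        }

        public static void main(String[] args) {
            System.out.printf("narrow 0x%x decodes to 0x%x%n", 0x12345678L, decode(0x12345678L));
        }
    }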
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Sun Apr 13 17:43:42 2008 -0400
@@ -885,7 +885,12 @@
out.println("found at " + addr);
}
}
-
+ public void visitCompOopAddress(Address addr) {
+ Address val = addr.getCompOopAddressAt(0);
+ if (AddressOps.equal(val, value)) {
+ out.println("found at " + addr);
+ }
+ }
public void epilogue() {
}
};
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java Sun Apr 13 17:43:42 2008 -0400
@@ -1011,8 +1011,21 @@
Assert.that(addr.andWithMask(VM.getVM().getAddressSize() - 1) == null,
"Address " + addr + "should have been aligned");
}
+ OopHandle handle = addr.getOopHandleAt(0);
+ addAnnotation(addr, handle);
+ }
+
+ public void visitCompOopAddress(Address addr) {
+ if (Assert.ASSERTS_ENABLED) {
+ Assert.that(addr.andWithMask(VM.getVM().getAddressSize() - 1) == null,
+ "Address " + addr + "should have been aligned");
+ }
+ OopHandle handle = addr.getCompOopHandleAt(0);
+ addAnnotation(addr, handle);
+ }
+
+ public void addAnnotation(Address addr, OopHandle handle) {
// Check contents
- OopHandle handle = addr.getOopHandleAt(0);
String anno = "null oop";
if (handle != null) {
// Find location
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Sun Apr 13 17:43:42 2008 -0400
@@ -306,6 +306,8 @@
entryAddr = entryAddr.addOffsetTo(intConstantEntryArrayStride);
} while (nameAddr != null);
+ String symbol = "heapOopSize"; // global int constant whose value is initialized at runtime.
+ addIntConstant(symbol, (int)lookupInProcess(symbol).getCIntegerAt(0, 4, false));
}
private void readVMLongConstants() {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapSet.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapSet.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapSet.java Sun Apr 13 17:43:42 2008 -0400
@@ -68,7 +68,8 @@
public void visitValueLocation(Address valueAddr) {
}
- public void visitDeadLocation(Address deadAddr) {
+ public void visitNarrowOopLocation(Address narrowOopAddr) {
+ addressVisitor.visitCompOopAddress(narrowOopAddr);
}
}
@@ -197,9 +198,9 @@
}
}
- // We want dead, value and oop oop_types
+ // We want narrow oop, value and oop oop_types
OopMapValue.OopTypes[] values = new OopMapValue.OopTypes[] {
- OopMapValue.OopTypes.OOP_VALUE, OopMapValue.OopTypes.VALUE_VALUE, OopMapValue.OopTypes.DEAD_VALUE
+ OopMapValue.OopTypes.OOP_VALUE, OopMapValue.OopTypes.VALUE_VALUE, OopMapValue.OopTypes.NARROWOOP_VALUE
};
{
@@ -214,8 +215,8 @@
visitor.visitOopLocation(loc);
} else if (omv.getType() == OopMapValue.OopTypes.VALUE_VALUE) {
visitor.visitValueLocation(loc);
- } else if (omv.getType() == OopMapValue.OopTypes.DEAD_VALUE) {
- visitor.visitDeadLocation(loc);
+ } else if (omv.getType() == OopMapValue.OopTypes.NARROWOOP_VALUE) {
+ visitor.visitNarrowOopLocation(loc);
}
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapValue.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapValue.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapValue.java Sun Apr 13 17:43:42 2008 -0400
@@ -50,7 +50,7 @@
static int UNUSED_VALUE;
static int OOP_VALUE;
static int VALUE_VALUE;
- static int DEAD_VALUE;
+ static int NARROWOOP_VALUE;
static int CALLEE_SAVED_VALUE;
static int DERIVED_OOP_VALUE;
@@ -74,7 +74,7 @@
UNUSED_VALUE = db.lookupIntConstant("OopMapValue::unused_value").intValue();
OOP_VALUE = db.lookupIntConstant("OopMapValue::oop_value").intValue();
VALUE_VALUE = db.lookupIntConstant("OopMapValue::value_value").intValue();
- DEAD_VALUE = db.lookupIntConstant("OopMapValue::dead_value").intValue();
+ NARROWOOP_VALUE = db.lookupIntConstant("OopMapValue::narrowoop_value").intValue();
CALLEE_SAVED_VALUE = db.lookupIntConstant("OopMapValue::callee_saved_value").intValue();
DERIVED_OOP_VALUE = db.lookupIntConstant("OopMapValue::derived_oop_value").intValue();
}
@@ -83,7 +83,7 @@
public static final OopTypes UNUSED_VALUE = new OopTypes() { int getValue() { return OopMapValue.UNUSED_VALUE; }};
public static final OopTypes OOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.OOP_VALUE; }};
public static final OopTypes VALUE_VALUE = new OopTypes() { int getValue() { return OopMapValue.VALUE_VALUE; }};
- public static final OopTypes DEAD_VALUE = new OopTypes() { int getValue() { return OopMapValue.DEAD_VALUE; }};
+ public static final OopTypes NARROWOOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.NARROWOOP_VALUE; }};
public static final OopTypes CALLEE_SAVED_VALUE = new OopTypes() { int getValue() { return OopMapValue.CALLEE_SAVED_VALUE; }};
public static final OopTypes DERIVED_OOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.DERIVED_OOP_VALUE; }};
@@ -106,7 +106,7 @@
// Querying
public boolean isOop() { return (getValue() & TYPE_MASK_IN_PLACE) == OOP_VALUE; }
public boolean isValue() { return (getValue() & TYPE_MASK_IN_PLACE) == VALUE_VALUE; }
- public boolean isDead() { return (getValue() & TYPE_MASK_IN_PLACE) == DEAD_VALUE; }
+ public boolean isNarrowOop() { return (getValue() & TYPE_MASK_IN_PLACE) == NARROWOOP_VALUE; }
public boolean isCalleeSaved() { return (getValue() & TYPE_MASK_IN_PLACE) == CALLEE_SAVED_VALUE; }
public boolean isDerivedOop() { return (getValue() & TYPE_MASK_IN_PLACE) == DERIVED_OOP_VALUE; }
@@ -118,7 +118,7 @@
if (which == UNUSED_VALUE) return OopTypes.UNUSED_VALUE;
else if (which == OOP_VALUE) return OopTypes.OOP_VALUE;
else if (which == VALUE_VALUE) return OopTypes.VALUE_VALUE;
- else if (which == DEAD_VALUE) return OopTypes.DEAD_VALUE;
+ else if (which == NARROWOOP_VALUE) return OopTypes.NARROWOOP_VALUE;
else if (which == CALLEE_SAVED_VALUE) return OopTypes.CALLEE_SAVED_VALUE;
else if (which == DERIVED_OOP_VALUE) return OopTypes.DERIVED_OOP_VALUE;
else throw new InternalError("unknown which " + which + " (TYPE_MASK_IN_PLACE = " + TYPE_MASK_IN_PLACE + ")");
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapVisitor.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapVisitor.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapVisitor.java Sun Apr 13 17:43:42 2008 -0400
@@ -32,5 +32,5 @@
public void visitOopLocation(Address oopAddr);
public void visitDerivedOopLocation(Address baseOopAddr, Address derivedOopAddr);
public void visitValueLocation(Address valueAddr);
- public void visitDeadLocation(Address deadAddr);
+ public void visitNarrowOopLocation(Address narrowOopAddr);
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Address.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Address.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Address.java Sun Apr 13 17:43:42 2008 -0400
@@ -87,6 +87,8 @@
throws UnmappedAddressException, UnalignedAddressException;
/** This returns null if the address at the given offset is NULL. */
public Address getAddressAt (long offset) throws UnmappedAddressException, UnalignedAddressException;
+ /** Returns the decoded address at the given offset */
+ public Address getCompOopAddressAt (long offset) throws UnmappedAddressException, UnalignedAddressException;
//
// Java-related routines
@@ -103,6 +105,8 @@
/** This returns null if the address at the given offset is NULL. */
public OopHandle getOopHandleAt (long offset)
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException;
+ public OopHandle getCompOopHandleAt (long offset)
+ throws UnmappedAddressException, UnalignedAddressException, NotInHeapException;
//
// C/C++-related mutators. These throw UnmappedAddressException if
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java Sun Apr 13 17:43:42 2008 -0400
@@ -118,6 +118,9 @@
public long getJIntSize();
public long getJLongSize();
public long getJShortSize();
+ public long getHeapBase();
+ public long getHeapOopSize();
+ public long getLogMinObjAlignmentInBytes();
public ReadResult readBytesFromProcess(long address, long numBytes)
throws DebuggerException;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java Sun Apr 13 17:43:42 2008 -0400
@@ -37,6 +37,7 @@
DbxDebugger interfaces.
*/
public abstract class DebuggerBase implements Debugger {
+
// May be set lazily, but must be set before calling any of the read
// routines below
protected MachineDescription machDesc;
@@ -52,6 +53,11 @@
protected long jlongSize;
protected long jshortSize;
protected boolean javaPrimitiveTypesConfigured;
+ // heap data.
+ protected long oopSize;
+ protected long heapOopSize;
+ protected long heapBase; // heap base for compressed oops.
+ protected long logMinObjAlignmentInBytes; // Used to decode compressed oops.
// Should be initialized if desired by calling initCache()
private PageCache cache;
@@ -153,6 +159,12 @@
javaPrimitiveTypesConfigured = true;
}
+ public void putHeapConst(long heapBase, long heapOopSize, long logMinObjAlignmentInBytes) {
+ this.heapBase = heapBase;
+ this.heapOopSize = heapOopSize;
+ this.logMinObjAlignmentInBytes = logMinObjAlignmentInBytes;
+ }
+
/** May be called by subclasses if desired to initialize the page
cache but may not be overridden */
protected final void initCache(long pageSize, long maxNumPages) {
@@ -442,6 +454,16 @@
return readCInteger(address, machDesc.getAddressSize(), true);
}
+ protected long readCompOopAddressValue(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCInteger(address, getHeapOopSize(), true);
+ if (value != 0) {
+ // See oop.inline.hpp decode_heap_oop
+ value = (long)(heapBase + (long)(value << logMinObjAlignmentInBytes));
+ }
+ return value;
+ }
+
protected void writeAddressValue(long address, long value)
throws UnmappedAddressException, UnalignedAddressException {
writeCInteger(address, machDesc.getAddressSize(), value);
@@ -518,4 +540,15 @@
public long getJShortSize() {
return jshortSize;
}
+
+ public long getHeapOopSize() {
+ return heapOopSize;
+ }
+
+ public long getHeapBase() {
+ return heapBase;
+ }
+ public long getLogMinObjAlignmentInBytes() {
+ return logMinObjAlignmentInBytes;
+ }
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java Sun Apr 13 17:43:42 2008 -0400
@@ -42,4 +42,5 @@
long jintSize,
long jlongSize,
long jshortSize);
+ public void putHeapConst(long heapBase, long heapOopSize, long logMinObjAlignment);
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java Sun Apr 13 17:43:42 2008 -0400
@@ -35,13 +35,6 @@
able to traverse arrays of pointers or oops. */
public long getAddressSize();
- /** Returns the size of an address in bytes. Currently needed to be
- able to traverse arrays of pointers or oops. (FIXME: since we're
- already reading the Java primitive types' sizes from the remote
- VM, it would be nice to remove this routine, using a similar
- mechanism to how the TypeDataBase deals with primitive types.) */
- public long getOopSize();
-
/** Returns the maximum value of the C integer type with the given
size in bytes and signedness. Throws IllegalArgumentException if
the size in bytes is not legal for a C type (or can not be
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAMD64.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAMD64.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAMD64.java Sun Apr 13 17:43:42 2008 -0400
@@ -29,10 +29,6 @@
return 8;
}
- public long getOopSize() {
- return 8;
- }
-
public boolean isLP64() {
return true;
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIA64.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIA64.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIA64.java Sun Apr 13 17:43:42 2008 -0400
@@ -29,10 +29,6 @@
return 8;
}
- public long getOopSize() {
- return 8;
- }
-
public boolean isLP64() {
return true;
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIntelX86.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIntelX86.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIntelX86.java Sun Apr 13 17:43:42 2008 -0400
@@ -29,10 +29,6 @@
return 4;
}
- public long getOopSize() {
- return 4;
- }
-
public boolean isBigEndian() {
return false;
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC32Bit.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC32Bit.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC32Bit.java Sun Apr 13 17:43:42 2008 -0400
@@ -29,10 +29,6 @@
return 4;
}
- public long getOopSize() {
- return 4;
- }
-
public boolean isBigEndian() {
return true;
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC64Bit.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC64Bit.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC64Bit.java Sun Apr 13 17:43:42 2008 -0400
@@ -29,9 +29,6 @@
return 8;
}
- public long getOopSize() {
- return 8;
- }
public boolean isBigEndian() {
return true;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxAddress.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxAddress.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxAddress.java Sun Apr 13 17:43:42 2008 -0400
@@ -71,6 +71,9 @@
public Address getAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
return debugger.readAddress(addr + offset);
}
+ public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompOopAddress(addr + offset);
+ }
//
// Java-related routines
@@ -113,6 +116,11 @@
return debugger.readOopHandle(addr + offset);
}
+ public OopHandle getCompOopHandleAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+ return debugger.readCompOopHandle(addr + offset);
+ }
+
// Mutators -- not implemented for now (FIXME)
public void setCIntegerAt(long offset, long numBytes, long value) {
throw new DebuggerException("Unimplemented");
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebugger.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebugger.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebugger.java Sun Apr 13 17:43:42 2008 -0400
@@ -43,7 +43,9 @@
public long readCInteger(long address, long numBytes, boolean isUnsigned)
throws DebuggerException;
public DbxAddress readAddress(long address) throws DebuggerException;
+ public DbxAddress readCompOopAddress(long address) throws DebuggerException;
public DbxOopHandle readOopHandle(long address) throws DebuggerException;
+ public DbxOopHandle readCompOopHandle(long address) throws DebuggerException;
public long[] getThreadIntegerRegisterSet(int tid) throws DebuggerException;
public Address newAddress(long value) throws DebuggerException;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebuggerLocal.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebuggerLocal.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebuggerLocal.java Sun Apr 13 17:43:42 2008 -0400
@@ -460,12 +460,23 @@
return (value == 0 ? null : new DbxAddress(this, value));
}
+ public DbxAddress readCompOopAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCompOopAddressValue(address);
+ return (value == 0 ? null : new DbxAddress(this, value));
+ }
+
/** From the DbxDebugger interface */
public DbxOopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
long value = readAddressValue(address);
return (value == 0 ? null : new DbxOopHandle(this, value));
}
+ public DbxOopHandle readCompOopHandle(long address)
+ throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+ long value = readCompOopAddressValue(address);
+ return (value == 0 ? null : new DbxOopHandle(this, value));
+ }
//--------------------------------------------------------------------------------
// Thread context access. Can not be package private, but should
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dummy/DummyAddress.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dummy/DummyAddress.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dummy/DummyAddress.java Sun Apr 13 17:43:42 2008 -0400
@@ -76,6 +76,10 @@
return new DummyAddress(debugger, badLong);
}
+ public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return new DummyAddress(debugger, badLong);
+ }
+
//
// Java-related routines
//
@@ -116,6 +120,10 @@
throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
return new DummyOopHandle(debugger, badLong);
}
+ public OopHandle getCompOopHandleAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+ return new DummyOopHandle(debugger, badLong);
+ }
// Mutators -- not implemented
public void setCIntegerAt(long offset, long numBytes, long value) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java Sun Apr 13 17:43:42 2008 -0400
@@ -74,6 +74,11 @@
return debugger.readAddress(addr + offset);
}
+ public Address getCompOopAddressAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompOopAddress(addr + offset);
+ }
+
//
// Java-related routines
//
@@ -115,6 +120,11 @@
return debugger.readOopHandle(addr + offset);
}
+ public OopHandle getCompOopHandleAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+ return debugger.readCompOopHandle(addr + offset);
+ }
+
// Mutators -- not implemented for now (FIXME)
public void setCIntegerAt(long offset, long numBytes, long value) {
throw new DebuggerException("Unimplemented");
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java Sun Apr 13 17:43:42 2008 -0400
@@ -45,7 +45,9 @@
public long readCInteger(long address, long numBytes, boolean isUnsigned)
throws DebuggerException;
public LinuxAddress readAddress(long address) throws DebuggerException;
+ public LinuxAddress readCompOopAddress(long address) throws DebuggerException;
public LinuxOopHandle readOopHandle(long address) throws DebuggerException;
+ public LinuxOopHandle readCompOopHandle(long address) throws DebuggerException;
public long[] getThreadIntegerRegisterSet(int lwp_id) throws DebuggerException;
public long getAddressValue(Address addr) throws DebuggerException;
public Address newAddress(long value) throws DebuggerException;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java Sun Apr 13 17:43:42 2008 -0400
@@ -423,6 +423,11 @@
long value = readAddressValue(address);
return (value == 0 ? null : new LinuxAddress(this, value));
}
+ public LinuxAddress readCompOopAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCompOopAddressValue(address);
+ return (value == 0 ? null : new LinuxAddress(this, value));
+ }
/** From the LinuxDebugger interface */
public LinuxOopHandle readOopHandle(long address)
@@ -431,6 +436,12 @@
long value = readAddressValue(address);
return (value == 0 ? null : new LinuxOopHandle(this, value));
}
+ public LinuxOopHandle readCompOopHandle(long address)
+ throws UnmappedAddressException, UnalignedAddressException,
+ NotInHeapException {
+ long value = readCompOopAddressValue(address);
+ return (value == 0 ? null : new LinuxOopHandle(this, value));
+ }
//----------------------------------------------------------------------
// Thread context access
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java Sun Apr 13 17:43:42 2008 -0400
@@ -72,6 +72,10 @@
return debugger.readAddress(addr + offset);
}
+ public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompOopAddress(addr + offset);
+ }
+
//
// Java-related routines
//
@@ -112,6 +116,10 @@
throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
return debugger.readOopHandle(addr + offset);
}
+ public OopHandle getCompOopHandleAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+ return debugger.readCompOopHandle(addr + offset);
+ }
// Mutators -- not implemented for now (FIXME)
public void setCIntegerAt(long offset, long numBytes, long value) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java Sun Apr 13 17:43:42 2008 -0400
@@ -46,7 +46,9 @@
public long readCInteger(long address, long numBytes, boolean isUnsigned)
throws DebuggerException;
public ProcAddress readAddress(long address) throws DebuggerException;
+ public ProcAddress readCompOopAddress(long address) throws DebuggerException;
public ProcOopHandle readOopHandle(long address) throws DebuggerException;
+ public ProcOopHandle readCompOopHandle(long address) throws DebuggerException;
public long[] getThreadIntegerRegisterSet(int tid) throws DebuggerException;
public long getAddressValue(Address addr) throws DebuggerException;
public Address newAddress(long value) throws DebuggerException;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java Sun Apr 13 17:43:42 2008 -0400
@@ -53,8 +53,6 @@
*/
public class ProcDebuggerLocal extends DebuggerBase implements ProcDebugger {
-
-
protected static final int cacheSize = 16 * 1024 * 1024; // 16 MB
//------------------------------------------------------------------------
@@ -337,10 +335,21 @@
return (value == 0 ? null : new ProcAddress(this, value));
}
+ public ProcAddress readCompOopAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCompOopAddressValue(address);
+ return (value == 0 ? null : new ProcAddress(this, value));
+ }
+
/** From the ProcDebugger interface */
public ProcOopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
- long value = readAddressValue(address);
+ long value = readAddressValue(address);
+ return (value == 0 ? null : new ProcOopHandle(this, value));
+ }
+
+ public ProcOopHandle readCompOopHandle(long address) {
+ long value = readCompOopAddressValue(address);
return (value == 0 ? null : new ProcOopHandle(this, value));
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteAddress.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteAddress.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteAddress.java Sun Apr 13 17:43:42 2008 -0400
@@ -71,6 +71,9 @@
public Address getAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
return debugger.readAddress(addr + offset);
}
+ public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompOopAddress(addr + offset);
+ }
//
// Java-related routines
@@ -112,6 +115,10 @@
throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
return debugger.readOopHandle(addr + offset);
}
+ public OopHandle getCompOopHandleAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+ return debugger.readCompOopHandle(addr + offset);
+ }
// Mutators -- not implemented for now (FIXME)
public void setCIntegerAt(long offset, long numBytes, long value) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java Sun Apr 13 17:43:42 2008 -0400
@@ -65,6 +65,9 @@
public long getJIntSize() throws RemoteException;
public long getJLongSize() throws RemoteException;
public long getJShortSize() throws RemoteException;
+ public long getHeapBase() throws RemoteException;
+ public long getHeapOopSize() throws RemoteException;
+ public long getLogMinObjAlignmentInBytes() throws RemoteException;
public boolean areThreadsEqual(long addrOrId1, boolean isAddress1,
long addrOrId2, boolean isAddress2) throws RemoteException;
public int getThreadHashCode(long addrOrId, boolean isAddress) throws RemoteException;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java Sun Apr 13 17:43:42 2008 -0400
@@ -85,6 +85,9 @@
jlongSize = remoteDebugger.getJLongSize();
jshortSize = remoteDebugger.getJShortSize();
javaPrimitiveTypesConfigured = true;
+ heapBase = remoteDebugger.getHeapBase();
+ heapOopSize = remoteDebugger.getHeapOopSize();
+ logMinObjAlignmentInBytes = remoteDebugger.getLogMinObjAlignmentInBytes();
}
catch (RemoteException e) {
throw new DebuggerException(e);
@@ -298,12 +301,24 @@
return (value == 0 ? null : new RemoteAddress(this, value));
}
+ RemoteAddress readCompOopAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ long value = readCompOopAddressValue(address);
+ return (value == 0 ? null : new RemoteAddress(this, value));
+ }
+
RemoteOopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
long value = readAddressValue(address);
return (value == 0 ? null : new RemoteOopHandle(this, value));
}
+ RemoteOopHandle readCompOopHandle(long address)
+ throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+ long value = readCompOopAddressValue(address);
+ return (value == 0 ? null : new RemoteOopHandle(this, value));
+ }
+
boolean areThreadsEqual(Address addr1, Address addr2) {
try {
return remoteDebugger.areThreadsEqual(getAddressValue(addr1), true,
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java Sun Apr 13 17:43:42 2008 -0400
@@ -114,6 +114,17 @@
return debugger.getJShortSize();
}
+ public long getHeapBase() throws RemoteException {
+ return debugger.getHeapBase();
+ }
+
+ public long getHeapOopSize() throws RemoteException {
+ return debugger.getHeapOopSize();
+ }
+
+ public long getLogMinObjAlignmentInBytes() throws RemoteException {
+ return debugger.getLogMinObjAlignmentInBytes();
+ }
public boolean areThreadsEqual(long addrOrId1, boolean isAddress1,
long addrOrId2, boolean isAddress2) throws RemoteException {
ThreadProxy t1 = getThreadProxy(addrOrId1, isAddress1);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Address.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Address.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Address.java Sun Apr 13 17:43:42 2008 -0400
@@ -72,6 +72,10 @@
return debugger.readAddress(addr + offset);
}
+ public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompOopAddress(addr + offset);
+ }
+
//
// Java-related routines
//
@@ -112,6 +116,10 @@
throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
return debugger.readOopHandle(addr + offset);
}
+ public OopHandle getCompOopHandleAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+ return debugger.readCompOopHandle(addr + offset);
+ }
//
// C/C++-related mutators
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Debugger.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Debugger.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Debugger.java Sun Apr 13 17:43:42 2008 -0400
@@ -45,7 +45,9 @@
public long readCInteger(long address, long numBytes, boolean isUnsigned)
throws DebuggerException;
public Win32Address readAddress(long address) throws DebuggerException;
+ public Win32Address readCompOopAddress(long address) throws DebuggerException;
public Win32OopHandle readOopHandle(long address) throws DebuggerException;
+ public Win32OopHandle readCompOopHandle(long address) throws DebuggerException;
public void writeJBoolean(long address, boolean value) throws DebuggerException;
public void writeJByte(long address, byte value) throws DebuggerException;
public void writeJChar(long address, char value) throws DebuggerException;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32DebuggerLocal.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32DebuggerLocal.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32DebuggerLocal.java Sun Apr 13 17:43:42 2008 -0400
@@ -306,12 +306,22 @@
return (Win32Address) newAddress(readAddressValue(address));
}
+ public Win32Address readCompOopAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ return (Win32Address) newAddress(readCompOopAddressValue(address));
+ }
+
/** From the Win32Debugger interface */
public Win32OopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
long value = readAddressValue(address);
return (value == 0 ? null : new Win32OopHandle(this, value));
}
+ public Win32OopHandle readCompOopHandle(long address)
+ throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+ long value = readCompOopAddressValue(address);
+ return (value == 0 ? null : new Win32OopHandle(this, value));
+ }
/** From the Win32Debugger interface */
public void writeAddress(long address, Win32Address value) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgAddress.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgAddress.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgAddress.java Sun Apr 13 17:43:42 2008 -0400
@@ -72,6 +72,10 @@
return debugger.readAddress(addr + offset);
}
+ public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+ return debugger.readCompOopAddress(addr + offset);
+ }
+
//
// Java-related routines
//
@@ -113,6 +117,10 @@
return debugger.readOopHandle(addr + offset);
}
+ public OopHandle getCompOopHandleAt(long offset)
+ throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+ return debugger.readCompOopHandle(addr + offset);
+ }
//
// C/C++-related mutators
//
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebugger.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebugger.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebugger.java Sun Apr 13 17:43:42 2008 -0400
@@ -45,7 +45,9 @@
public long readCInteger(long address, long numBytes, boolean isUnsigned)
throws DebuggerException;
public WindbgAddress readAddress(long address) throws DebuggerException;
+ public WindbgAddress readCompOopAddress(long address) throws DebuggerException;
public WindbgOopHandle readOopHandle(long address) throws DebuggerException;
+ public WindbgOopHandle readCompOopHandle(long address) throws DebuggerException;
// The returned array of register contents is guaranteed to be in
// the same order as in the DbxDebugger for Solaris/x86 or amd64; that is,
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java Sun Apr 13 17:43:42 2008 -0400
@@ -39,6 +39,7 @@
import sun.jvm.hotspot.debugger.cdbg.basic.BasicDebugEvent;
import sun.jvm.hotspot.utilities.*;
import sun.jvm.hotspot.utilities.memo.*;
+import sun.jvm.hotspot.runtime.*;
/** An implementation of the JVMDebugger interface which talks to
windbg and symbol table management is done in Java.
@@ -315,12 +316,22 @@
return (WindbgAddress) newAddress(readAddressValue(address));
}
+ public WindbgAddress readCompOopAddress(long address)
+ throws UnmappedAddressException, UnalignedAddressException {
+ return (WindbgAddress) newAddress(readCompOopAddressValue(address));
+ }
+
/** From the WindbgDebugger interface */
public WindbgOopHandle readOopHandle(long address)
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
long value = readAddressValue(address);
return (value == 0 ? null : new WindbgOopHandle(this, value));
}
+ public WindbgOopHandle readCompOopHandle(long address)
+ throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+ long value = readCompOopAddressValue(address);
+ return (value == 0 ? null : new WindbgOopHandle(this, value));
+ }
/** From the WindbgDebugger interface */
public int getAddressSize() {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java Sun Apr 13 17:43:42 2008 -0400
@@ -53,6 +53,8 @@
// system obj array klass object
private static sun.jvm.hotspot.types.OopField systemObjArrayKlassObjField;
+ private static AddressField heapBaseField;
+
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@@ -83,6 +85,8 @@
doubleArrayKlassObjField = type.getOopField("_doubleArrayKlassObj");
systemObjArrayKlassObjField = type.getOopField("_systemObjArrayKlassObj");
+
+ heapBaseField = type.getAddressField("_heap_base");
}
public Universe() {
@@ -96,6 +100,14 @@
}
}
+ public static long getHeapBase() {
+ if (heapBaseField.getValue() == null) {
+ return 0;
+ } else {
+ return heapBaseField.getValue().minus(null);
+ }
+ }
+
/** Returns "TRUE" iff "p" points into the allocated area of the heap. */
public boolean isIn(Address p) {
return heap().isIn(p);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Array.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Array.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Array.java Sun Apr 13 17:43:42 2008 -0400
@@ -47,18 +47,52 @@
private static void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("arrayOopDesc");
- length = new CIntField(type.getCIntegerField("_length"), 0);
- headerSize = type.getSize();
+ typeSize = (int)type.getSize();
}
// Size of the arrayOopDesc
- private static long headerSize;
+ private static long headerSize=0;
+ private static long lengthOffsetInBytes=0;
+ private static long typeSize;
+
+ private static long headerSizeInBytes() {
+ if (headerSize != 0) {
+ return headerSize;
+ }
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ headerSize = typeSize;
+ } else {
+ headerSize = VM.getVM().alignUp(typeSize + VM.getVM().getIntSize(),
+ VM.getVM().getHeapWordSize());
+ }
+ return headerSize;
+ }
- // Fields
- private static CIntField length;
+ private static long headerSize(BasicType type) {
+ if (Universe.elementTypeShouldBeAligned(type)) {
+ return alignObjectSize(headerSizeInBytes())/VM.getVM().getHeapWordSize();
+ } else {
+ return headerSizeInBytes()/VM.getVM().getHeapWordSize();
+ }
+ }
+
+ private long lengthOffsetInBytes() {
+ if (lengthOffsetInBytes != 0) {
+ return lengthOffsetInBytes;
+ }
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ lengthOffsetInBytes = typeSize - VM.getVM().getIntSize();
+ } else {
+ lengthOffsetInBytes = typeSize;
+ }
+ return lengthOffsetInBytes;
+ }
// Accessors for declared fields
- public long getLength() { return length.getValue(this); }
+ public long getLength() {
+ boolean isUnsigned = true;
+ return this.getHandle().getCIntegerAt(lengthOffsetInBytes(), VM.getVM().getIntSize(), isUnsigned);
+ }
public long getObjectSize() {
ArrayKlass klass = (ArrayKlass) getKlass();
@@ -72,20 +106,12 @@
}
public static long baseOffsetInBytes(BasicType type) {
- if (Universe.elementTypeShouldBeAligned(type)) {
- return (VM.getVM().isLP64()) ? alignObjectSize(headerSize)
- : VM.getVM().alignUp(headerSize, 8);
- } else {
- return headerSize;
- }
+ return headerSize(type) * VM.getVM().getHeapWordSize();
}
public boolean isArray() { return true; }
public void iterateFields(OopVisitor visitor, boolean doVMFields) {
super.iterateFields(visitor, doVMFields);
- if (doVMFields) {
- visitor.doCInt(length, true);
- }
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Sun Apr 13 17:43:42 2008 -0400
@@ -31,10 +31,10 @@
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
-// A ConstantPool is an array containing class constants
+// A ConstantPool is an oop containing class constants
// as described in the class file
-public class ConstantPool extends Array implements ClassConstants {
+public class ConstantPool extends Oop implements ClassConstants {
// Used for debugging this code
private static final boolean DEBUG = false;
@@ -55,8 +55,9 @@
tags = new OopField(type.getOopField("_tags"), 0);
cache = new OopField(type.getOopField("_cache"), 0);
poolHolder = new OopField(type.getOopField("_pool_holder"), 0);
+ length = new CIntField(type.getCIntegerField("_length"), 0);
headerSize = type.getSize();
- elementSize = db.getOopSize();
+ elementSize = 0;
}
ConstantPool(OopHandle handle, ObjectHeap heap) {
@@ -68,7 +69,7 @@
private static OopField tags;
private static OopField cache;
private static OopField poolHolder;
-
+ private static CIntField length; // number of elements in oop
private static long headerSize;
private static long elementSize;
@@ -76,12 +77,22 @@
public TypeArray getTags() { return (TypeArray) tags.getValue(this); }
public ConstantPoolCache getCache() { return (ConstantPoolCache) cache.getValue(this); }
public Klass getPoolHolder() { return (Klass) poolHolder.getValue(this); }
+ public int getLength() { return (int)length.getValue(this); }
+
+ private long getElementSize() {
+ if (elementSize != 0) {
+ return elementSize;
+ } else {
+ elementSize = VM.getVM().getOopSize();
+ }
+ return elementSize;
+ }
private long indexOffset(long index) {
if (Assert.ASSERTS_ENABLED) {
- Assert.that(index > 0 && index < getLength(), "invalid cp index");
+ Assert.that(index > 0 && index < getLength(), "invalid cp index " + index + " " + getLength());
}
- return (index * elementSize) + headerSize;
+ return (index * getElementSize()) + headerSize;
}
public ConstantTag getTagAt(long index) {
@@ -464,7 +475,7 @@
}
public long getObjectSize() {
- return alignObjectSize(headerSize + (getLength() * elementSize));
+ return alignObjectSize(headerSize + (getLength() * getElementSize()));
}
//----------------------------------------------------------------------
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java Sun Apr 13 17:43:42 2008 -0400
@@ -31,10 +31,10 @@
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
-// A ConstantPool is an array containing class constants
-// as described in the class file
-
-public class ConstantPoolCache extends Array {
+// ConstantPoolCache : A constant pool cache (constantPoolCacheOopDesc).
+// See cpCacheOop.hpp for details about this class.
+//
+public class ConstantPoolCache extends Oop {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@@ -47,9 +47,9 @@
Type type = db.lookupType("constantPoolCacheOopDesc");
constants = new OopField(type.getOopField("_constant_pool"), 0);
baseOffset = type.getSize();
-
Type elType = db.lookupType("ConstantPoolCacheEntry");
elementSize = elType.getSize();
+ length = new CIntField(type.getCIntegerField("_length"), 0);
}
ConstantPoolCache(OopHandle handle, ObjectHeap heap) {
@@ -62,6 +62,8 @@
private static long baseOffset;
private static long elementSize;
+ private static CIntField length;
+
public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); }
@@ -87,6 +89,10 @@
tty.print("ConstantPoolCache for " + getConstants().getPoolHolder().getName().asString());
}
+ public int getLength() {
+ return (int) length.getValue(this);
+ }
+
public void iterateFields(OopVisitor visitor, boolean doVMFields) {
super.iterateFields(visitor, doVMFields);
if (doVMFields) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheKlass.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheKlass.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheKlass.java Sun Apr 13 17:43:42 2008 -0400
@@ -32,7 +32,7 @@
// A ConstantPoolCacheKlass is the klass of a ConstantPoolCache
-public class ConstantPoolCacheKlass extends ArrayKlass {
+public class ConstantPoolCacheKlass extends Klass {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@@ -43,13 +43,20 @@
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("constantPoolCacheKlass");
+ headerSize = type.getSize() + Oop.getHeaderSize();
}
ConstantPoolCacheKlass(OopHandle handle, ObjectHeap heap) {
super(handle, heap);
}
+ public long getObjectSize() { return alignObjectSize(headerSize); }
+
public void printValueOn(PrintStream tty) {
tty.print("ConstantPoolCacheKlass");
}
+
+ private static long headerSize;
}
+
+
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolKlass.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolKlass.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolKlass.java Sun Apr 13 17:43:42 2008 -0400
@@ -32,7 +32,7 @@
// A ConstantPoolKlass is the klass of a ConstantPool
-public class ConstantPoolKlass extends ArrayKlass {
+public class ConstantPoolKlass extends Klass {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@@ -43,13 +43,19 @@
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("constantPoolKlass");
+ headerSize = type.getSize() + Oop.getHeaderSize();
}
ConstantPoolKlass(OopHandle handle, ObjectHeap heap) {
super(handle, heap);
}
+ public long getObjectSize() { return alignObjectSize(headerSize); }
+
public void printValueOn(PrintStream tty) {
tty.print("ConstantPoolKlass");
}
-};
+
+ private static long headerSize;
+}
+
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/DefaultOopVisitor.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/DefaultOopVisitor.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/DefaultOopVisitor.java Sun Apr 13 17:43:42 2008 -0400
@@ -46,6 +46,7 @@
// Callback methods for each field type in an object
public void doOop(OopField field, boolean isVMField) {}
+ public void doOop(NarrowOopField field, boolean isVMField) {}
public void doByte(ByteField field, boolean isVMField) {}
public void doChar(CharField field, boolean isVMField) {}
public void doBoolean(BooleanField field, boolean isVMField) {}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java Sun Apr 13 17:43:42 2008 -0400
@@ -40,15 +40,26 @@
}
});
}
+ private static long typeSize;
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("instanceOopDesc");
+ typeSize = type.getSize();
}
Instance(OopHandle handle, ObjectHeap heap) {
super(handle, heap);
}
+ // Returns header size in bytes.
+ public static long getHeaderSize() {
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ return typeSize - VM.getVM().getIntSize();
+ } else {
+ return typeSize;
+ }
+ }
+
public boolean isInstance() { return true; }
public void iterateFields(OopVisitor visitor, boolean doVMFields) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Sun Apr 13 17:43:42 2008 -0400
@@ -467,7 +467,6 @@
for (int index = 0; index < length; index += NEXT_OFFSET) {
short accessFlags = fields.getShortAt(index + ACCESS_FLAGS_OFFSET);
short signatureIndex = fields.getShortAt(index + SIGNATURE_INDEX_OFFSET);
-
FieldType type = new FieldType((Symbol) getConstants().getObjAt(signatureIndex));
AccessFlags access = new AccessFlags(accessFlags);
if (access.isStatic()) {
@@ -790,7 +789,11 @@
short signatureIndex = fields.getShortAt(index + SIGNATURE_INDEX_OFFSET);
FieldType type = new FieldType((Symbol) getConstants().getObjAt(signatureIndex));
if (type.isOop()) {
- return new OopField(this, index);
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ return new NarrowOopField(this, index);
+ } else {
+ return new OopField(this, index);
+ }
}
if (type.isByte()) {
return new ByteField(this, index);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Sun Apr 13 17:43:42 2008 -0400
@@ -171,8 +171,7 @@
}
public long getObjectSize() {
- System.out.println("should not reach here");
- return 0;
+ throw new RuntimeException("should not reach here");
}
/** Array class with specific rank */
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/NarrowOopField.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/NarrowOopField.java Sun Apr 13 17:43:42 2008 -0400
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import sun.jvm.hotspot.debugger.*;
+
+// The class for an oop field simply provides access to the value.
+public class NarrowOopField extends OopField {
+ public NarrowOopField(FieldIdentifier id, long offset, boolean isVMField) {
+ super(id, offset, isVMField);
+ }
+
+ public NarrowOopField(sun.jvm.hotspot.types.OopField vmField, long startOffset) {
+ super(new NamedFieldIdentifier(vmField.getName()), vmField.getOffset() + startOffset, true);
+ }
+
+ public NarrowOopField(InstanceKlass holder, int fieldArrayIndex) {
+ super(holder, fieldArrayIndex);
+ }
+
+ public Oop getValue(Oop obj) {
+ return obj.getHeap().newOop(getValueAsOopHandle(obj));
+ }
+
+ /** Debugging support */
+ public OopHandle getValueAsOopHandle(Oop obj) {
+ return obj.getHandle().getCompOopHandleAt(getOffset());
+ }
+
+ public void setValue(Oop obj) throws MutationException {
+ // Fix this: setOopAt is missing in Address
+ }
+}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjArray.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjArray.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjArray.java Sun Apr 13 17:43:42 2008 -0400
@@ -43,7 +43,7 @@
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("objArrayOopDesc");
- elementSize = db.getOopSize();
+ elementSize = VM.getVM().getHeapOopSize();
}
ObjArray(OopHandle handle, ObjectHeap heap) {
@@ -54,9 +54,17 @@
private static long elementSize;
- public Oop getObjAt(long index) {
+ public OopHandle getOopHandleAt(long index) {
long offset = baseOffsetInBytes(BasicType.T_OBJECT) + (index * elementSize);
- return getHeap().newOop(getHandle().getOopHandleAt(offset));
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ return getHandle().getCompOopHandleAt(offset);
+ } else {
+ return getHandle().getOopHandleAt(offset);
+ }
+ }
+
+ public Oop getObjAt(long index) {
+ return getHeap().newOop(getOopHandleAt(index));
}
public void printValueOn(PrintStream tty) {
@@ -69,7 +77,13 @@
long baseOffset = baseOffsetInBytes(BasicType.T_OBJECT);
for (int index = 0; index < length; index++) {
long offset = baseOffset + (index * elementSize);
- visitor.doOop(new OopField(new IndexableFieldIdentifier(index), offset, false), false);
+ OopField field;
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ field = new NarrowOopField(new IndexableFieldIdentifier(index), offset, false);
+ } else {
+ field = new OopField(new IndexableFieldIdentifier(index), offset, false);
+ }
+ visitor.doOop(field, false);
}
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java Sun Apr 13 17:43:42 2008 -0400
@@ -41,6 +41,12 @@
public class ObjectHeap {
+ private static final boolean DEBUG;
+
+ static {
+ DEBUG = System.getProperty("sun.jvm.hotspot.oops.ObjectHeap.DEBUG") != null;
+ }
+
private OopHandle symbolKlassHandle;
private OopHandle methodKlassHandle;
private OopHandle constMethodKlassHandle;
@@ -152,7 +158,7 @@
public ObjectHeap(TypeDataBase db) throws WrongTypeException {
// Get commonly used sizes of basic types
- oopSize = db.getOopSize();
+ oopSize = VM.getVM().getOopSize();
byteSize = db.getJByteType().getSize();
charSize = db.getJCharType().getSize();
booleanSize = db.getJBooleanType().getSize();
@@ -440,12 +446,16 @@
try {
// Traverses the space from bottom to top
OopHandle handle = bottom.addOffsetToAsOopHandle(0);
+
while (handle.lessThan(top)) {
Oop obj = null;
try {
obj = newOop(handle);
} catch (UnknownOopException exp) {
+ if (DEBUG) {
+ throw new RuntimeException(" UnknownOopException " + exp);
+ }
}
if (obj == null) {
//Find the object size using Printezis bits and skip over
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogram.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogram.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogram.java Sun Apr 13 17:43:42 2008 -0400
@@ -64,8 +64,17 @@
List list = getElements();
ObjectHistogramElement.titleOn(tty);
Iterator iterator = list.listIterator();
+ int num=0;
+ int totalCount=0;
+ int totalSize=0;
while (iterator.hasNext()) {
- ((ObjectHistogramElement) iterator.next()).printOn(tty);
+ ObjectHistogramElement el = (ObjectHistogramElement) iterator.next();
+ num++;
+ totalCount+=el.getCount();
+ totalSize+=el.getSize();
+ tty.print(num + ":" + "\t\t");
+ el.printOn(tty);
}
+ tty.println("Total : " + "\t" + totalCount + "\t" + totalSize);
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogramElement.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogramElement.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogramElement.java Sun Apr 13 17:43:42 2008 -0400
@@ -110,12 +110,12 @@
public static void titleOn(PrintStream tty) {
tty.println("Object Histogram:");
tty.println();
- tty.println("Size" + "\t" + "Count" + "\t" + "Class description");
- tty.println("-------------------------------------------------------");
+ tty.println("num " + "\t" + " #instances" + "\t" + "#bytes" + "\t" + "Class description");
+ tty.println("--------------------------------------------------------------------------");
}
public void printOn(PrintStream tty) {
- tty.print(size + "\t" + count + "\t");
+ tty.print(count + "\t" + size + "\t");
tty.print(getDescription());
tty.println();
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java Sun Apr 13 17:43:42 2008 -0400
@@ -47,7 +47,8 @@
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("oopDesc");
mark = new CIntField(type.getCIntegerField("_mark"), 0);
- klass = new OopField(type.getOopField("_klass"), 0);
+ klass = new OopField(type.getOopField("_metadata._klass"), 0);
+ compressedKlass = new NarrowOopField(type.getOopField("_metadata._compressed_klass"), 0);
headerSize = type.getSize();
}
@@ -67,10 +68,11 @@
public OopHandle getHandle() { return handle; }
private static long headerSize;
- public static long getHeaderSize() { return headerSize; }
+ public static long getHeaderSize() { return headerSize; } // Header size in bytes.
private static CIntField mark;
private static OopField klass;
+ private static NarrowOopField compressedKlass;
public boolean isShared() {
return CompactingPermGenGen.isShared(handle);
@@ -86,7 +88,13 @@
// Accessors for declared fields
public Mark getMark() { return new Mark(getHandle()); }
- public Klass getKlass() { return (Klass) klass.getValue(this); }
+ public Klass getKlass() {
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ return (Klass) compressedKlass.getValue(this);
+ } else {
+ return (Klass) klass.getValue(this);
+ }
+ }
public boolean isA(Klass k) {
return getKlass().isSubtypeOf(k);
@@ -120,7 +128,7 @@
// Align the object size.
public static long alignObjectSize(long size) {
- return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignmentInBytes());
+ return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignment());
}
// All vm's align longs, so pad out certain offsets.
@@ -163,7 +171,11 @@
void iterateFields(OopVisitor visitor, boolean doVMFields) {
if (doVMFields) {
visitor.doCInt(mark, true);
- visitor.doOop(klass, true);
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ visitor.doOop(compressedKlass, true);
+ } else {
+ visitor.doOop(klass, true);
+ }
}
}
@@ -219,6 +231,10 @@
if (handle == null) {
return null;
}
- return handle.getOopHandleAt(klass.getOffset());
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ return handle.getCompOopHandleAt(compressedKlass.getOffset());
+ } else {
+ return handle.getOopHandleAt(klass.getOffset());
+ }
}
};
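
The UseCompressedOops test above recurs throughout the agent (the klass slot here, array elements in ObjArray, hc_klass in OopUtilities below). A hedged sketch of the shared pattern, written against the SA classes this patch touches:

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.VM;

class OopReadSketch {
    // Sketch only: choose the read based on the VM's compressed-oops mode.
    static OopHandle readOopAt(OopHandle holder, long offset) {
        if (VM.getVM().isCompressedOopsEnabled()) {
            return holder.getCompOopHandleAt(offset); // 4-byte narrow oop, widened via the heap base
        } else {
            return holder.getOopHandleAt(offset);     // full-width oop
        }
    }
}
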
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopPrinter.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopPrinter.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopPrinter.java Sun Apr 13 17:43:42 2008 -0400
@@ -57,6 +57,13 @@
Oop.printOopValueOn(field.getValue(getObj()), tty);
tty.println();
}
+
+ public void doOop(NarrowOopField field, boolean isVMField) {
+ printField(field);
+ Oop.printOopValueOn(field.getValue(getObj()), tty);
+ tty.println();
+ }
+
public void doChar(CharField field, boolean isVMField) {
printField(field);
char c = field.getValue(getObj());
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java Sun Apr 13 17:43:42 2008 -0400
@@ -281,8 +281,11 @@
} catch (RuntimeException re) {
// ignore, currently java_lang_Class::hc_klass_offset is zero
}
-
- hcKlassField = new OopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true);
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ hcKlassField = new NarrowOopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true);
+ } else {
+ hcKlassField = new OopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true);
+ }
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopVisitor.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopVisitor.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopVisitor.java Sun Apr 13 17:43:42 2008 -0400
@@ -41,6 +41,7 @@
// Callback methods for each field type in an object
public void doOop(OopField field, boolean isVMField);
+ public void doOop(NarrowOopField field, boolean isVMField);
public void doByte(ByteField field, boolean isVMField);
public void doChar(CharField field, boolean isVMField);
public void doBoolean(BooleanField field, boolean isVMField);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/AddressVisitor.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/AddressVisitor.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/AddressVisitor.java Sun Apr 13 17:43:42 2008 -0400
@@ -31,4 +31,5 @@
public interface AddressVisitor {
public void visitAddress(Address addr);
+ public void visitCompOopAddress(Address addr);
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java Sun Apr 13 17:43:42 2008 -0400
@@ -534,7 +534,8 @@
public void visitValueLocation(Address valueAddr) {
}
- public void visitDeadLocation(Address deadAddr) {
+ public void visitNarrowOopLocation(Address compOopAddr) {
+ addressVisitor.visitCompOopAddress(compOopAddr);
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Sun Apr 13 17:43:42 2008 -0400
@@ -36,6 +36,7 @@
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
+import sun.jvm.hotspot.runtime.*;
/** This class encapsulates the global state of the VM; the
universe, object heap, interpreter, etc. It is a Singleton and
@@ -93,6 +94,10 @@
private boolean isLP64;
private int bytesPerLong;
private int minObjAlignmentInBytes;
+ private int logMinObjAlignmentInBytes;
+ private int heapWordSize;
+ private int heapOopSize;
+ private int oopSize;
/** This is only present in a non-core build */
private CodeCache codeCache;
/** This is only present in a C1 build */
@@ -117,6 +122,7 @@
private static Type uintxType;
private static CIntegerType boolType;
private Boolean sharingEnabled;
+ private Boolean compressedOopsEnabled;
// command line flags supplied to VM - see struct Flag in globals.hpp
public static final class Flag {
@@ -308,6 +314,11 @@
}
bytesPerLong = db.lookupIntConstant("BytesPerLong").intValue();
minObjAlignmentInBytes = db.lookupIntConstant("MinObjAlignmentInBytes").intValue();
+ // minObjAlignment = db.lookupIntConstant("MinObjAlignment").intValue();
+ logMinObjAlignmentInBytes = db.lookupIntConstant("LogMinObjAlignmentInBytes").intValue();
+ heapWordSize = db.lookupIntConstant("HeapWordSize").intValue();
+ oopSize = db.lookupIntConstant("oopSize").intValue();
+ heapOopSize = db.lookupIntConstant("heapOopSize").intValue();
intxType = db.lookupType("intx");
uintxType = db.lookupType("uintx");
@@ -331,6 +342,8 @@
throw new RuntimeException("Attempt to initialize VM twice");
}
soleInstance = new VM(db, debugger, debugger.getMachineDescription().isBigEndian());
+ debugger.putHeapConst(Universe.getHeapBase(), soleInstance.getHeapOopSize(),
+ soleInstance.logMinObjAlignmentInBytes);
for (Iterator iter = vmInitializedObservers.iterator(); iter.hasNext(); ) {
((Observer) iter.next()).update(null, null);
}
@@ -440,13 +453,17 @@
}
public long getOopSize() {
- return db.getOopSize();
+ return oopSize;
}
public long getLogAddressSize() {
return logAddressSize;
}
+ public long getIntSize() {
+ return db.getJIntType().getSize();
+ }
+
/** NOTE: this offset is in BYTES in this system! */
public long getStackBias() {
return stackBias;
@@ -467,10 +484,24 @@
}
/** Get minimum object alignment in bytes. */
+ public int getMinObjAlignment() {
+ return minObjAlignmentInBytes;
+ }
+
public int getMinObjAlignmentInBytes() {
return minObjAlignmentInBytes;
}
+ public int getLogMinObjAlignmentInBytes() {
+ return logMinObjAlignmentInBytes;
+ }
+ public int getHeapWordSize() {
+ return heapWordSize;
+ }
+
+ public int getHeapOopSize() {
+ return heapOopSize;
+ }
/** Utility routine for getting data structure alignment correct */
public long alignUp(long size, long alignment) {
return (size + alignment - 1) & ~(alignment - 1);
@@ -701,6 +732,14 @@
return sharingEnabled.booleanValue();
}
+ public boolean isCompressedOopsEnabled() {
+ if (compressedOopsEnabled == null) {
+ Flag flag = getCommandLineFlag("UseCompressedOops");
+ compressedOopsEnabled = (flag == null) ? Boolean.FALSE:
+ (flag.getBool()? Boolean.TRUE: Boolean.FALSE);
+ }
+ return compressedOopsEnabled.booleanValue();
+ }
// returns null, if not available.
public Flag[] getCommandLineFlags() {
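
A hedged usage sketch of the accessors added in this hunk; it assumes an SA session is already attached and only calls getters defined above.

import sun.jvm.hotspot.runtime.VM;

class CompressedOopsInfo {
    // Prints the compressed-oops related sizes the agent now tracks.
    static void report() {
        VM vm = VM.getVM();
        System.out.println("UseCompressedOops     : " + vm.isCompressedOopsEnabled());
        System.out.println("oopSize (native oop)  : " + vm.getOopSize());
        System.out.println("heapOopSize (in heap) : " + vm.getHeapOopSize());
        System.out.println("log min obj alignment : " + vm.getLogMinObjAlignmentInBytes());
    }
}
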
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Field.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Field.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Field.java Sun Apr 13 17:43:42 2008 -0400
@@ -109,6 +109,8 @@
public Address getAddress (Address addr) throws UnmappedAddressException, UnalignedAddressException, WrongTypeException;
public OopHandle getOopHandle(Address addr)
throws UnmappedAddressException, UnalignedAddressException, WrongTypeException, NotInHeapException;
+ public OopHandle getNarrowOopHandle(Address addr)
+ throws UnmappedAddressException, UnalignedAddressException, WrongTypeException, NotInHeapException;
/**
These accessors require that the field be static; otherwise,
a WrongTypeException will be thrown. Note that type checking is
@@ -138,4 +140,6 @@
public Address getAddress () throws UnmappedAddressException, UnalignedAddressException;
public OopHandle getOopHandle()
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException;
+ public OopHandle getNarrowOopHandle()
+ throws UnmappedAddressException, UnalignedAddressException, NotInHeapException;
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/types/NarrowOopField.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/NarrowOopField.java Sun Apr 13 17:43:42 2008 -0400
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.types;
+
+import sun.jvm.hotspot.debugger.*;
+
+/** A specialization of Field which represents a field containing a
+ narrow oop value and which adds typechecked getValue() routines returning
+ OopHandles. */
+
+public interface NarrowOopField extends OopField {
+ /** The field must be nonstatic and the type of the field must be an
+ oop type, or a WrongTypeException will be thrown. */
+ public OopHandle getValue(Address addr) throws UnmappedAddressException, UnalignedAddressException, WrongTypeException;
+
+ /** The field must be static and the type of the field must be an
+ oop type, or a WrongTypeException will be thrown. */
+ public OopHandle getValue() throws UnmappedAddressException, UnalignedAddressException, WrongTypeException;
+}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Type.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Type.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Type.java Sun Apr 13 17:43:42 2008 -0400
@@ -122,5 +122,6 @@
public JShortField getJShortField (String fieldName) throws WrongTypeException;
public CIntegerField getCIntegerField (String fieldName) throws WrongTypeException;
public OopField getOopField (String fieldName) throws WrongTypeException;
+ public NarrowOopField getNarrowOopField (String fieldName) throws WrongTypeException;
public AddressField getAddressField (String fieldName);
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicField.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicField.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicField.java Sun Apr 13 17:43:42 2008 -0400
@@ -43,6 +43,19 @@
/** Used for static fields only */
private Address staticFieldAddress;
+ // Copy constructor to create NarrowOopField from OopField.
+ public BasicField(Field fld) {
+ BasicField field = (BasicField)fld;
+
+ this.db = field.db;
+ this.containingType = field.containingType;
+ this.name = field.name;
+ this.type = field.type;
+ this.size = field.size;
+ this.isStatic = field.isStatic;
+ this.offset = field.offset;
+ this.staticFieldAddress = field.staticFieldAddress;
+ }
/** offsetInBytes is ignored if the field is static;
staticFieldAddress is used only if the field is static. */
public BasicField(BasicTypeDataBase db, Type containingType, String name, Type type,
@@ -161,6 +174,13 @@
}
return addr.getOopHandleAt(offset);
}
+ public OopHandle getNarrowOopHandle(Address addr)
+ throws UnmappedAddressException, UnalignedAddressException, WrongTypeException, NotInHeapException {
+ if (isStatic) {
+ throw new WrongTypeException();
+ }
+ return addr.getCompOopHandleAt(offset);
+ }
//--------------------------------------------------------------------------------
// Dereferencing operations for static fields
@@ -234,4 +254,11 @@
}
return staticFieldAddress.getOopHandleAt(0);
}
+ public OopHandle getNarrowOopHandle()
+ throws UnmappedAddressException, UnalignedAddressException, WrongTypeException, NotInHeapException {
+ if (!isStatic) {
+ throw new WrongTypeException();
+ }
+ return staticFieldAddress.getCompOopHandleAt(0);
+ }
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicFieldWrapper.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicFieldWrapper.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicFieldWrapper.java Sun Apr 13 17:43:42 2008 -0400
@@ -95,6 +95,10 @@
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
return field.getOopHandle(addr);
}
+ public OopHandle getNarrowOopHandle(Address addr)
+ throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+ return field.getNarrowOopHandle(addr);
+ }
public boolean getJBoolean () throws UnmappedAddressException, UnalignedAddressException, WrongTypeException {
return field.getJBoolean();
@@ -130,4 +134,8 @@
throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
return field.getOopHandle();
}
+ public OopHandle getNarrowOopHandle()
+ throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+ return field.getNarrowOopHandle();
+ }
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicNarrowOopField.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicNarrowOopField.java Sun Apr 13 17:43:42 2008 -0400
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.types.basic;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.types.*;
+
+/** A specialization of BasicOopField which represents a field containing
+ a narrow oop value and which adds typechecked getValue() routines
+ returning OopHandles. */
+
+public class BasicNarrowOopField extends BasicOopField implements NarrowOopField {
+
+ private static final boolean DEBUG = false;
+
+ public BasicNarrowOopField (OopField oopf) {
+ super(oopf);
+ }
+
+ public BasicNarrowOopField(BasicTypeDataBase db, Type containingType, String name, Type type,
+ boolean isStatic, long offset, Address staticFieldAddress) {
+ super(db, containingType, name, type, isStatic, offset, staticFieldAddress);
+
+ if (DEBUG) {
+ System.out.println(" name " + name + " type " + type + " isStatic " + isStatic + " offset " + offset + " static addr " + staticFieldAddress);
+ }
+ if (!type.isOopType()) {
+ throw new WrongTypeException("Type of a BasicNarrowOopField must be an oop type");
+ }
+ }
+
+ /** The field must be nonstatic and the type of the field must be a
+ Java oop, or a WrongTypeException will be thrown. */
+ public OopHandle getValue(Address addr) throws UnmappedAddressException, UnalignedAddressException, WrongTypeException {
+ return getNarrowOopHandle(addr);
+ }
+
+ /** The field must be static and the type of the field must be a
+ Java oop, or a WrongTypeException will be thrown. */
+ public OopHandle getValue() throws UnmappedAddressException, UnalignedAddressException, WrongTypeException {
+ return getNarrowOopHandle();
+ }
+}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicOopField.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicOopField.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicOopField.java Sun Apr 13 17:43:42 2008 -0400
@@ -32,6 +32,12 @@
returning OopHandles. */
public class BasicOopField extends BasicField implements OopField {
+
+
+ public BasicOopField(OopField oopf) {
+ super(oopf);
+ }
+
public BasicOopField(BasicTypeDataBase db, Type containingType, String name, Type type,
boolean isStatic, long offset, Address staticFieldAddress) {
super(db, containingType, name, type, isStatic, offset, staticFieldAddress);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicType.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicType.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicType.java Sun Apr 13 17:43:42 2008 -0400
@@ -273,6 +273,10 @@
return (OopField) field;
}
+ public NarrowOopField getNarrowOopField(String fieldName) throws WrongTypeException {
+ return (NarrowOopField) new BasicNarrowOopField(getOopField(fieldName));
+ }
+
public AddressField getAddressField(String fieldName) {
// This type can not be inferred (for now), so provide a wrapper
Field field = getField(fieldName);
@@ -287,7 +291,7 @@
name was already present in this class. */
public void addField(Field field) {
if (nameToFieldMap.get(field.getName()) != null) {
- throw new RuntimeException("field of name \"" + field.getName() + "\" already present");
+ throw new RuntimeException("field of name \"" + field.getName() + "\" already present in type " + this);
}
nameToFieldMap.put(field.getName(), field);
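
A usage sketch for the new getNarrowOopField() accessor, mirroring what RobustOopDeterminator does below; the field name is the one this patch uses for the compressed klass slot, and the exceptions are the ones the Field interface declares.

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;

class NarrowKlassSketch {
    // The narrow field is the oop field re-wrapped so getValue() reads 4 bytes
    // and widens them instead of loading a full-width pointer.
    static OopHandle compressedKlassOf(TypeDataBase db, Address oopAddr)
            throws UnmappedAddressException, UnalignedAddressException, WrongTypeException {
        Type oopDescType = db.lookupType("oopDesc");
        NarrowOopField compressedKlass =
            oopDescType.getNarrowOopField("_metadata._compressed_klass");
        return compressedKlass.getValue(oopAddr);
    }
}
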
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java Sun Apr 13 17:43:42 2008 -0400
@@ -27,6 +27,7 @@
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.runtime.VM;
/**
This is a basic implementation of the TypeDataBase interface.
It allows an external type database builder to add types to be
@@ -146,7 +147,7 @@
}
public long getOopSize() {
- return machDesc.getOopSize();
+ return VM.getVM().getOopSize();
}
public boolean addressTypeIsEqualToType(Address addr, Type type) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/FindInHeapPanel.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/FindInHeapPanel.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/FindInHeapPanel.java Sun Apr 13 17:43:42 2008 -0400
@@ -92,7 +92,17 @@
iterated += addressSize;
updateProgressBar();
}
+ public void visitCompOopAddress(Address addr) {
+ if (error) return;
+ Address val = addr.getCompOopAddressAt(0);
+ if (AddressOps.equal(val, value)) {
+ error = reportResult(addr);
+ }
+ iterated += addressSize;
+ updateProgressBar();
+
+ }
public void epilogue() {
iterated = 0;
updateProgressBar();
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Sun Apr 13 17:43:42 2008 -0400
@@ -1077,8 +1077,8 @@
oms = new OopMapStream(map, OopMapValue.OopTypes.VALUE_VALUE);
buf.append(omvIterator.iterate(oms, "Value:", false));
- oms = new OopMapStream(map, OopMapValue.OopTypes.DEAD_VALUE);
- buf.append(omvIterator.iterate(oms, "Dead:", false));
+ oms = new OopMapStream(map, OopMapValue.OopTypes.NARROWOOP_VALUE);
+ buf.append(omvIterator.iterate(oms, "Oop:", false));
oms = new OopMapStream(map, OopMapValue.OopTypes.CALLEE_SAVED_VALUE);
buf.append(omvIterator.iterate(oms, "Callee saved:", true));
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java Sun Apr 13 17:43:42 2008 -0400
@@ -156,6 +156,9 @@
throw new RuntimeException(exp);
}
}
+ public void visitCompOopAddress(Address handleAddr) {
+ throw new RuntimeException("Should not reach here. JNIHandles are not compressed");
+ }
});
} catch (RuntimeException re) {
handleRuntimeException(re);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Sun Apr 13 17:43:42 2008 -0400
@@ -574,6 +574,10 @@
throw new RuntimeException(exp);
}
}
+ public void visitCompOopAddress(Address handleAddr) {
+ throw new RuntimeException(
+ " Should not reach here. JNIHandles are not compressed \n");
+ }
});
} catch (RuntimeException re) {
handleRuntimeException(re);
@@ -601,8 +605,7 @@
writeObjectID(array.getKlass().getJavaMirror());
final int length = (int) array.getLength();
for (int index = 0; index < length; index++) {
- long offset = OBJECT_BASE_OFFSET + index * OBJ_ID_SIZE;
- OopHandle handle = array.getHandle().getOopHandleAt(offset);
+ OopHandle handle = array.getOopHandleAt(index);
writeObjectID(getAddressValue(handle));
}
}
@@ -803,8 +806,13 @@
break;
case JVM_SIGNATURE_CLASS:
case JVM_SIGNATURE_ARRAY: {
- OopHandle handle = ((OopField)field).getValueAsOopHandle(oop);
- writeObjectID(getAddressValue(handle));
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ OopHandle handle = ((NarrowOopField)field).getValueAsOopHandle(oop);
+ writeObjectID(getAddressValue(handle));
+ } else {
+ OopHandle handle = ((OopField)field).getValueAsOopHandle(oop);
+ writeObjectID(getAddressValue(handle));
+ }
break;
}
default:
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ReversePtrsAnalysis.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ReversePtrsAnalysis.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ReversePtrsAnalysis.java Sun Apr 13 17:43:42 2008 -0400
@@ -282,6 +282,15 @@
markAndTraverse(next);
}
+ public void visitCompOopAddress(Address addr) {
+ Oop next = heap.newOop(addr.getCompOopHandleAt(0));
+ LivenessPathElement lp = new LivenessPathElement(null,
+ new NamedFieldIdentifier(baseRootDescription +
+ " @ " + addr));
+ rp.put(lp, next);
+ markAndTraverse(next);
+ }
+
private String baseRootDescription;
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java Sun Apr 13 17:43:42 2008 -0400
@@ -51,7 +51,11 @@
private static void initialize(TypeDataBase db) {
Type type = db.lookupType("oopDesc");
- klassField = type.getOopField("_klass");
+ if (VM.getVM().isCompressedOopsEnabled()) {
+ klassField = type.getNarrowOopField("_metadata._compressed_klass");
+ } else {
+ klassField = type.getOopField("_metadata._klass");
+ }
}
public static boolean oopLooksValid(OopHandle oop) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/make/Makefile
--- a/hotspot/make/Makefile Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/make/Makefile Sun Apr 13 17:43:42 2008 -0400
@@ -85,6 +85,9 @@
C2_VM_TARGETS=product fastdebug optimized jvmg
KERNEL_VM_TARGETS=productkernel fastdebugkernel optimizedkernel jvmgkernel
+# JDK directory list
+JDK_DIRS=bin include jre lib demo
+
all: all_product all_fastdebug
all_product: product product1 productkernel docs export_product
all_fastdebug: fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
@@ -341,7 +344,7 @@
$(RM) -r $(JDK_IMAGE_DIR)
$(MKDIR) -p $(JDK_IMAGE_DIR)
($(CD) $(JDK_IMPORT_PATH) && \
- $(TAR) -cf - bin include jre lib) | \
+ $(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xf -)
copy_fastdebug_jdk:
@@ -349,11 +352,11 @@
$(MKDIR) -p $(JDK_IMAGE_DIR)/fastdebug
if [ -d $(JDK_IMPORT_PATH)/fastdebug ] ; then \
($(CD) $(JDK_IMPORT_PATH)/fastdebug && \
- $(TAR) -cf - bin include jre lib) | \
+ $(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR)/fastdebug && $(TAR) -xf -) ; \
else \
($(CD) $(JDK_IMPORT_PATH) && \
- $(TAR) -cf - bin include jre lib) | \
+ $(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR)/fastdebug && $(TAR) -xf -) ; \
fi
@@ -362,15 +365,15 @@
$(MKDIR) -p $(JDK_IMAGE_DIR)/debug
if [ -d $(JDK_IMPORT_PATH)/debug ] ; then \
($(CD) $(JDK_IMPORT_PATH)/debug && \
- $(TAR) -cf - bin include jre lib) | \
+ $(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
elif [ -d $(JDK_IMPORT_PATH)/fastdebug ] ; then \
($(CD) $(JDK_IMPORT_PATH)/fastdebug && \
- $(TAR) -cf - bin include jre lib) | \
+ $(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
else \
($(CD) $(JDK_IMPORT_PATH) && \
- $(TAR) -cf - bin include jre lib) | \
+ $(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
fi
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/make/solaris/makefiles/sparcWorks.make
--- a/hotspot/make/solaris/makefiles/sparcWorks.make Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/make/solaris/makefiles/sparcWorks.make Sun Apr 13 17:43:42 2008 -0400
@@ -185,6 +185,12 @@
# no more exceptions
CFLAGS/NOEX=-features=no%except
+
+# avoid compilation problems arising from the fact that the C++ compiler tries
+# to search for external template definitions by just compiling additional
+# source files in the same context
+CFLAGS += -template=no%extdef
+
# Reduce code bloat by reverting back to 5.0 behavior for static initializers
CFLAGS += -features=no%split_init
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/assembler_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -1779,7 +1779,7 @@
// Check the klassOop of this object for being in the right area of memory.
// Cannot do the load in the delay above slot in case O0 is null
- ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
+ load_klass(O0_obj, O0_obj);
// assert((klass & klass_mask) == klass_bits);
if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
set(Universe::verify_klass_mask(), O2_mask);
@@ -1788,8 +1788,9 @@
and3(O0_obj, O2_mask, O4_temp);
cmp(O4_temp, O3_bits);
brx(notEqual, false, pn, fail);
+ delayed()->nop();
// Check the klass's klass
- delayed()->ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
+ load_klass(O0_obj, O0_obj);
and3(O0_obj, O2_mask, O4_temp);
cmp(O4_temp, O3_bits);
brx(notEqual, false, pn, fail);
@@ -2588,8 +2589,9 @@
and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
cmp(temp_reg, markOopDesc::biased_lock_pattern);
brx(Assembler::notEqual, false, Assembler::pn, cas_label);
-
- delayed()->ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
+ delayed()->nop();
+
+ load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg);
@@ -2668,7 +2670,7 @@
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
- ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
+ load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
@@ -2700,7 +2702,7 @@
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
- ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
+ load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
@@ -3406,7 +3408,7 @@
// set klass to intArrayKlass
set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
ld_ptr(t2, 0, t2);
- st_ptr(t2, top, oopDesc::klass_offset_in_bytes());
+ store_klass(t2, top);
sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
@@ -3534,3 +3536,139 @@
st(G0, Rtsp, Rscratch);
}
}
+
+void MacroAssembler::load_klass(Register s, Register d) {
+ // The number of bytes in this code is used by
+ // MachCallDynamicJavaNode::ret_addr_offset()
+ // if this changes, change that.
+ if (UseCompressedOops) {
+ lduw(s, oopDesc::klass_offset_in_bytes(), d);
+ decode_heap_oop_not_null(d);
+ } else {
+ ld_ptr(s, oopDesc::klass_offset_in_bytes(), d);
+ }
+}
+
+// ??? figure out src vs. dst!
+void MacroAssembler::store_klass(Register d, Register s1) {
+ if (UseCompressedOops) {
+ assert(s1 != d, "not enough registers");
+ encode_heap_oop_not_null(d);
+ // Zero out entire klass field first.
+ st_ptr(G0, s1, oopDesc::klass_offset_in_bytes());
+ st(d, s1, oopDesc::klass_offset_in_bytes());
+ } else {
+ st_ptr(d, s1, oopDesc::klass_offset_in_bytes());
+ }
+}
+
+void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) {
+ if (UseCompressedOops) {
+ lduw(s, d, offset);
+ decode_heap_oop(d);
+ } else {
+ ld_ptr(s, d, offset);
+ }
+}
+
+void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
+ if (UseCompressedOops) {
+ lduw(s1, s2, d);
+ decode_heap_oop(d, d);
+ } else {
+ ld_ptr(s1, s2, d);
+ }
+}
+
+void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
+ if (UseCompressedOops) {
+ lduw(s1, simm13a, d);
+ decode_heap_oop(d, d);
+ } else {
+ ld_ptr(s1, simm13a, d);
+ }
+}
+
+void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
+ if (UseCompressedOops) {
+ assert(s1 != d && s2 != d, "not enough registers");
+ encode_heap_oop(d);
+ st(d, s1, s2);
+ } else {
+ st_ptr(d, s1, s2);
+ }
+}
+
+void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
+ if (UseCompressedOops) {
+ assert(s1 != d, "not enough registers");
+ encode_heap_oop(d);
+ st(d, s1, simm13a);
+ } else {
+ st_ptr(d, s1, simm13a);
+ }
+}
+
+void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
+ if (UseCompressedOops) {
+ assert(a.base() != d, "not enough registers");
+ encode_heap_oop(d);
+ st(d, a, offset);
+ } else {
+ st_ptr(d, a, offset);
+ }
+}
+
+
+void MacroAssembler::encode_heap_oop(Register src, Register dst) {
+ assert (UseCompressedOops, "must be compressed");
+ Label done;
+ if (src == dst) {
+ // optimize for frequent case src == dst
+ bpr(rc_nz, true, Assembler::pt, src, done);
+ delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
+ bind(done);
+ srlx(src, LogMinObjAlignmentInBytes, dst);
+ } else {
+ bpr(rc_z, false, Assembler::pn, src, done);
+ delayed() -> mov(G0, dst);
+ // could be moved before the branch and annul the delay slot,
+ // but that may add some unneeded work decoding null
+ sub(src, G6_heapbase, dst);
+ srlx(dst, LogMinObjAlignmentInBytes, dst);
+ bind(done);
+ }
+}
+
+
+void MacroAssembler::encode_heap_oop_not_null(Register r) {
+ assert (UseCompressedOops, "must be compressed");
+ sub(r, G6_heapbase, r);
+ srlx(r, LogMinObjAlignmentInBytes, r);
+}
+
+// Same algorithm as oops.inline.hpp decode_heap_oop.
+void MacroAssembler::decode_heap_oop(Register src, Register dst) {
+ assert (UseCompressedOops, "must be compressed");
+ Label done;
+ sllx(src, LogMinObjAlignmentInBytes, dst);
+ bpr(rc_nz, true, Assembler::pt, dst, done);
+ delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
+ bind(done);
+}
+
+void MacroAssembler::decode_heap_oop_not_null(Register r) {
+ // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+ // pd_code_size_limit.
+ assert (UseCompressedOops, "must be compressed");
+ sllx(r, LogMinObjAlignmentInBytes, r);
+ add(r, G6_heapbase, r);
+}
+
+void MacroAssembler::reinit_heapbase() {
+ if (UseCompressedOops) {
+ // call indirectly to solve generation ordering problem
+ Address base(G6_heapbase, (address)Universe::heap_base_addr());
+ load_ptr_contents(base, G6_heapbase);
+ }
+}
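
For reference, a plain-Java sketch of the arithmetic these stubs perform. The heap base and shift below are made-up example values; the real ones come from Universe::heap_base and LogMinObjAlignmentInBytes. With a 3-bit shift, 2^32 narrow values span the 32 GB heap limit mentioned in the synopsis.

class NarrowOopMath {
    static final long HEAP_BASE = 0x700000000L; // assumed base address of the Java heap
    static final int  SHIFT     = 3;            // assumed shift for 8-byte object alignment

    // encode_heap_oop: offset from the heap base, scaled down by the alignment; null stays 0.
    static int encode(long oop) {
        if (oop == 0L) return 0;
        return (int) ((oop - HEAP_BASE) >>> SHIFT);
    }

    // decode_heap_oop: scale the narrow value back up and rebase; null stays 0.
    static long decode(int narrow) {
        if (narrow == 0) return 0L;
        return ((narrow & 0xFFFFFFFFL) << SHIFT) + HEAP_BASE;
    }

    public static void main(String[] args) {
        long oop = HEAP_BASE + 0x12345678L * 8; // an 8-byte-aligned heap address
        assert decode(encode(oop)) == oop;
        assert encode(0L) == 0 && decode(0) == 0L;
    }
}
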
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/assembler_sparc.hpp
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -59,6 +59,7 @@
// This global always holds the current JavaThread pointer:
REGISTER_DECLARATION(Register, G2_thread , G2);
+REGISTER_DECLARATION(Register, G6_heapbase , G6);
// The following globals are part of the Java calling convention:
@@ -1975,6 +1976,29 @@
inline void tstbool( Register s ) { tst(s); }
inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
+ // klass oop manipulations if compressed
+ void load_klass(Register src_oop, Register dst);
+ void store_klass(Register dst_oop, Register s1);
+
+ // oop manipulations
+ void load_heap_oop(const Address& s, Register d, int offset = 0);
+ void load_heap_oop(Register s1, Register s2, Register d);
+ void load_heap_oop(Register s1, int simm13a, Register d);
+ void store_heap_oop(Register d, Register s1, Register s2);
+ void store_heap_oop(Register d, Register s1, int simm13a);
+ void store_heap_oop(Register d, const Address& a, int offset = 0);
+
+ void encode_heap_oop(Register src, Register dst);
+ void encode_heap_oop(Register r) {
+ encode_heap_oop(r, r);
+ }
+ void decode_heap_oop(Register src, Register dst);
+ void decode_heap_oop(Register r) {
+ decode_heap_oop(r, r);
+ }
+ void encode_heap_oop_not_null(Register r);
+ void decode_heap_oop_not_null(Register r);
+
// Support for managing the JavaThread pointer (i.e.; the reference to
// thread-local information).
void get_thread(); // load G2_thread
@@ -2050,6 +2074,9 @@
void push_CPU_state();
void pop_CPU_state();
+ // if heap base register is used - reinit it with the correct value
+ void reinit_heapbase();
+
// Debugging
void _verify_oop(Register reg, const char * msg, const char * file, int line);
void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -236,7 +236,7 @@
Register t1, // temp register
Register t2 // temp register
) {
- const int hdr_size_in_bytes = oopDesc::header_size_in_bytes();
+ const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
initialize_header(obj, klass, noreg, t1, t2);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/copy_sparc.hpp
--- a/hotspot/src/cpu/sparc/vm/copy_sparc.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/copy_sparc.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -137,24 +137,20 @@
}
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
-#if 0
- if (HeapWordsPerLong == 1 ||
- (HeapWordsPerLong == 2 &&
- mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0 &&
- ((count & 1) ? false : count >>= 1))) {
- julong* to = (julong*)tohw;
- julong v = ((julong)value << 32) | value;
- while (count-- > 0) {
- *to++ = v;
- }
- } else {
-#endif
- juint* to = (juint*)tohw;
- count *= HeapWordSize / BytesPerInt;
- while (count-- > 0) {
- *to++ = value;
- }
- // }
+#ifdef _LP64
+ guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
+ "unaligned fill words");
+ julong* to = (julong*)tohw;
+ julong v = ((julong)value << 32) | value;
+ while (count-- > 0) {
+ *to++ = v;
+ }
+#else // _LP64
+ juint* to = (juint*)tohw;
+ while (count-- > 0) {
+ *to++ = value;
+ }
+#endif // _LP64
}
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
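
A small sketch of the LP64 branch above: the 32-bit fill pattern is replicated into both halves of a 64-bit word so whole HeapWords can be stored at a time; the alignment the C++ code checks with guarantee() is assumed here.

class FillWordsSketch {
    // Same replication as ((julong)value << 32) | value in pd_fill_to_words;
    // the mask avoids Java's sign extension when widening int to long.
    static long replicate(int value) {
        long v = value & 0xFFFFFFFFL;
        return (v << 32) | v;
    }

    public static void main(String[] args) {
        System.out.println(Long.toHexString(replicate(0xDEADBEEF))); // deadbeefdeadbeef
    }
}
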
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -859,7 +859,7 @@
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
-// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
+// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Register Rsuper_klass,
Register Rtmp1,
@@ -891,6 +891,9 @@
// Now do a linear scan of the secondary super-klass chain.
delayed()->ld_ptr( Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), Rtmp2 );
+ // compress superclass
+ if (UseCompressedOops) encode_heap_oop(Rsuper_klass);
+
// Rtmp2 holds the objArrayOop of secondary supers.
ld( Rtmp2, arrayOopDesc::length_offset_in_bytes(), Rtmp1 );// Load the array length
// Check for empty secondary super list
@@ -900,20 +903,28 @@
bind( loop );
br( Assembler::equal, false, Assembler::pn, not_subtype );
delayed()->nop();
+
// load next super to check
- ld_ptr( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3 );
-
- // Bump array pointer forward one oop
- add( Rtmp2, wordSize, Rtmp2 );
+ if (UseCompressedOops) {
+ ld( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
+ // Bump array pointer forward one oop
+ add( Rtmp2, 4, Rtmp2 );
+ } else {
+ ld_ptr( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
+ // Bump array pointer forward one oop
+ add( Rtmp2, wordSize, Rtmp2);
+ }
// Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
cmp( Rtmp3, Rsuper_klass );
// A miss means we are NOT a subtype and need to keep looping
brx( Assembler::notEqual, false, Assembler::pt, loop );
delayed()->deccc( Rtmp1 ); // dec trip counter in delay slot
// Falling out the bottom means we found a hit; we ARE a subtype
+ if (UseCompressedOops) decode_heap_oop(Rsuper_klass);
br( Assembler::always, false, Assembler::pt, ok_is_subtype );
// Update the cache
- delayed()->st_ptr( Rsuper_klass, Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
+ delayed()->st_ptr( Rsuper_klass, Rsub_klass,
+ sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
bind(not_subtype);
profile_typecheck_failed(Rtmp1);
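
A high-level sketch of what the scan above does when UseCompressedOops is on: the secondary-supers array holds 4-byte narrow entries, so the cursor advances by 4 rather than wordSize, and the sought super-klass is compared in compressed form before being decoded again for the cache update.

class SecondarySupersScanSketch {
    // Models the linear scan in gen_subtype_check(); each array slot is a narrow
    // (compressed) klass word, matching the 4-byte stride used in the stub.
    static boolean isSubtype(int[] secondarySupers, int narrowSuperKlass) {
        for (int entry : secondarySupers) {
            if (entry == narrowSuperKlass) {
                return true;  // hit: branch to ok_is_subtype and update the cache
            }
        }
        return false;         // miss: not_subtype
    }
}
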
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -131,6 +131,7 @@
REGISTER_DEFINITION(Register, G2_thread);
+REGISTER_DEFINITION(Register, G6_heapbase);
REGISTER_DEFINITION(Register, G5_method);
REGISTER_DEFINITION(Register, G5_megamorphic_method);
REGISTER_DEFINITION(Register, G5_inline_cache_reg);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -160,18 +160,24 @@
map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */
+
+#ifdef _LP64
+ int debug_offset = 0;
+#else
+ int debug_offset = 4;
+#endif
// Save the G's
__ stx(G1, SP, g1_offset+STACK_BIAS);
- map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + 4)>>2), G1->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
__ stx(G3, SP, g3_offset+STACK_BIAS);
- map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + 4)>>2), G3->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());
__ stx(G4, SP, g4_offset+STACK_BIAS);
- map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + 4)>>2), G4->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());
__ stx(G5, SP, g5_offset+STACK_BIAS);
- map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + 4)>>2), G5->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());
// This is really a waste but we'll keep things as they were for now
if (true) {
@@ -182,11 +188,11 @@
map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
-#endif /* _LP64 */
map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
+#endif /* _LP64 */
}
@@ -1217,7 +1223,7 @@
__ verify_oop(O0);
__ verify_oop(G5_method);
- __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
+ __ load_klass(O0, G3_scratch);
__ verify_oop(G3_scratch);
#if !defined(_LP64) && defined(COMPILER2)
@@ -1820,7 +1826,7 @@
const Register temp_reg = G3_scratch;
Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
__ verify_oop(O0);
- __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
+ __ load_klass(O0, temp_reg);
__ cmp(temp_reg, G5_inline_cache_reg);
__ brx(Assembler::equal, true, Assembler::pt, L);
__ delayed()->nop();
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/sparc.ad
--- a/hotspot/src/cpu/sparc/vm/sparc.ad Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad Sun Apr 13 17:43:42 2008 -0400
@@ -544,11 +544,19 @@
assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
+ int klass_load_size;
+ if (UseCompressedOops) {
+ klass_load_size = 3*BytesPerInstWord; // see MacroAssembler::load_klass()
+ } else {
+ klass_load_size = 1*BytesPerInstWord;
+ }
if( Assembler::is_simm13(v_off) ) {
- return (3*BytesPerInstWord + // ld_ptr, ld_ptr, ld_ptr
+ return klass_load_size +
+ (2*BytesPerInstWord + // ld_ptr, ld_ptr
NativeCall::instruction_size); // call; delay slot
} else {
- return (5*BytesPerInstWord + // ld_ptr, set_hi, set, ld_ptr, ld_ptr
+ return klass_load_size +
+ (4*BytesPerInstWord + // set_hi, set, ld_ptr, ld_ptr
NativeCall::instruction_size); // call; delay slot
}
}
@@ -1591,7 +1599,13 @@
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
#ifdef _LP64
- st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
+ if (UseCompressedOops) {
+ st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
+ st->print_cr("\tSLL R_G5,3,R_G5");
+ st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
+ } else {
+ st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
+ }
st->print_cr("\tCMP R_G5,R_G3" );
st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64
@@ -1610,7 +1624,7 @@
assert( G5_ic_reg != temp_reg, "conflicting registers" );
// Load klass from reciever
- __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
+ __ load_klass(O0, temp_reg);
// Compare against expected klass
__ cmp(temp_reg, G5_ic_reg);
// Branch to miss code, checks xcc or icc depending
@@ -1811,6 +1825,11 @@
reg == R_I3H_num ||
reg == R_I4H_num ||
reg == R_I5H_num ) return true;
+
+ if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
+ return true;
+ }
+
#else
// 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
// Longs cannot be passed in O regs, because O regs become I regs
@@ -2474,7 +2493,13 @@
// get receiver klass (receiver already checked for non-null)
// If we end up going thru a c2i adapter interpreter expects method in G5
int off = __ offset();
- __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
+ __ load_klass(O0, G3_scratch);
+ int klass_load_size;
+ if (UseCompressedOops) {
+ klass_load_size = 3*BytesPerInstWord;
+ } else {
+ klass_load_size = 1*BytesPerInstWord;
+ }
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
if( __ is_simm13(v_off) ) {
@@ -2484,7 +2509,8 @@
__ Assembler::sethi(v_off & ~0x3ff, G5_method);
__ or3(G5_method, v_off & 0x3ff, G5_method);
// ld_ptr, set_hi, set
- assert(__ offset() - off == 3*BytesPerInstWord, "Unexpected instruction size(s)");
+ assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
+ "Unexpected instruction size(s)");
__ ld_ptr(G3, G5_method, G5_method);
}
// NOTE: for vtable dispatches, the vtable entry will never be null.
@@ -2860,12 +2886,12 @@
int count_offset = java_lang_String:: count_offset_in_bytes();
// load str1 (jchar*) base address into tmp1_reg
- __ ld_ptr(Address(str1_reg, 0, value_offset), tmp1_reg);
+ __ load_heap_oop(Address(str1_reg, 0, value_offset), tmp1_reg);
__ ld(Address(str1_reg, 0, offset_offset), result_reg);
__ add(tmp1_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1_reg);
__ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted
__ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
- __ ld_ptr(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
+ __ load_heap_oop(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
__ add(result_reg, tmp1_reg, tmp1_reg);
// load str2 (jchar*) base address into tmp2_reg
@@ -3016,6 +3042,7 @@
MacroAssembler _masm(&cbuf);
__ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
%}
+
enc_class enc_repl8b( iRegI src, iRegL dst ) %{
MacroAssembler _masm(&cbuf);
Register src_reg = reg_to_register_object($src$$reg);
@@ -3189,15 +3216,15 @@
c_return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
- static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
- static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
- static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
- static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
+ static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
+ static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
+ static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
+ static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
- static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
- static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
- static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
- static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
+ static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
+ static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
+ static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
+ static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] );
@@ -3207,15 +3234,15 @@
return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
- static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
- static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
- static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
- static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
+ static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
+ static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
+ static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
+ static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
- static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
- static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
- static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
- static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
+ static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
+ static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
+ static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
+ static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] );
@@ -3408,6 +3435,27 @@
interface(CONST_INTER);
%}
+// Pointer Immediate
+operand immN()
+%{
+ match(ConN);
+
+ op_cost(10);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
+// NULL Pointer Immediate
+operand immN0()
+%{
+ predicate(n->get_narrowcon() == 0);
+ match(ConN);
+
+ op_cost(0);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
operand immL() %{
match(ConL);
op_cost(40);
@@ -3672,6 +3720,14 @@
interface(REG_INTER);
%}
+operand iRegN() %{
+ constraint(ALLOC_IN_RC(int_reg));
+ match(RegN);
+
+ format %{ %}
+ interface(REG_INTER);
+%}
+
// Long Register
operand iRegL() %{
constraint(ALLOC_IN_RC(long_reg));
@@ -5392,9 +5448,30 @@
ins_pipe(iload_mem);
%}
+// Load Compressed Pointer
+instruct loadN(iRegN dst, memory mem) %{
+ match(Set dst (LoadN mem));
+ ins_cost(MEMORY_REF_COST);
+ size(4);
+
+ format %{ "LDUW $mem,$dst\t! compressed ptr" %}
+ ins_encode %{
+ Register base = as_Register($mem$$base);
+ Register index = as_Register($mem$$index);
+ Register dst = $dst$$Register;
+ if (index != G0) {
+ __ lduw(base, index, dst);
+ } else {
+ __ lduw(base, $mem$$disp, dst);
+ }
+ %}
+ ins_pipe(iload_mem);
+%}
+
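Editor's note: the loadN rule above is just a 32-bit unsigned load. A compressed oop occupies four bytes in the object, and no decode is emitted here because the node's result type is already narrow. A minimal C++ model of the access (names are illustrative, not HotSpot's):

    #include <cstdint>

    typedef uint32_t narrowOop;   // 32-bit compressed reference

    // Model of LoadN: fetch the raw 32-bit value. Decoding back to a real
    // address is a separate DecodeN step, emitted only where it is needed.
    static inline narrowOop load_narrow(const void* field_addr) {
        return *static_cast<const narrowOop*>(field_addr);
    }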
// Load Klass Pointer
instruct loadKlass(iRegP dst, memory mem) %{
match(Set dst (LoadKlass mem));
+ predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow());
ins_cost(MEMORY_REF_COST);
size(4);
@@ -5409,6 +5486,30 @@
ins_pipe(iload_mem);
%}
+// Load Klass Pointer
+instruct loadKlassComp(iRegP dst, memory mem) %{
+ match(Set dst (LoadKlass mem));
+ predicate(n->in(MemNode::Address)->bottom_type()->is_narrow());
+ ins_cost(MEMORY_REF_COST);
+
+ format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
+
+ ins_encode %{
+ Register base = as_Register($mem$$base);
+ Register index = as_Register($mem$$index);
+ Register dst = $dst$$Register;
+ if (index != G0) {
+ __ lduw(base, index, dst);
+ } else {
+ __ lduw(base, $mem$$disp, dst);
+ }
+ // The klass oop itself is never null, but this rule is also generated for
+ // non-header klass loads, which can be null.
+ __ decode_heap_oop(dst);
+ %}
+ ins_pipe(iload_mem);
+%}
+
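Editor's note: loadKlassComp pairs the 32-bit load with decode_heap_oop rather than the not-null variant because, as the comment says, the same rule also matches non-header klass loads whose value may be null. A sketch of a null-tolerant decode, assuming a 3-bit alignment shift and a process-wide heap base (illustrative names):

    #include <cstdint>

    extern uintptr_t heap_base;   // assumed: the Java heap base kept by the VM

    static inline void* decode_maybe_null(uint32_t narrow) {
        // 0 must map back to NULL, so the base is added only for non-zero values.
        return narrow == 0 ? nullptr
                           : reinterpret_cast<void*>(heap_base + (uintptr_t(narrow) << 3));
    }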
// Load Short (16bit signed)
instruct loadS(iRegI dst, memory mem) %{
match(Set dst (LoadS mem));
@@ -5508,6 +5609,24 @@
ins_pipe(loadConP_poll);
%}
+instruct loadConN(iRegN dst, immN src) %{
+ match(Set dst src);
+ ins_cost(DEFAULT_COST * 2);
+ format %{ "SET $src,$dst\t!ptr" %}
+ ins_encode %{
+ address con = (address)$src$$constant;
+ Register dst = $dst$$Register;
+ if (con == NULL) {
+ __ mov(G0, dst);
+ } else {
+ __ set_oop((jobject)$src$$constant, dst);
+ __ encode_heap_oop(dst);
+ }
+ %}
+ ins_pipe(loadConP);
+
+%}
+
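Editor's note: loadConN materializes a narrow oop constant by first setting the full oop (with its relocation) and then encoding it in place; a null constant simply becomes a zero register. Roughly, in C++ terms (an illustrative sketch, not the HotSpot API):

    #include <cstdint>

    extern uintptr_t heap_base;   // assumed heap base, as above

    static inline uint32_t materialize_narrow_constant(const void* oop_const) {
        if (oop_const == nullptr) return 0;                       // mov G0, dst
        uintptr_t full = reinterpret_cast<uintptr_t>(oop_const);  // set_oop
        return uint32_t((full - heap_base) >> 3);                 // encode_heap_oop
    }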
instruct loadConL(iRegL dst, immL src, o7RegL tmp) %{
// %%% maybe this should work like loadConD
match(Set dst src);
@@ -5741,6 +5860,44 @@
ins_pipe(istore_mem_zero);
%}
+// Store Compressed Pointer
+instruct storeN(memory dst, iRegN src) %{
+ match(Set dst (StoreN dst src));
+ ins_cost(MEMORY_REF_COST);
+ size(4);
+
+ format %{ "STW $src,$dst\t! compressed ptr" %}
+ ins_encode %{
+ Register base = as_Register($dst$$base);
+ Register index = as_Register($dst$$index);
+ Register src = $src$$Register;
+ if (index != G0) {
+ __ stw(src, base, index);
+ } else {
+ __ stw(src, base, $dst$$disp);
+ }
+ %}
+ ins_pipe(istore_mem_spORreg);
+%}
+
+instruct storeN0(memory dst, immN0 src) %{
+ match(Set dst (StoreN dst src));
+ ins_cost(MEMORY_REF_COST);
+ size(4);
+
+ format %{ "STW $src,$dst\t! compressed ptr" %}
+ ins_encode %{
+ Register base = as_Register($dst$$base);
+ Register index = as_Register($dst$$index);
+ if (index != G0) {
+ __ stw(0, base, index);
+ } else {
+ __ stw(0, base, $dst$$disp);
+ }
+ %}
+ ins_pipe(istore_mem_zero);
+%}
+
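Editor's note: storeN and storeN0 are ordinary 32-bit stores; a compressed null is stored as a literal zero word, which is why immN0 (narrowcon == 0) gets its own rule that stores from %g0. As a model:

    #include <cstdint>

    static inline void store_narrow(void* field_addr, uint32_t narrow) {
        *static_cast<uint32_t*>(field_addr) = narrow;   // STW src,[dst]
    }

    static inline void store_narrow_null(void* field_addr) {
        store_narrow(field_addr, 0);                    // STW %g0,[dst]
    }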
// Store Double
instruct storeD( memory mem, regD src) %{
match(Set mem (StoreD mem src));
@@ -5798,6 +5955,26 @@
ins_pipe(fstoreD_mem_reg);
%}
+// Convert oop pointer into compressed form
+instruct encodeHeapOop(iRegN dst, iRegP src) %{
+ match(Set dst (EncodeP src));
+ format %{ "SRL $src,3,$dst\t encodeHeapOop" %}
+ ins_encode %{
+ __ encode_heap_oop($src$$Register, $dst$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct decodeHeapOop(iRegP dst, iRegN src) %{
+ match(Set dst (DecodeN src));
+ format %{ "decode_heap_oop $src, $dst" %}
+ ins_encode %{
+ __ decode_heap_oop($src$$Register, $dst$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+
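Editor's note: EncodeP/DecodeN carry the core compression arithmetic. An oop is compressed by subtracting the heap base and shifting right by the object-alignment shift (3 for 8-byte alignment), and decoded by the inverse. A self-contained sketch of the round trip, assuming 8-byte minimum object alignment:

    #include <cstdint>

    static const unsigned log_min_obj_alignment = 3;   // 8-byte aligned objects
    extern uintptr_t heap_base;                        // assumed: start of the Java heap

    static inline uint32_t encode(uintptr_t oop) {
        return oop == 0 ? 0u
                        : uint32_t((oop - heap_base) >> log_min_obj_alignment);
    }

    static inline uintptr_t decode(uint32_t narrow) {
        return narrow == 0 ? 0u
                           : heap_base + (uintptr_t(narrow) << log_min_obj_alignment);
    }

    // With a 3-bit shift, 2^32 * 8 bytes = 32 GB of heap are addressable through
    // a 32-bit field, which is where the 32 GB limit in the summary comes from.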
// Store Zero into Aligned Packed Bytes
instruct storeA8B0(memory mem, immI0 zero) %{
match(Set mem (Store8B mem zero));
@@ -6434,17 +6611,27 @@
instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1);
-#ifdef _LP64
format %{
"MOV $newval,O7\n\t"
- "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
+ "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
"CMP $oldval,O7\t\t! See if we made progress\n\t"
"MOV 1,$res\n\t"
"MOVne xcc,R_G0,$res"
%}
+#ifdef _LP64
ins_encode( enc_casx(mem_ptr, oldval, newval),
enc_lflags_ne_to_boolean(res) );
#else
+ ins_encode( enc_casi(mem_ptr, oldval, newval),
+ enc_iflags_ne_to_boolean(res) );
+#endif
+ ins_pipe( long_memory_op );
+%}
+
+instruct compareAndSwapN_bool_comp(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp, flagsReg ccr ) %{
+ match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
+ effect( USE mem_ptr, KILL ccr, KILL tmp);
+
format %{
"MOV $newval,O7\n\t"
"CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
@@ -6452,9 +6639,18 @@
"MOV 1,$res\n\t"
"MOVne icc,R_G0,$res"
%}
- ins_encode( enc_casi(mem_ptr, oldval, newval),
- enc_iflags_ne_to_boolean(res) );
-#endif
+ ins_encode %{
+ Register Rmem = reg_to_register_object($mem_ptr$$reg);
+ Register Rold = reg_to_register_object($oldval$$reg);
+ Register Rnew = reg_to_register_object($newval$$reg);
+ Register Rres = reg_to_register_object($res$$reg);
+
+ __ cas(Rmem, Rold, Rnew);
+ __ cmp( Rold, Rnew );
+ __ mov(1, Rres);
+ __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
+ %}
+
ins_pipe( long_memory_op );
%}
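Editor's note: compareAndSwapN operates on already-encoded 32-bit values, so a word-sized CASA suffices and neither operand needs to be decoded for the comparison. The equivalent operation in portable C++ (a sketch using std::atomic rather than the CASA instruction):

    #include <atomic>
    #include <cstdint>

    // Returns true if the field held 'expected' and was replaced by 'desired'.
    static inline bool cas_narrow(std::atomic<uint32_t>* field,
                                  uint32_t expected, uint32_t desired) {
        return field->compare_exchange_strong(expected, desired);
    }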
@@ -8607,6 +8803,17 @@
ins_pipe(partial_subtype_check_pipe);
%}
+
+instruct compP_iRegN_immN0(flagsRegP pcc, iRegN op1, immN0 op2 ) %{
+ match(Set pcc (CmpN op1 op2));
+
+ size(4);
+ format %{ "CMP $op1,$op2\t! ptr" %}
+ opcode(Assembler::subcc_op3, Assembler::arith_op);
+ ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
+ ins_pipe(ialu_cconly_reg_imm);
+%}
+
// ============================================================================
// inlined locking and unlocking
@@ -8648,9 +8855,10 @@
ins_pipe(long_memory_op);
%}
-instruct string_compare(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2, notemp_iRegI result, flagsReg ccr) %{
+instruct string_compare(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2, notemp_iRegI result,
+ o7RegI tmp3, flagsReg ccr) %{
match(Set result (StrComp str1 str2));
- effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL ccr);
+ effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL ccr, KILL tmp3);
ins_cost(300);
format %{ "String Compare $str1,$str2 -> $result" %}
ins_encode( enc_String_Compare(str1, str2, tmp1, tmp2, result) );
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -127,6 +127,7 @@
// setup thread register
__ ld_ptr(thread.as_address(), G2_thread);
+ __ reinit_heapbase();
#ifdef ASSERT
// make sure we have no pending exceptions
@@ -896,6 +897,7 @@
// super: O2, argument, not changed
// raddr: O7, blown by call
address generate_partial_subtype_check() {
+ __ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
address start = __ pc();
Label loop, miss;
@@ -914,7 +916,7 @@
#if defined(COMPILER2) && !defined(_LP64)
// Do not use a 'save' because it blows the 64-bit O registers.
- __ add(SP,-4*wordSize,SP); // Make space for 4 temps
+ __ add(SP,-4*wordSize,SP); // Make space for 4 temps (stack must be 2 words aligned)
__ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
__ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
__ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
@@ -934,6 +936,17 @@
Register L2_super = L2;
Register L3_index = L3;
+#ifdef _LP64
+ Register L4_ooptmp = L4;
+
+ if (UseCompressedOops) {
+ // This must stay under the UseCompressedOops check: on 32-bit platforms C2
+ // relies on L4 not being clobbered, and only L0..L3 are explicitly saved on
+ // the stack (see several lines above).
+ __ encode_heap_oop(Rsuper, L4_ooptmp);
+ }
+#endif
+
inc_counter_np(SharedRuntime::_partial_subtype_ctr, L0, L1);
__ ld_ptr( Rsub, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), L3 );
@@ -942,18 +955,33 @@
__ clr(L3_index); // zero index
// Load a little early; will load 1 off the end of the array.
// Ok for now; revisit if we have other uses of this routine.
- __ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
- __ align(CodeEntryAlignment);
-
+ if (UseCompressedOops) {
+ __ ld(L1_ary_ptr,0,L2_super);// Will load a little early
+ } else {
+ __ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
+ }
+
+ assert(heapOopSize != 0, "heapOopSize should be initialized");
// The scan loop
__ BIND(loop);
- __ add(L1_ary_ptr,wordSize,L1_ary_ptr); // Bump by OOP size
+ __ add(L1_ary_ptr, heapOopSize, L1_ary_ptr); // Bump by OOP size
__ cmp(L3_index,L0_ary_len);
__ br(Assembler::equal,false,Assembler::pn,miss);
__ delayed()->inc(L3_index); // Bump index
- __ subcc(L2_super,Rsuper,Rret); // Check for match; zero in Rret for a hit
- __ brx( Assembler::notEqual, false, Assembler::pt, loop );
- __ delayed()->ld_ptr(L1_ary_ptr,0,L2_super); // Will load a little early
+
+ if (UseCompressedOops) {
+#ifdef _LP64
+ __ subcc(L2_super,L4_ooptmp,Rret); // Check for match; zero in Rret for a hit
+ __ br( Assembler::notEqual, false, Assembler::pt, loop );
+ __ delayed()->ld(L1_ary_ptr,0,L2_super);// Will load a little early
+#else
+ ShouldNotReachHere();
+#endif
+ } else {
+ __ subcc(L2_super,Rsuper,Rret); // Check for match; zero in Rret for a hit
+ __ brx( Assembler::notEqual, false, Assembler::pt, loop );
+ __ delayed()->ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
+ }
// Got a hit; report success; set cache. Cache load doesn't
// happen here; for speed it is directly emitted by the compiler.
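Editor's note: with compressed oops the secondary-supers array holds 4-byte elements, so the scan above loads with ld instead of ld_ptr, bumps the cursor by heapOopSize instead of wordSize, and compares against the pre-encoded super rather than decoding every element. A C++ rendering of that loop shape (illustrative; the real stub keeps everything in registers):

    #include <cstddef>
    #include <cstdint>

    // Scan an array of compressed klass references for 'super_narrow',
    // the probe having been encoded once outside the loop.
    static bool scan_secondary_supers(const uint32_t* array, size_t len,
                                      uint32_t super_narrow) {
        for (size_t i = 0; i < len; i++) {          // step by heapOopSize
            if (array[i] == super_narrow) return true;
        }
        return false;
    }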
@@ -1107,7 +1135,6 @@
}
#endif // 0
}
-
//
// Generate post-write barrier for array.
//
@@ -1148,8 +1175,8 @@
Label L_loop;
- __ sll_ptr(count, LogBytesPerOop, count);
- __ sub(count, BytesPerOop, count);
+ __ sll_ptr(count, LogBytesPerHeapOop, count);
+ __ sub(count, BytesPerHeapOop, count);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
__ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
@@ -1171,7 +1198,6 @@
ShouldNotReachHere();
}
-
}
@@ -2226,7 +2252,12 @@
__ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5);
#ifdef _LP64
- generate_disjoint_long_copy_core(aligned);
+ assert_clean_int(count, O3); // Make sure 'count' is clean int.
+ if (UseCompressedOops) {
+ generate_disjoint_int_copy_core(aligned);
+ } else {
+ generate_disjoint_long_copy_core(aligned);
+ }
#else
generate_disjoint_int_copy_core(aligned);
#endif
@@ -2274,10 +2305,14 @@
StubRoutines::arrayof_oop_disjoint_arraycopy() :
disjoint_oop_copy_entry;
- array_overlap_test(nooverlap_target, LogBytesPerWord);
+ array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
#ifdef _LP64
- generate_conjoint_long_copy_core(aligned);
+ if (UseCompressedOops) {
+ generate_conjoint_int_copy_core(aligned);
+ } else {
+ generate_conjoint_long_copy_core(aligned);
+ }
#else
generate_conjoint_int_copy_core(aligned);
#endif
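Editor's note: because a compressed oop is the size of a jint, the oop arraycopy stubs can reuse the int copy cores when UseCompressedOops is on and keep the long copy cores otherwise; the selection reduces to picking the element width. Roughly:

    #include <cstddef>
    #include <cstring>

    extern bool use_compressed_oops;   // assumed illustrative flag

    static void copy_oop_array(void* dst, const void* src, size_t count) {
        size_t elem_size = use_compressed_oops ? 4 : 8;   // narrowOop vs. full oop
        std::memmove(dst, src, count * elem_size);        // conjoint-safe copy
        // (the real stubs also emit the GC write barriers around the copy)
    }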
@@ -2377,8 +2412,6 @@
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
- int klass_off = oopDesc::klass_offset_in_bytes();
-
gen_write_ref_array_pre_barrier(G1, G5);
@@ -2395,7 +2428,7 @@
{ Label L;
__ mov(O3, G1); // spill: overlap test smashes O3
__ mov(O4, G4); // spill: overlap test smashes O4
- array_overlap_test(L, LogBytesPerWord);
+ array_overlap_test(L, LogBytesPerHeapOop);
__ stop("checkcast_copy within a single array");
__ bind(L);
__ mov(G1, O3);
@@ -2429,18 +2462,18 @@
__ bind(store_element);
// deccc(G1_remain); // decrement the count (hoisted)
- __ st_ptr(G3_oop, O1_to, O5_offset); // store the oop
- __ inc(O5_offset, wordSize); // step to next offset
+ __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
+ __ inc(O5_offset, heapOopSize); // step to next offset
__ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
__ delayed()->set(0, O0); // return -1 on success
// ======== loop entry is here ========
__ bind(load_element);
- __ ld_ptr(O0_from, O5_offset, G3_oop); // load the oop
+ __ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop
__ br_null(G3_oop, true, Assembler::pt, store_element);
__ delayed()->deccc(G1_remain); // decrement the count
- __ ld_ptr(G3_oop, klass_off, G4_klass); // query the object klass
+ __ load_klass(G3_oop, G4_klass); // query the object klass
generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
// branch to this on success:
@@ -2642,17 +2675,23 @@
BLOCK_COMMENT("arraycopy argument klass checks");
// get src->klass()
- __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
+ if (UseCompressedOops) {
+ __ delayed()->nop(); // ??? not good
+ __ load_klass(src, G3_src_klass);
+ } else {
+ __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
+ }
#ifdef ASSERT
// assert(src->klass() != NULL);
BLOCK_COMMENT("assert klasses not null");
{ Label L_a, L_b;
__ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL
- __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
+ __ delayed()->nop();
__ bind(L_a);
__ stop("broken null klass");
__ bind(L_b);
+ __ load_klass(dst, G4_dst_klass);
__ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
__ delayed()->mov(G0, G4_dst_klass); // scribble the temp
BLOCK_COMMENT("assert done");
@@ -2673,12 +2712,19 @@
// Load 32-bits signed value. Use br() instruction with it to check icc.
__ lduw(G3_src_klass, lh_offset, G5_lh);
+ if (UseCompressedOops) {
+ __ load_klass(dst, G4_dst_klass);
+ }
// Handle objArrays completely differently...
juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ set(objArray_lh, O5_temp);
__ cmp(G5_lh, O5_temp);
__ br(Assembler::equal, false, Assembler::pt, L_objArray);
- __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
+ if (UseCompressedOops) {
+ __ delayed()->nop();
+ } else {
+ __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
+ }
// if (src->klass() != dst->klass()) return -1;
__ cmp(G3_src_klass, G4_dst_klass);
@@ -2777,8 +2823,8 @@
__ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
__ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
- __ sll_ptr(src_pos, LogBytesPerOop, src_pos);
- __ sll_ptr(dst_pos, LogBytesPerOop, dst_pos);
+ __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
+ __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
__ add(src, src_pos, from); // src_addr
__ add(dst, dst_pos, to); // dst_addr
__ BIND(L_plain_copy);
@@ -2801,8 +2847,8 @@
// Marshal the base address arguments now, freeing registers.
__ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
__ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
- __ sll_ptr(src_pos, LogBytesPerOop, src_pos);
- __ sll_ptr(dst_pos, LogBytesPerOop, dst_pos);
+ __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
+ __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
__ add(src, src_pos, from); // src_addr
__ add(dst, dst_pos, to); // dst_addr
__ signx(length, count); // length (reloaded)
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -591,7 +591,10 @@
address entry = __ pc();
Label slow_path;
- if ( UseFastAccessorMethods) {
+
+ // XXX: with compressed oops the pointer load and decode do not fit in the
+ // delay slot and would clobber G1
+ if ( UseFastAccessorMethods && !UseCompressedOops ) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
@@ -953,6 +956,7 @@
// Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD
__ restore_thread(L7_thread_cache); // restore G2_thread
+ __ reinit_heapbase();
// must we block?
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -462,8 +462,8 @@
transition(itos, atos);
// Otos_i: index
// tos: array
- __ index_check(O2, Otos_i, LogBytesPerWord, G3_scratch, O3);
- __ ld_ptr(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
+ __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
+ __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
__ verify_oop(Otos_i);
}
@@ -736,15 +736,16 @@
// O2: index
// O3: array
__ verify_oop(Otos_i);
- __ index_check_without_pop(O3, O2, LogBytesPerWord, G3_scratch, O1);
+ __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);
// do array store check - check for NULL value first
__ br_null( Otos_i, false, Assembler::pn, is_null );
- __ delayed()->
- ld_ptr(O3, oopDesc::klass_offset_in_bytes(), O4); // get array klass
+ __ delayed()->nop();
+
+ __ load_klass(O3, O4); // get array klass
+ __ load_klass(Otos_i, O5); // get value klass
// do fast instanceof cache test
- __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O5); // get value klass
__ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4);
@@ -766,7 +767,7 @@
// Store is OK.
__ bind(store_ok);
- __ st_ptr(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ __ store_heap_oop(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Quote from rememberedSet.hpp: For objArrays, the precise card
// corresponding to the pointer store is dirtied so we don't need to
// scavenge the entire array.
@@ -777,7 +778,7 @@
__ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
__ bind(is_null);
- __ st_ptr(Otos_i, element);
+ __ store_heap_oop(Otos_i, element);
__ profile_null_seen(G3_scratch);
__ inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
__ bind(done);
@@ -1833,7 +1834,7 @@
assert(state == vtos, "only valid state");
__ mov(G0, G3_scratch);
__ access_local_ptr(G3_scratch, Otos_i);
- __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O2);
+ __ load_klass(Otos_i, O2);
__ set(JVM_ACC_HAS_FINALIZER, G3);
__ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
__ andcc(G3, O2, G0);
@@ -2078,7 +2079,7 @@
__ delayed() ->cmp(Rflags, itos);
// atos
- __ ld_ptr(Rclass, Roffset, Otos_i);
+ __ load_heap_oop(Rclass, Roffset, Otos_i);
__ verify_oop(Otos_i);
__ push(atos);
if (!is_static) {
@@ -2259,7 +2260,7 @@
__ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
break;
case Bytecodes::_fast_agetfield:
- __ ld_ptr(Otos_i, Roffset, Otos_i);
+ __ load_heap_oop(Otos_i, Roffset, Otos_i);
break;
default:
ShouldNotReachHere();
@@ -2448,7 +2449,7 @@
// atos
__ pop_ptr();
__ verify_oop(Otos_i);
- __ st_ptr(Otos_i, Rclass, Roffset);
+ __ store_heap_oop(Otos_i, Rclass, Roffset);
__ store_check(G1_scratch, Rclass, Roffset);
__ ba(false, checkVolatile);
__ delayed()->tst(Lscratch);
@@ -2490,7 +2491,7 @@
__ pop_ptr();
pop_and_check_object(Rclass);
__ verify_oop(Otos_i);
- __ st_ptr(Otos_i, Rclass, Roffset);
+ __ store_heap_oop(Otos_i, Rclass, Roffset);
__ store_check(G1_scratch, Rclass, Roffset);
patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
__ ba(false, checkVolatile);
@@ -2645,7 +2646,7 @@
__ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
break;
case Bytecodes::_fast_aputfield:
- __ st_ptr(Otos_i, Rclass, Roffset);
+ __ store_heap_oop(Otos_i, Rclass, Roffset);
__ store_check(G1_scratch, Rclass, Roffset);
break;
default:
@@ -2688,7 +2689,7 @@
__ verify_oop(Rreceiver);
__ null_check(Rreceiver);
if (state == atos) {
- __ ld_ptr(Rreceiver, Roffset, Otos_i);
+ __ load_heap_oop(Rreceiver, Roffset, Otos_i);
} else if (state == itos) {
__ ld (Rreceiver, Roffset, Otos_i) ;
} else if (state == ftos) {
@@ -2790,7 +2791,7 @@
// get receiver klass
__ null_check(O0, oopDesc::klass_offset_in_bytes());
- __ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), Rrecv);
+ __ load_klass(O0, Rrecv);
__ verify_oop(Rrecv);
__ profile_virtual_call(Rrecv, O4);
@@ -2958,7 +2959,7 @@
// get receiver klass
__ null_check(O0, oopDesc::klass_offset_in_bytes());
- __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), RklassOop);
+ __ load_klass(O0, RklassOop);
__ verify_oop(RklassOop);
// Special case of invokeinterface called for virtual method of
@@ -3221,7 +3222,7 @@
__ set((intptr_t)markOopDesc::prototype(), G4_scratch);
}
__ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
- __ st_ptr(RinstanceKlass, RallocatedObject, oopDesc::klass_offset_in_bytes()); // klass
+ __ store_klass(RinstanceKlass, RallocatedObject); // klass
{
SkipIfEqual skip_if(
@@ -3277,7 +3278,7 @@
__ delayed()->nop();
// Get value klass in RobjKlass
- __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
+ __ load_klass(Otos_i, RobjKlass); // get value klass
// Get constant pool tag
__ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
@@ -3295,13 +3296,14 @@
__ pop_ptr(Otos_i, G3_scratch); // restore receiver
__ br(Assembler::always, false, Assembler::pt, resolved);
- __ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
+ __ delayed()->nop();
// Extract target class from constant pool
__ bind(quicked);
__ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
__ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
__ bind(resolved);
+ __ load_klass(Otos_i, RobjKlass); // get value klass
// Generate a fast subtype check. Branch to cast_ok if no
// failure. Throw exception if failure.
@@ -3334,7 +3336,7 @@
__ delayed()->nop();
// Get value klass in RobjKlass
- __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
+ __ load_klass(Otos_i, RobjKlass); // get value klass
// Get constant pool tag
__ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
@@ -3352,7 +3354,7 @@
__ pop_ptr(Otos_i, G3_scratch); // restore receiver
__ br(Assembler::always, false, Assembler::pt, resolved);
- __ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
+ __ delayed()->nop();
// Extract target class from constant pool
@@ -3361,6 +3363,7 @@
__ get_constant_pool(Lscratch);
__ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
__ bind(resolved);
+ __ load_klass(Otos_i, RobjKlass); // get value klass
// Generate a fast subtype check. Branch to cast_ok if no
// failure. Return 0 if failure.
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -64,6 +64,15 @@
if (FLAG_IS_DEFAULT(UseInlineCaches)) {
UseInlineCaches = false;
}
+#ifdef _LP64
+ // The single-issue Niagara 1 is slower with CompressedOops,
+ // but later Niagaras are fine.
+ if (!is_niagara1_plus()) {
+ if (FLAG_IS_DEFAULT(UseCompressedOops)) {
+ FLAG_SET_ERGO(bool, UseCompressedOops, false);
+ }
+ }
+#endif // _LP64
#ifdef COMPILER2
// Indirect branch is the same cost as direct
if (FLAG_IS_DEFAULT(UseJumpTables)) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -60,7 +60,7 @@
// get receiver klass
address npe_addr = __ pc();
- __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
+ __ load_klass(O0, G3_scratch);
// set methodOop (in case of interpreted method), and destination address
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
@@ -131,7 +131,7 @@
// get receiver klass (also an implicit null-check)
address npe_addr = __ pc();
- __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_klassOop);
+ __ load_klass(O0, G3_klassOop);
__ verify_oop(G3_klassOop);
// Push a new window to get some temp registers. This chops the head of all
@@ -237,11 +237,16 @@
else {
const int slop = 2*BytesPerInstWord; // sethi;add (needed for long offsets)
if (is_vtable_stub) {
- const int basic = 5*BytesPerInstWord; // ld;ld;ld,jmp,nop
+ // ld;ld;ld,jmp,nop
+ const int basic = 5*BytesPerInstWord +
+ // shift;add for load_klass
+ (UseCompressedOops ? 2*BytesPerInstWord : 0);
return basic + slop;
} else {
// save, ld, ld, sll, and, add, add, ld, cmp, br, add, ld, add, ld, ld, jmp, restore, sethi, jmpl, restore
- const int basic = (20 LP64_ONLY(+ 6)) * BytesPerInstWord;
+ const int basic = (20 LP64_ONLY(+ 6)) * BytesPerInstWord +
+ // shift;add for load_klass
+ (UseCompressedOops ? 2*BytesPerInstWord : 0);
return (basic + slop);
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/assembler_x86_64.cpp
--- a/hotspot/src/cpu/x86/vm/assembler_x86_64.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/assembler_x86_64.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -127,6 +127,7 @@
bool Assembler::reachable(AddressLiteral adr) {
int64_t disp;
+
// None will force a 64bit literal to the code stream. Likely a placeholder
// for something that will be patched later and we need to certain it will
// always be reachable.
@@ -636,7 +637,7 @@
case 0x8A: // movb r, a
case 0x8B: // movl r, a
case 0x8F: // popl a
- debug_only(has_disp32 = true);
+ debug_only(has_disp32 = true;)
break;
case 0x68: // pushq #32
@@ -2891,7 +2892,7 @@
}
// scans rcx double words (m64) at [rdi] for occurrence of rax
-void Assembler::repne_scan() {
+void Assembler::repne_scanq() {
// REPNE/REPNZ
emit_byte(0xF2);
// SCASQ
@@ -2899,6 +2900,14 @@
emit_byte(0xAF);
}
+void Assembler::repne_scanl() {
+ // REPNE/REPNZ
+ emit_byte(0xF2);
+ // SCASL
+ emit_byte(0xAF);
+}
+
+
void Assembler::setb(Condition cc, Register dst) {
assert(0 <= cc && cc < 16, "illegal cc");
int encode = prefix_and_encode(dst->encoding(), true);
@@ -4597,7 +4606,6 @@
// pass args on stack, only touch rax
pushq(reg);
-
// avoid using pushptr, as it modifies scratch registers
// and our contract is not to modify anything
ExternalAddress buffer((address)b);
@@ -4664,9 +4672,9 @@
JavaThread* thread = JavaThread::current();
JavaThreadState saved_state = thread->thread_state();
thread->set_thread_state(_thread_in_vm);
- ttyLocker ttyl;
#ifndef PRODUCT
if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+ ttyLocker ttyl;
BytecodeCounter::print();
}
#endif
@@ -4674,6 +4682,7 @@
// XXX correct this offset for amd64
// This is the value of eip which points to where verify_oop will return.
if (os::message_box(msg, "Execution stopped, print registers?")) {
+ ttyLocker ttyl;
tty->print_cr("rip = 0x%016lx", pc);
tty->print_cr("rax = 0x%016lx", regs[15]);
tty->print_cr("rbx = 0x%016lx", regs[12]);
@@ -4695,6 +4704,7 @@
}
ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
} else {
+ ttyLocker ttyl;
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
msg);
}
@@ -4891,7 +4901,7 @@
movq(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
// set klass to intArrayKlass
movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
- movq(Address(top, oopDesc::klass_offset_in_bytes()), t1);
+ store_klass(top, t1);
// refill the tlab with an eden allocation
bind(do_refill);
@@ -4938,7 +4948,6 @@
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
- Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
Address saved_mark_addr(lock_reg, 0);
if (PrintBiasedLockingStatistics && counters == NULL)
@@ -4962,7 +4971,7 @@
jcc(Assembler::notEqual, cas_label);
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
- movq(tmp_reg, klass_addr);
+ load_klass(tmp_reg, obj_reg);
movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
orq(tmp_reg, r15_thread);
xorq(tmp_reg, swap_reg);
@@ -5037,7 +5046,7 @@
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
- movq(tmp_reg, klass_addr);
+ load_klass(tmp_reg, obj_reg);
movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
orq(tmp_reg, r15_thread);
if (os::is_MP()) {
@@ -5068,7 +5077,7 @@
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
- movq(tmp_reg, klass_addr);
+ load_klass(tmp_reg, obj_reg);
movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
if (os::is_MP()) {
lock();
@@ -5104,6 +5113,113 @@
}
+void MacroAssembler::load_klass(Register dst, Register src) {
+ if (UseCompressedOops) {
+ movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+ decode_heap_oop_not_null(dst);
+ } else {
+ movq(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+ }
+}
+
+void MacroAssembler::store_klass(Register dst, Register src) {
+ if (UseCompressedOops) {
+ encode_heap_oop_not_null(src);
+ // zero the entire klass field first as the gap needs to be zeroed too.
+ movptr(Address(dst, oopDesc::klass_offset_in_bytes()), NULL_WORD);
+ movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
+ } else {
+ movq(Address(dst, oopDesc::klass_offset_in_bytes()), src);
+ }
+}
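Editor's note: with compressed oops the klass field shrinks to 32 bits but still occupies a 64-bit slot in the header, so store_klass clears the whole slot first and then writes the encoded klass into its low word, keeping the adjacent gap zeroed. The intent, modeled in C++ (the field offset here is illustrative):

    #include <cstdint>
    #include <cstring>

    static const int klass_offset = 8;   // assumed offset of the klass word

    static inline void store_compressed_klass(void* obj, uint32_t narrow_klass) {
        char* slot = static_cast<char*>(obj) + klass_offset;
        uint64_t zero = 0;
        std::memcpy(slot, &zero, sizeof(zero));                   // clear slot + gap
        std::memcpy(slot, &narrow_klass, sizeof(narrow_klass));   // low 32 bits = klass
    }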
+
+void MacroAssembler::load_heap_oop(Register dst, Address src) {
+ if (UseCompressedOops) {
+ movl(dst, src);
+ decode_heap_oop(dst);
+ } else {
+ movq(dst, src);
+ }
+}
+
+void MacroAssembler::store_heap_oop(Address dst, Register src) {
+ if (UseCompressedOops) {
+ assert(!dst.uses(src), "not enough registers");
+ encode_heap_oop(src);
+ movl(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+// Algorithm must match oop.inline.hpp encode_heap_oop.
+void MacroAssembler::encode_heap_oop(Register r) {
+ assert (UseCompressedOops, "should be compressed");
+#ifdef ASSERT
+ Label ok;
+ pushq(rscratch1); // cmpptr trashes rscratch1
+ cmpptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
+ jcc(Assembler::equal, ok);
+ stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
+ bind(ok);
+ popq(rscratch1);
+#endif
+ verify_oop(r);
+ testq(r, r);
+ cmovq(Assembler::equal, r, r12_heapbase);
+ subq(r, r12_heapbase);
+ shrq(r, LogMinObjAlignmentInBytes);
+}
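Editor's note: encode_heap_oop handles null without a branch. The cmov replaces a null oop with the heap base itself, so the following subtraction yields zero and the shift keeps it zero. A branch-free sketch of the same trick:

    #include <cstdint>

    extern uintptr_t heap_base;   // kept in r12 on x86-64

    static inline uint32_t encode_branchless(uintptr_t oop) {
        // cmovq equal: a null oop becomes heap_base, so (biased - base) == 0.
        uintptr_t biased = (oop == 0) ? heap_base : oop;   // compilers typically emit cmov here
        return uint32_t((biased - heap_base) >> 3);
    }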
+
+void MacroAssembler::encode_heap_oop_not_null(Register r) {
+ assert (UseCompressedOops, "should be compressed");
+#ifdef ASSERT
+ Label ok;
+ testq(r, r);
+ jcc(Assembler::notEqual, ok);
+ stop("null oop passed to encode_heap_oop_not_null");
+ bind(ok);
+#endif
+ verify_oop(r);
+ subq(r, r12_heapbase);
+ shrq(r, LogMinObjAlignmentInBytes);
+}
+
+void MacroAssembler::decode_heap_oop(Register r) {
+ assert (UseCompressedOops, "should be compressed");
+#ifdef ASSERT
+ Label ok;
+ pushq(rscratch1);
+ cmpptr(r12_heapbase,
+ ExternalAddress((address)Universe::heap_base_addr()));
+ jcc(Assembler::equal, ok);
+ stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
+ bind(ok);
+ popq(rscratch1);
+#endif
+
+ Label done;
+ shlq(r, LogMinObjAlignmentInBytes);
+ jccb(Assembler::equal, done);
+ addq(r, r12_heapbase);
+#if 0
+ // alternate decoding probably a wash.
+ testq(r, r);
+ jccb(Assembler::equal, done);
+ leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+#endif
+ bind(done);
+ verify_oop(r);
+}
+
+void MacroAssembler::decode_heap_oop_not_null(Register r) {
+ assert (UseCompressedOops, "should only be used for compressed headers");
+ // Cannot assert, unverified entry point counts instructions (see .ad file)
+ // vtableStubs also counts instructions in pd_code_size_limit.
+ assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
+ leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+}
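Editor's note: decode_heap_oop shifts first and then skips the base addition when the shifted value is zero (the flags from the shift make the null check free), while decode_heap_oop_not_null collapses to a single lea with scale 8, which is also why the stub size accounting can treat it as a fixed number of instructions. In outline:

    #include <cstdint>

    extern uintptr_t heap_base;

    static inline uintptr_t decode_maybe_null(uint32_t narrow) {
        uintptr_t v = uintptr_t(narrow) << 3;      // shlq; leaves zero for a null oop
        if (v != 0) v += heap_base;                // addq skipped for null
        return v;
    }

    static inline uintptr_t decode_not_null(uint32_t narrow) {
        return heap_base + (uintptr_t(narrow) << 3);   // single leaq base + r*8
    }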
+
Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
switch (cond) {
// Note some conditions are synonyms for others
@@ -5173,3 +5289,9 @@
movq(Address(tmp, (-i*os::vm_page_size())), size );
}
}
+
+void MacroAssembler::reinit_heapbase() {
+ if (UseCompressedOops) {
+ movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
+ }
+}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/assembler_x86_64.hpp
--- a/hotspot/src/cpu/x86/vm/assembler_x86_64.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/assembler_x86_64.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -37,7 +37,7 @@
#else
n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... )
-#endif
+#endif // _WIN64
n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ...
n_float_register_parameters_j = 8 // j_farg0, j_farg1, ...
};
@@ -77,7 +77,7 @@
REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
-#endif
+#endif // _WIN64
// Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
@@ -105,7 +105,7 @@
#else
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
-#endif /* _WIN64 */
+#endif // _WIN64
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
@@ -120,7 +120,8 @@
REGISTER_DECLARATION(Register, rscratch1, r10); // volatile
REGISTER_DECLARATION(Register, rscratch2, r11); // volatile
-REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
+REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
+REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
#endif // _LP64
@@ -785,7 +786,8 @@
void rep_movl();
void rep_movq();
void rep_set();
- void repne_scan();
+ void repne_scanl();
+ void repne_scanq();
void setb(Condition cc, Register dst);
void clflush(Address adr);
@@ -1099,6 +1101,17 @@
void movbool(Address dst, Register src);
void testbool(Register dst);
+ // oop manipulations
+ void load_klass(Register dst, Register src);
+ void store_klass(Register dst, Register src);
+
+ void load_heap_oop(Register dst, Address src);
+ void store_heap_oop(Address dst, Register src);
+ void encode_heap_oop(Register r);
+ void decode_heap_oop(Register r);
+ void encode_heap_oop_not_null(Register r);
+ void decode_heap_oop_not_null(Register r);
+
// Stack frame creation/removal
void enter();
void leave();
@@ -1250,6 +1263,9 @@
void verify_oop(Register reg, const char* s = "broken oop");
void verify_oop_addr(Address addr, const char * s = "broken oop addr");
+ // if heap base register is used - reinit it with the correct value
+ void reinit_heapbase();
+
// only if +VerifyFPU
void verify_FPU(int stack_depth, const char* s = "illegal FPU state") {}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
--- a/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -218,7 +218,7 @@
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
"con_size_in_bytes is not multiple of alignment");
- const int hdr_size_in_bytes = oopDesc::header_size_in_bytes();
+ const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
initialize_header(obj, klass, noreg, t1, t2);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -267,15 +267,29 @@
addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Scan rcx words at [rdi] for occurrence of rax
// Set NZ/Z based on last compare
- repne_scan();
- // Not equal?
- jcc(Assembler::notEqual, not_subtype);
+
+ // This part is a bit tricky: elements of the supers array can be 32 or 64 bits
+ // wide, and values in objArrays are always stored encoded, so the probe value
+ // must be encoded before the repne scan.
+ if (UseCompressedOops) {
+ encode_heap_oop(rax);
+ repne_scanl();
+ // Not equal?
+ jcc(Assembler::notEqual, not_subtype);
+ // decode heap oop here for movq
+ decode_heap_oop(rax);
+ } else {
+ repne_scanq();
+ jcc(Assembler::notEqual, not_subtype);
+ }
// Must be equal but missed in cache. Update cache.
movq(Address(Rsub_klass, sizeof(oopDesc) +
Klass::secondary_super_cache_offset_in_bytes()), rax);
jmp(ok_is_subtype);
bind(not_subtype);
+ // decode heap oop here for miss
+ if (UseCompressedOops) decode_heap_oop(rax);
profile_typecheck_failed(rcx); // blows rcx
}
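Editor's note: the interpreter's subtype check scans the secondary-supers array with repne scas. Since the array elements are stored encoded under compressed oops, it is cheaper to encode the probe in rax once and scan 32-bit elements (repne_scanl) than to decode each element, decoding rax back afterwards on the paths that need the full oop. The same idea in C++:

    #include <cstddef>
    #include <cstdint>

    extern uintptr_t heap_base;

    static bool is_listed_super(const uint32_t* supers, size_t len, uintptr_t super_oop) {
        // Encode the probe once instead of decoding len elements.
        uint32_t probe = uint32_t((super_oop - heap_base) >> 3);
        for (size_t i = 0; i < len; i++) {
            if (supers[i] == probe) return true;    // repne scasl equivalent
        }
        return false;
    }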
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -375,7 +375,7 @@
__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
- __ movq(rax, field_address);
+ __ load_heap_oop(rax, field_address);
__ jmp(xreturn_path);
__ bind(notObj);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/register_definitions_x86.cpp
--- a/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -106,6 +106,7 @@
REGISTER_DEFINITION(Register, rscratch1);
REGISTER_DEFINITION(Register, rscratch2);
+REGISTER_DEFINITION(Register, r12_heapbase);
REGISTER_DEFINITION(Register, r15_thread);
#endif // AMD64
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -789,7 +789,7 @@
{
__ verify_oop(holder);
- __ movq(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(temp, receiver);
__ verify_oop(temp);
__ cmpq(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
@@ -1297,21 +1297,26 @@
const Register ic_reg = rax;
const Register receiver = j_rarg0;
+ const Register tmp = rdx;
Label ok;
Label exception_pending;
__ verify_oop(receiver);
- __ cmpq(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ __ pushq(tmp); // spill (any other registers free here???)
+ __ load_klass(tmp, receiver);
+ __ cmpq(ic_reg, tmp);
__ jcc(Assembler::equal, ok);
+ __ popq(tmp);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ __ bind(ok);
+ __ popq(tmp);
+
// Verified entry point must be aligned
__ align(8);
- __ bind(ok);
-
int vep_offset = ((intptr_t)__ pc()) - start;
// The instruction at the verified entry point must be 5 bytes or longer
@@ -1663,6 +1668,7 @@
__ andq(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ movq(rsp, r12); // restore sp
+ __ reinit_heapbase();
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);
__ bind(Continue);
@@ -1725,7 +1731,6 @@
__ bind(done);
}
-
{
SkipIfEqual skip(masm, &DTraceMethodProbes, false);
save_native_result(masm, ret_type, stack_slots);
@@ -1829,6 +1834,7 @@
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
__ movq(rsp, r12); // restore sp
+ __ reinit_heapbase();
#ifdef ASSERT
{
Label L;
@@ -1859,6 +1865,7 @@
__ andq(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
__ movq(rsp, r12); // restore sp
+ __ reinit_heapbase();
restore_native_result(masm, ret_type, stack_slots);
// and continue
__ jmp(reguard_done);
@@ -1941,9 +1948,8 @@
map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
// Normal deoptimization. Save exec mode for unpack_frames.
- __ movl(r12, Deoptimization::Unpack_deopt); // callee-saved
+ __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
__ jmp(cont);
-
int exception_offset = __ pc() - start;
// Prolog for exception case
@@ -1955,7 +1961,7 @@
map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
// Deopt during an exception. Save exec mode for unpack_frames.
- __ movl(r12, Deoptimization::Unpack_exception); // callee-saved
+ __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
__ bind(cont);
@@ -2088,7 +2094,7 @@
__ set_last_Java_frame(noreg, rbp, NULL);
__ movq(c_rarg0, r15_thread);
- __ movl(c_rarg1, r12); // second arg: exec_mode
+ __ movl(c_rarg1, r14); // second arg: exec_mode
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
// Set an oopmap for the call site
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -30,6 +30,7 @@
// see the comment in stubRoutines.hpp
#define __ _masm->
+#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@@ -252,6 +253,7 @@
// Load up thread register
__ movq(r15_thread, thread);
+ __ reinit_heapbase();
#ifdef ASSERT
// make sure we have no pending exceptions
@@ -945,7 +947,7 @@
__ jcc(Assembler::notZero, error);
// make sure klass is 'reasonable'
- __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
+ __ load_klass(rax, rax); // get klass
__ testq(rax, rax);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
// Check if the klass is in the right area of memory
@@ -957,7 +959,7 @@
__ jcc(Assembler::notZero, error);
// make sure klass' klass is 'reasonable'
- __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rax, rax);
__ testq(rax, rax);
__ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
// Check if the klass' klass is in the right area of memory
@@ -1001,6 +1003,7 @@
BLOCK_COMMENT("call MacroAssembler::debug");
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
__ movq(rsp, r12); // restore rsp
+ __ reinit_heapbase(); // r12 is heapbase
__ popaq(); // pop registers
__ ret(3 * wordSize); // pop caller saved stuff
@@ -1652,6 +1655,7 @@
// Arguments:
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
// ignored
+ // is_oop - true => oop array, so generate store check code
// name - stub name string
//
// Inputs:
@@ -1665,9 +1669,9 @@
//
// Side Effects:
// disjoint_int_copy_entry is set to the no-overlap entry point
- // used by generate_conjoint_int_copy().
+ // used by generate_conjoint_int_oop_copy().
//
- address generate_disjoint_int_copy(bool aligned, const char *name) {
+ address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
@@ -1680,19 +1684,30 @@
const Register qword_count = count;
const Register end_from = from; // source array end address
const Register end_to = to; // destination array end address
+ const Register saved_to = r11; // saved destination array address
// End pointers are inclusive, and if count is not zero they point
// to the last unit copied: end_to[0] := end_from[0]
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
- disjoint_int_copy_entry = __ pc();
+ (is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc();
+
+ if (is_oop) {
+ // no registers are destroyed by this call
+ gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
+ }
+
BLOCK_COMMENT("Entry:");
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
+ if (is_oop) {
+ __ movq(saved_to, to);
+ }
+
// 'from', 'to' and 'count' are now valid
__ movq(dword_count, count);
__ shrq(count, 1); // count => qword_count
@@ -1718,6 +1733,10 @@
__ movl(Address(end_to, 8), rax);
__ BIND(L_exit);
+ if (is_oop) {
+ __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
+ gen_write_ref_array_post_barrier(saved_to, end_to, rax);
+ }
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
restore_arg_regs();
__ xorq(rax, rax); // return 0
@@ -1734,6 +1753,7 @@
// Arguments:
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
// ignored
+ // is_oop - true => oop array, so generate store check code
// name - stub name string
//
// Inputs:
@@ -1745,12 +1765,12 @@
// the hardware handle it. The two dwords within qwords that span
// cache line boundaries will still be loaded and stored atomicly.
//
- address generate_conjoint_int_copy(bool aligned, const char *name) {
+ address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
- Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes;
+ Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
const Register from = rdi; // source array address
const Register to = rsi; // destination array address
const Register count = rdx; // elements count
@@ -1760,14 +1780,21 @@
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
- int_copy_entry = __ pc();
+ if (is_oop) {
+ // no registers are destroyed by this call
+ gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
+ }
+
+ (is_oop ? oop_copy_entry : int_copy_entry) = __ pc();
BLOCK_COMMENT("Entry:");
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
- array_overlap_test(disjoint_int_copy_entry, Address::times_4);
+ array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry,
+ Address::times_4);
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
+ assert_clean_int(count, rax); // Make sure 'count' is clean int.
// 'from', 'to' and 'count' are now valid
__ movq(dword_count, count);
__ shrq(count, 1); // count => qword_count
@@ -1789,6 +1816,9 @@
__ jcc(Assembler::notZero, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
+ if (is_oop) {
+ __ jmp(L_exit);
+ }
restore_arg_regs();
__ xorq(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1797,7 +1827,13 @@
// Copy in 32-bytes chunks
copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
- inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
+ inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
+ __ bind(L_exit);
+ if (is_oop) {
+ Register end_to = rdx;
+ __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
+ gen_write_ref_array_post_barrier(to, end_to, rax);
+ }
restore_arg_regs();
__ xorq(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1817,7 +1853,7 @@
// c_rarg1 - destination array address
// c_rarg2 - element count, treated as ssize_t, can be zero
//
- // Side Effects:
+ // Side Effects:
// disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
// no-overlap entry point used by generate_conjoint_long_oop_copy().
//
@@ -1857,7 +1893,7 @@
// Copy from low to high addresses. Use 'to' as scratch.
__ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
- __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
+ __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
__ negq(qword_count);
__ jmp(L_copy_32_bytes);
@@ -1923,11 +1959,14 @@
address disjoint_copy_entry = NULL;
if (is_oop) {
+ assert(!UseCompressedOops, "shouldn't be called for compressed oops");
disjoint_copy_entry = disjoint_oop_copy_entry;
oop_copy_entry = __ pc();
+ array_overlap_test(disjoint_oop_copy_entry, Address::times_8);
} else {
disjoint_copy_entry = disjoint_long_copy_entry;
long_copy_entry = __ pc();
+ array_overlap_test(disjoint_long_copy_entry, Address::times_8);
}
BLOCK_COMMENT("Entry:");
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
@@ -1945,8 +1984,6 @@
gen_write_ref_array_pre_barrier(to, saved_count);
}
- // Copy from high to low addresses. Use rcx as scratch.
-
__ jmp(L_copy_32_bytes);
// Copy trailing qwords
@@ -2038,7 +2075,14 @@
// Scan rcx words at [rdi] for occurrence of rax
// Set NZ/Z based on last compare
__ movq(rax, super_klass);
- __ repne_scan();
+ if (UseCompressedOops) {
+ // Compare against the compressed form. There is no need to uncompress because
+ // the original rax appears to be restored by the popq below.
+ __ encode_heap_oop(rax);
+ __ repne_scanl();
+ } else {
+ __ repne_scanq();
+ }
// Unspill the temp. registers:
__ popq(rdi);
@@ -2115,7 +2159,7 @@
// caller guarantees that the arrays really are different
// otherwise, we would have to make conjoint checks
{ Label L;
- array_overlap_test(L, Address::times_8);
+ array_overlap_test(L, TIMES_OOP);
__ stop("checkcast_copy within a single array");
__ bind(L);
}
@@ -2160,12 +2204,11 @@
#endif //ASSERT
// Loop-invariant addresses. They are exclusive end pointers.
- Address end_from_addr(from, length, Address::times_8, 0);
- Address end_to_addr(to, length, Address::times_8, 0);
+ Address end_from_addr(from, length, TIMES_OOP, 0);
+ Address end_to_addr(to, length, TIMES_OOP, 0);
// Loop-variant addresses. They assume post-incremented count < 0.
- Address from_element_addr(end_from, count, Address::times_8, 0);
- Address to_element_addr(end_to, count, Address::times_8, 0);
- Address oop_klass_addr(rax_oop, oopDesc::klass_offset_in_bytes());
+ Address from_element_addr(end_from, count, TIMES_OOP, 0);
+ Address to_element_addr(end_to, count, TIMES_OOP, 0);
gen_write_ref_array_pre_barrier(to, count);
@@ -2189,17 +2232,17 @@
__ align(16);
__ BIND(L_store_element);
- __ movq(to_element_addr, rax_oop); // store the oop
+ __ store_heap_oop(to_element_addr, rax_oop); // store the oop
__ incrementq(count); // increment the count toward zero
__ jcc(Assembler::zero, L_do_card_marks);
// ======== loop entry is here ========
__ BIND(L_load_element);
- __ movq(rax_oop, from_element_addr); // load the oop
+ __ load_heap_oop(rax_oop, from_element_addr); // load the oop
__ testq(rax_oop, rax_oop);
__ jcc(Assembler::zero, L_store_element);
- __ movq(r11_klass, oop_klass_addr); // query the object klass
+ __ load_klass(r11_klass, rax_oop);// query the object klass
generate_type_check(r11_klass, ckoff, ckval, L_store_element);
// ======== end loop ========
@@ -2425,15 +2468,14 @@
// registers used as temp
const Register r11_length = r11; // elements count to copy
const Register r10_src_klass = r10; // array klass
+ const Register r9_dst_klass = r9; // dest array klass
// if (length < 0) return -1;
__ movl(r11_length, C_RARG4); // length (elements count, 32-bits value)
__ testl(r11_length, r11_length);
__ jccb(Assembler::negative, L_failed_0);
- Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
- Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
- __ movq(r10_src_klass, src_klass_addr);
+ __ load_klass(r10_src_klass, src);
#ifdef ASSERT
// assert(src->klass() != NULL);
BLOCK_COMMENT("assert klasses not null");
@@ -2443,7 +2485,8 @@
__ bind(L1);
__ stop("broken null klass");
__ bind(L2);
- __ cmpq(dst_klass_addr, 0);
+ __ load_klass(r9_dst_klass, dst);
+ __ cmpq(r9_dst_klass, 0);
__ jcc(Assembler::equal, L1); // this would be broken also
BLOCK_COMMENT("assert done");
}
@@ -2470,7 +2513,8 @@
__ jcc(Assembler::equal, L_objArray);
// if (src->klass() != dst->klass()) return -1;
- __ cmpq(r10_src_klass, dst_klass_addr);
+ __ load_klass(r9_dst_klass, dst);
+ __ cmpq(r10_src_klass, r9_dst_klass);
__ jcc(Assembler::notEqual, L_failed);
// if (!src->is_Array()) return -1;
@@ -2559,17 +2603,18 @@
Label L_plain_copy, L_checkcast_copy;
// test array classes for subtyping
- __ cmpq(r10_src_klass, dst_klass_addr); // usual case is exact equality
+ __ load_klass(r9_dst_klass, dst);
+ __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality
__ jcc(Assembler::notEqual, L_checkcast_copy);
// Identically typed arrays can be copied without element-wise checks.
arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
r10, L_failed);
- __ leaq(from, Address(src, src_pos, Address::times_8,
+ __ leaq(from, Address(src, src_pos, TIMES_OOP,
arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
- __ leaq(to, Address(dst, dst_pos, Address::times_8,
- arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
+ __ leaq(to, Address(dst, dst_pos, TIMES_OOP,
+ arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
__ movslq(count, r11_length); // length
__ BIND(L_plain_copy);
__ jump(RuntimeAddress(oop_copy_entry));
@@ -2579,7 +2624,7 @@
{
// assert(r11_length == C_RARG4); // will reload from here
Register r11_dst_klass = r11;
- __ movq(r11_dst_klass, dst_klass_addr);
+ __ load_klass(r11_dst_klass, dst);
// Before looking at dst.length, make sure dst is also an objArray.
__ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
@@ -2593,13 +2638,13 @@
__ movl(r11_length, C_RARG4); // reload
arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
rax, L_failed);
- __ movl(r11_dst_klass, dst_klass_addr); // reload
+ __ load_klass(r11_dst_klass, dst); // reload
#endif
// Marshal the base address arguments now, freeing registers.
- __ leaq(from, Address(src, src_pos, Address::times_8,
+ __ leaq(from, Address(src, src_pos, TIMES_OOP,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
- __ leaq(to, Address(dst, dst_pos, Address::times_8,
+ __ leaq(to, Address(dst, dst_pos, TIMES_OOP,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
__ movl(count, C_RARG4); // length (reloaded)
Register sco_temp = c_rarg3; // this register is free now
@@ -2648,14 +2693,20 @@
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
- StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
- StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy");
+ StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy");
+ StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy");
StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");
- StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
- StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
+
+ if (UseCompressedOops) {
+ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy");
+ StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy");
+ } else {
+ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
+ StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
+ }
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy");
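The TIMES_OOP scale factor used in the leaq instructions above picks a 4-byte element stride when oops are compressed and an 8-byte stride otherwise, which is why the int-copy and long-copy stub generators can be reused for oop arrays. A minimal sketch of that idea, assuming the macro name follows the usage above (the exact definition elsewhere in this patch may differ):

// Sketch: element scale for oop-array copies depends on UseCompressedOops.
// Assumes Address::times_4 / Address::times_8 as used in the code above.
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)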
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -664,7 +664,7 @@
// work registers
const Register method = rbx;
- const Register t = r12;
+ const Register t = r11;
// allocate space for parameters
__ get_method(method);
@@ -844,6 +844,7 @@
__ andq(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ movq(rsp, r12); // restore sp
+ __ reinit_heapbase();
__ bind(Continue);
}
@@ -891,6 +892,7 @@
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
__ movq(rsp, r12); // restore sp
__ popaq(); // XXX only restore smashed registers
+ __ reinit_heapbase();
__ bind(no_reguard);
}
@@ -1360,6 +1362,7 @@
// rdx: return address/pc that threw exception
__ restore_bcp(); // r13 points to call/send
__ restore_locals();
+ __ reinit_heapbase(); // restore r12 as heapbase.
// Entry point for exceptions thrown within interpreter code
Interpreter::_throw_exception_entry = __ pc();
// expression stack is undefined here
@@ -1658,6 +1661,7 @@
__ andq(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
__ movq(rsp, r12); // restore sp
+ __ reinit_heapbase();
}
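Each reinit_heapbase() call added above re-establishes r12 as the heap-base register after a native or runtime call that may have clobbered it. A hedged sketch of what such a helper could look like; Universe::heap_base_addr() is an assumed accessor name, not taken from this patch:

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops) {
    // r12 doubles as the heap base when oops are compressed, so reload it
    // from the VM-global heap base after it was smashed by a call.
    movptr(r12, ExternalAddress((address)Universe::heap_base_addr()));
  }
}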
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -557,8 +557,8 @@
// eax: index
// rdx: array
index_check(rdx, rax); // kills rbx
- __ movq(rax, Address(rdx, rax,
- Address::times_8,
+ __ load_heap_oop(rax, Address(rdx, rax,
+ UseCompressedOops ? Address::times_4 : Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
@@ -870,15 +870,15 @@
__ jcc(Assembler::zero, is_null);
// Move subklass into rbx
- __ movq(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rbx, rax);
// Move superklass into rax
- __ movq(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rax, rdx);
__ movq(rax, Address(rax,
sizeof(oopDesc) +
objArrayKlass::element_klass_offset_in_bytes()));
- // Compress array + index*8 + 12 into a single register. Frees rcx.
+ // Compress array + index*oopSize + 12 into a single register. Frees rcx.
__ leaq(rdx, Address(rdx, rcx,
- Address::times_8,
+ UseCompressedOops ? Address::times_4 : Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
// Generate subtype check. Blows rcx, rdi
@@ -892,17 +892,17 @@
// Come here on success
__ bind(ok_is_subtype);
__ movq(rax, at_tos()); // Value
- __ movq(Address(rdx, 0), rax);
+ __ store_heap_oop(Address(rdx, 0), rax);
__ store_check(rdx);
__ jmp(done);
// Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
__ bind(is_null);
__ profile_null_seen(rbx);
- __ movq(Address(rdx, rcx,
- Address::times_8,
- arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
- rax);
+ __ store_heap_oop(Address(rdx, rcx,
+ UseCompressedOops ? Address::times_4 : Address::times_8,
+ arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
+ rax);
// Pop stack arguments
__ bind(done);
@@ -1934,7 +1934,7 @@
if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
assert(state == vtos, "only valid state");
__ movq(c_rarg1, aaddress(0));
- __ movq(rdi, Address(c_rarg1, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rdi, c_rarg1);
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ testl(rdi, JVM_ACC_HAS_FINALIZER);
Label skip_register_finalizer;
@@ -2184,7 +2184,7 @@
__ cmpl(flags, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
- __ movq(rax, field);
+ __ load_heap_oop(rax, field);
__ push(atos);
if (!is_static) {
patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
@@ -2394,7 +2394,7 @@
// atos
__ pop(atos);
if (!is_static) pop_and_check_object(obj);
- __ movq(field, rax);
+ __ store_heap_oop(field, rax);
__ store_check(obj, field); // Need to mark card
if (!is_static) {
patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
@@ -2515,7 +2515,7 @@
const Address field(c_rarg3, 0);
switch (bytecode()) { // load values into the jvalue object
- case Bytecodes::_fast_aputfield: // fall through
+ case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
@@ -2582,7 +2582,7 @@
// access field
switch (bytecode()) {
case Bytecodes::_fast_aputfield:
- __ movq(field, rax);
+ __ store_heap_oop(field, rax);
__ store_check(rcx, field);
break;
case Bytecodes::_fast_lputfield:
@@ -2631,8 +2631,8 @@
__ jcc(Assembler::zero, L1);
// access constant pool cache entry
__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
+ __ verify_oop(rax);
__ movq(r12, rax); // save object pointer before call_VM() clobbers it
- __ verify_oop(rax);
__ movq(c_rarg1, rax);
// c_rarg1: object pointer copied above
// c_rarg2: cache entry pointer
@@ -2641,6 +2641,7 @@
InterpreterRuntime::post_field_access),
c_rarg1, c_rarg2);
__ movq(rax, r12); // restore object pointer
+ __ reinit_heapbase();
__ bind(L1);
}
@@ -2667,7 +2668,7 @@
// access field
switch (bytecode()) {
case Bytecodes::_fast_agetfield:
- __ movq(rax, field);
+ __ load_heap_oop(rax, field);
__ verify_oop(rax);
break;
case Bytecodes::_fast_lgetfield:
@@ -2725,7 +2726,7 @@
__ movl(rax, Address(rax, rbx, Address::times_1));
break;
case atos:
- __ movq(rax, Address(rax, rbx, Address::times_1));
+ __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
__ verify_oop(rax);
break;
case ftos:
@@ -2787,7 +2788,8 @@
__ movl(recv, flags);
__ andl(recv, 0xFF);
if (TaggedStackInterpreter) __ shll(recv, 1); // index*2
- __ movq(recv, Address(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)));
+ __ movq(recv, Address(rsp, recv, Address::times_8,
+ -Interpreter::expr_offset_in_bytes(1)));
__ verify_oop(recv);
}
@@ -2854,7 +2856,7 @@
// get receiver klass
__ null_check(recv, oopDesc::klass_offset_in_bytes());
- __ movq(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rax, recv);
__ verify_oop(rax);
@@ -2866,8 +2868,8 @@
assert(vtableEntry::size() * wordSize == 8,
"adjust the scaling in the code below");
__ movq(method, Address(rax, index,
- Address::times_8,
- base + vtableEntry::method_offset_in_bytes()));
+ Address::times_8,
+ base + vtableEntry::method_offset_in_bytes()));
__ movq(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
__ jump_from_interpreted(method, rdx);
}
@@ -2932,7 +2934,7 @@
// Get receiver klass into rdx - also a null check
__ restore_locals(); // restore r14
- __ movq(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rdx, rcx);
__ verify_oop(rdx);
// profile this call
@@ -3161,7 +3163,7 @@
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
(intptr_t) markOopDesc::prototype()); // header (address 0x1)
}
- __ movq(Address(rax, oopDesc::klass_offset_in_bytes()), rsi); // klass
+ __ store_klass(rax, rsi); // klass
__ jmp(done);
}
@@ -3223,12 +3225,12 @@
typeArrayOopDesc::header_size(T_BYTE) * wordSize),
JVM_CONSTANT_Class);
__ jcc(Assembler::equal, quicked);
-
+ __ push(atos); // save receiver for result, and for GC
__ movq(r12, rcx); // save rcx XXX
- __ push(atos); // save receiver for result, and for GC
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+ __ movq(rcx, r12); // restore rcx XXX
+ __ reinit_heapbase();
__ pop_ptr(rdx); // restore receiver
- __ movq(rcx, r12); // restore rcx XXX
__ jmpb(resolved);
// Get superklass in rax and subklass in rbx
@@ -3238,7 +3240,7 @@
Address::times_8, sizeof(constantPoolOopDesc)));
__ bind(resolved);
- __ movq(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rbx, rdx);
// Generate subtype check. Blows rcx, rdi. Object in rdx.
// Superklass in rax. Subklass in rbx.
@@ -3280,19 +3282,20 @@
JVM_CONSTANT_Class);
__ jcc(Assembler::equal, quicked);
+ __ push(atos); // save receiver for result, and for GC
__ movq(r12, rcx); // save rcx
- __ push(atos); // save receiver for result, and for GC
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+ __ movq(rcx, r12); // restore rcx
+ __ reinit_heapbase();
__ pop_ptr(rdx); // restore receiver
- __ movq(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
- __ movq(rcx, r12); // restore rcx
+ __ load_klass(rdx, rdx);
__ jmpb(resolved);
// Get superklass in rax and subklass in rdx
__ bind(quicked);
- __ movq(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rdx, rax);
__ movq(rax, Address(rcx, rbx,
- Address::times_8, sizeof(constantPoolOopDesc)));
+ Address::times_8, sizeof(constantPoolOopDesc)));
__ bind(resolved);
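The load_heap_oop/store_heap_oop calls substituted above replace plain movq accesses so that, under compressed oops, only a 32-bit narrow oop is moved and a decode or encode is wrapped around the access. A rough sketch of the pair, assuming the decode_heap_oop/encode_heap_oop helpers used elsewhere in this patch; the real implementation may differ in detail (e.g. whether the store encodes in place):

void MacroAssembler::load_heap_oop(Register dst, Address src) {
  if (UseCompressedOops) {
    movl(dst, src);        // load 32-bit narrow oop
    decode_heap_oop(dst);  // widen: heap_base + (narrow << 3)
  } else {
    movq(dst, src);
  }
}

void MacroAssembler::store_heap_oop(Address dst, Register src) {
  if (UseCompressedOops) {
    encode_heap_oop(src);  // narrow the oop (clobbers src)
    movl(dst, src);
  } else {
    movq(dst, src);
  }
}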
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -56,7 +56,7 @@
// get receiver klass
address npe_addr = __ pc();
- __ movq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rax, j_rarg0);
// compute entry offset (in words)
int entry_offset =
@@ -131,7 +131,7 @@
// get receiver klass (also an implicit null-check)
address npe_addr = __ pc();
- __ movq(rbx, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rbx, j_rarg0);
// If we take a trap while this arg is on the stack we will not
// be able to walk the stack properly. This is not an issue except
@@ -181,7 +181,7 @@
// Get methodOop and entrypoint for compiler
// Get klass pointer again
- __ movq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+ __ load_klass(rax, j_rarg0);
const Register method = rbx;
__ movq(method, Address(rax, j_rarg1, Address::times_1, method_offset));
@@ -226,10 +226,12 @@
int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
if (is_vtable_stub) {
// Vtable stub size
- return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0);
+ return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
+ (UseCompressedOops ? 16 : 0); // 1 leaq can be 3 bytes + 1 long
} else {
// Itable stub size
- return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0);
+ return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0) +
+ (UseCompressedOops ? 32 : 0); // 2 leaqs
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/x86_32.ad
--- a/hotspot/src/cpu/x86/vm/x86_32.ad Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad Sun Apr 13 17:43:42 2008 -0400
@@ -4538,8 +4538,8 @@
// Location of C & interpreter return values
c_return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
- static int lo[Op_RegL+1] = { 0, 0, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num };
- static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
+ static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num };
+ static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
// in SSE2+ mode we want to keep the FPU stack clean so pretend
// that C functions return float and double results in XMM0.
@@ -4554,8 +4554,8 @@
// Location of return values
return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
- static int lo[Op_RegL+1] = { 0, 0, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num };
- static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
+ static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num };
+ static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
if( ideal_reg == Op_RegD && UseSSE>=2 )
return OptoRegPair(XMM0b_num,XMM0a_num);
if( ideal_reg == Op_RegF && UseSSE>=1 )
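The lo/hi tables above are indexed by ideal register opcode, so introducing the narrow-oop register class adds one slot ahead of Op_RegI; on 32-bit x86 that slot is OptoReg::Bad because narrow oops are never returned there. A sketch of the assumed leaf-opcode ordering that the extra entry corresponds to (compare the commented x86_64.ad tables later in this patch):

// Assumed ordering of ideal leaf opcodes; Op_RegN (compressed oop) is the
// new entry inserted before Op_RegI, which is why each return-value table
// above grows by one slot.
enum {
  Op_Node = 0,
  Op_Set,
  Op_RegN,   // narrow (compressed) oop
  Op_RegI,
  Op_RegP,
  Op_RegF,
  Op_RegD,
  Op_RegL
};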
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/cpu/x86/vm/x86_64.ad
--- a/hotspot/src/cpu/x86/vm/x86_64.ad Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad Sun Apr 13 17:43:42 2008 -0400
@@ -312,7 +312,6 @@
R9, R9_H,
R10, R10_H,
R11, R11_H,
- R12, R12_H,
R13, R13_H,
R14, R14_H);
@@ -392,7 +391,6 @@
R9, R9_H,
R10, R10_H,
R11, R11_H,
- R12, R12_H,
R13, R13_H,
R14, R14_H);
@@ -406,7 +404,6 @@
R9, R9_H,
R10, R10_H,
R11, R11_H,
- R12, R12_H,
R13, R13_H,
R14, R14_H);
@@ -421,7 +418,6 @@
R9, R9_H,
R10, R10_H,
R11, R11_H,
- R12, R12_H,
R13, R13_H,
R14, R14_H);
@@ -436,7 +432,6 @@
R9, R9_H,
R10, R10_H,
R11, R11_H,
- R12, R12_H,
R13, R13_H,
R14, R14_H);
@@ -449,6 +444,9 @@
// Singleton class for RDX long register
reg_class long_rdx_reg(RDX, RDX_H);
+// Singleton class for R12 long register
+reg_class long_r12_reg(R12, R12_H);
+
// Class for all int registers (except RSP)
reg_class int_reg(RAX,
RDX,
@@ -461,7 +459,6 @@
R9,
R10,
R11,
- R12,
R13,
R14);
@@ -476,7 +473,6 @@
R9,
R10,
R11,
- R12,
R13,
R14);
@@ -490,7 +486,6 @@
R9,
R10,
R11,
- R12,
R13,
R14);
@@ -1844,8 +1839,14 @@
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
- st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
- "# Inline cache check", oopDesc::klass_offset_in_bytes());
+ if (UseCompressedOops) {
+ st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
+ st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]");
+ st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
+ } else {
+ st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
+ "# Inline cache check", oopDesc::klass_offset_in_bytes());
+ }
st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
st->print_cr("\tnop");
if (!OptoBreakpoint) {
@@ -1860,7 +1861,12 @@
#ifdef ASSERT
uint code_size = cbuf.code_size();
#endif
- masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+ if (UseCompressedOops) {
+ masm.load_klass(rscratch1, j_rarg0);
+ masm.cmpq(rax, rscratch1);
+ } else {
+ masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+ }
masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -1871,6 +1877,10 @@
// Leave space for int3
nops_cnt += 1;
}
+ if (UseCompressedOops) {
+ // ??? divisible by 4 is aligned?
+ nops_cnt += 1;
+ }
masm.nop(nops_cnt);
assert(cbuf.code_size() - code_size == size(ra_),
@@ -1879,7 +1889,11 @@
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
- return OptoBreakpoint ? 11 : 12;
+ if (UseCompressedOops) {
+ return OptoBreakpoint ? 19 : 20;
+ } else {
+ return OptoBreakpoint ? 11 : 12;
+ }
}
@@ -2052,6 +2066,7 @@
reg == RCX_num || reg == RCX_H_num ||
reg == R8_num || reg == R8_H_num ||
reg == R9_num || reg == R9_H_num ||
+ reg == R12_num || reg == R12_H_num ||
reg == XMM0_num || reg == XMM0_H_num ||
reg == XMM1_num || reg == XMM1_H_num ||
reg == XMM2_num || reg == XMM2_H_num ||
@@ -2087,6 +2102,17 @@
return LONG_RDX_REG_mask;
}
+static Address build_address(int b, int i, int s, int d) {
+ Register index = as_Register(i);
+ Address::ScaleFactor scale = (Address::ScaleFactor)s;
+ if (index == rsp) {
+ index = noreg;
+ scale = Address::no_scale;
+ }
+ Address addr(as_Register(b), index, scale, d);
+ return addr;
+}
+
%}
//----------ENCODING BLOCK-----------------------------------------------------
@@ -2545,7 +2571,7 @@
Register Rrax = as_Register(RAX_enc); // super class
Register Rrcx = as_Register(RCX_enc); // killed
Register Rrsi = as_Register(RSI_enc); // sub class
- Label hit, miss;
+ Label hit, miss, cmiss;
MacroAssembler _masm(&cbuf);
// Compare super with sub directly, since super is not in its own SSA.
@@ -2562,12 +2588,27 @@
Klass::secondary_supers_offset_in_bytes()));
__ movl(Rrcx, Address(Rrdi, arrayOopDesc::length_offset_in_bytes()));
__ addq(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
- __ repne_scan();
- __ jcc(Assembler::notEqual, miss);
- __ movq(Address(Rrsi,
- sizeof(oopDesc) +
- Klass::secondary_super_cache_offset_in_bytes()),
- Rrax);
+ if (UseCompressedOops) {
+ __ encode_heap_oop(Rrax);
+ __ repne_scanl();
+ __ jcc(Assembler::notEqual, cmiss);
+ __ decode_heap_oop(Rrax);
+ __ movq(Address(Rrsi,
+ sizeof(oopDesc) +
+ Klass::secondary_super_cache_offset_in_bytes()),
+ Rrax);
+ __ jmp(hit);
+ __ bind(cmiss);
+ __ decode_heap_oop(Rrax);
+ __ jmp(miss);
+ } else {
+ __ repne_scanq();
+ __ jcc(Assembler::notEqual, miss);
+ __ movq(Address(Rrsi,
+ sizeof(oopDesc) +
+ Klass::secondary_super_cache_offset_in_bytes()),
+ Rrax);
+ }
__ bind(hit);
if ($primary) {
__ xorq(Rrdi, Rrdi);
@@ -3693,10 +3734,10 @@
int count_offset = java_lang_String::count_offset_in_bytes();
int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
- masm.movq(rax, Address(rsi, value_offset));
+ masm.load_heap_oop(rax, Address(rsi, value_offset));
masm.movl(rcx, Address(rsi, offset_offset));
masm.leaq(rax, Address(rax, rcx, Address::times_2, base_offset));
- masm.movq(rbx, Address(rdi, value_offset));
+ masm.load_heap_oop(rbx, Address(rdi, value_offset));
masm.movl(rcx, Address(rdi, offset_offset));
masm.leaq(rbx, Address(rbx, rcx, Address::times_2, base_offset));
@@ -4120,6 +4161,7 @@
%}
+
//----------FRAME--------------------------------------------------------------
// Definition of frame structure and management information.
//
@@ -4255,6 +4297,7 @@
static const int lo[Op_RegL + 1] = {
0,
0,
+ RAX_num, // Op_RegN
RAX_num, // Op_RegI
RAX_num, // Op_RegP
XMM0_num, // Op_RegF
@@ -4264,13 +4307,14 @@
static const int hi[Op_RegL + 1] = {
0,
0,
+ OptoReg::Bad, // Op_RegN
OptoReg::Bad, // Op_RegI
RAX_H_num, // Op_RegP
OptoReg::Bad, // Op_RegF
XMM0_H_num, // Op_RegD
RAX_H_num // Op_RegL
};
-
+ assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type");
return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
%}
%}
@@ -4417,9 +4461,25 @@
interface(CONST_INTER);
%}
-// Unsigned 31-bit Pointer Immediate
-// Can be used in both 32-bit signed and 32-bit unsigned insns.
-// Works for nulls and markOops; not for relocatable (oop) pointers.
+// Pointer Immediate
+operand immN() %{
+ match(ConN);
+
+ op_cost(10);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
+// NULL Pointer Immediate
+operand immN0() %{
+ predicate(n->get_narrowcon() == 0);
+ match(ConN);
+
+ op_cost(5);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
operand immP31()
%{
predicate(!n->as_Type()->type()->isa_oopptr()
@@ -4431,6 +4491,7 @@
interface(CONST_INTER);
%}
+
// Long Immediate
operand immL()
%{
@@ -4767,6 +4828,23 @@
interface(REG_INTER);
%}
+
+operand r12RegL() %{
+ constraint(ALLOC_IN_RC(long_r12_reg));
+ match(RegL);
+
+ format %{ %}
+ interface(REG_INTER);
+%}
+
+operand rRegN() %{
+ constraint(ALLOC_IN_RC(int_reg));
+ match(RegN);
+
+ format %{ %}
+ interface(REG_INTER);
+%}
+
// Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
// Answer: Operand match rules govern the DFA as it processes instruction inputs.
// It's fine for an instruction input which expects rRegP to match a r15_RegP.
@@ -4822,6 +4900,18 @@
interface(REG_INTER);
%}
+// Special Registers
+// Return a compressed pointer value
+operand rax_RegN()
+%{
+ constraint(ALLOC_IN_RC(int_rax_reg));
+ match(RegN);
+ match(rRegN);
+
+ format %{ %}
+ interface(REG_INTER);
+%}
+
// Used in AtomicAdd
operand rbx_RegP()
%{
@@ -5112,6 +5202,21 @@
%}
%}
+// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
+operand indIndexScaleOffsetComp(rRegN src, immL32 off, r12RegL base) %{
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (DecodeN src base) off);
+
+ op_cost(10);
+ format %{"[$base + $src << 3 + $off] (compressed)" %}
+ interface(MEMORY_INTER) %{
+ base($base);
+ index($src);
+ scale(0x3);
+ disp($off);
+ %}
+%}
+
// Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
%{
@@ -5259,7 +5364,8 @@
// case of this is memory operands.
opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
- indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset);
+ indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
+ indIndexScaleOffsetComp);
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.
@@ -5937,10 +6043,28 @@
ins_pipe(ialu_reg_mem); // XXX
%}
+// Load Compressed Pointer
+instruct loadN(rRegN dst, memory mem, rFlagsReg cr)
+%{
+ match(Set dst (LoadN mem));
+ effect(KILL cr);
+
+ ins_cost(125); // XXX
+ format %{ "movl $dst, $mem\t# compressed ptr" %}
+ ins_encode %{
+ Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
+ Register dst = as_Register($dst$$reg);
+ __ movl(dst, addr);
+ %}
+ ins_pipe(ialu_reg_mem); // XXX
+%}
+
+
// Load Klass Pointer
instruct loadKlass(rRegP dst, memory mem)
%{
match(Set dst (LoadKlass mem));
+ predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow());
ins_cost(125); // XXX
format %{ "movq $dst, $mem\t# class" %}
@@ -5949,6 +6073,25 @@
ins_pipe(ialu_reg_mem); // XXX
%}
+// Load Klass Pointer
+instruct loadKlassComp(rRegP dst, memory mem)
+%{
+ match(Set dst (LoadKlass mem));
+ predicate(n->in(MemNode::Address)->bottom_type()->is_narrow());
+
+ ins_cost(125); // XXX
+ format %{ "movl $dst, $mem\t# compressed class" %}
+ ins_encode %{
+ Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
+ Register dst = as_Register($dst$$reg);
+ __ movl(dst, addr);
+ // klass is never null in the header but this is generated for all
+ // klass loads not just the _klass field in the header.
+ __ decode_heap_oop(dst);
+ %}
+ ins_pipe(ialu_reg_mem); // XXX
+%}
+
// Load Float
instruct loadF(regF dst, memory mem)
%{
@@ -6203,6 +6346,35 @@
ins_pipe(pipe_slow);
%}
+instruct loadConN0(rRegN dst, immN0 src, rFlagsReg cr) %{
+ match(Set dst src);
+ effect(KILL cr);
+ format %{ "xorq $dst, $src\t# compressed ptr" %}
+ ins_encode %{
+ Register dst = $dst$$Register;
+ __ xorq(dst, dst);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct loadConN(rRegN dst, immN src) %{
+ match(Set dst src);
+
+ ins_cost(125);
+ format %{ "movl $dst, $src\t# compressed ptr" %}
+ ins_encode %{
+ address con = (address)$src$$constant;
+ Register dst = $dst$$Register;
+ if (con == NULL) {
+ ShouldNotReachHere();
+ } else {
+ __ movoop(dst, (jobject)$src$$constant);
+ __ encode_heap_oop_not_null(dst);
+ }
+ %}
+ ins_pipe(ialu_reg_fat); // XXX
+%}
+
instruct loadConF0(regF dst, immF0 src)
%{
match(Set dst src);
@@ -6458,6 +6630,22 @@
ins_pipe(ialu_mem_imm);
%}
+// Store Compressed Pointer
+instruct storeN(memory mem, rRegN src, rFlagsReg cr)
+%{
+ match(Set mem (StoreN mem src));
+ effect(KILL cr);
+
+ ins_cost(125); // XXX
+ format %{ "movl $mem, $src\t# ptr" %}
+ ins_encode %{
+ Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
+ Register src = as_Register($src$$reg);
+ __ movl(addr, src);
+ %}
+ ins_pipe(ialu_mem_reg);
+%}
+
// Store Integer Immediate
instruct storeImmI(memory mem, immI src)
%{
@@ -6805,6 +6993,39 @@
ins_pipe(ialu_reg_reg); // XXX
%}
+
+// Convert oop pointer into compressed form
+instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
+ match(Set dst (EncodeP src));
+ effect(KILL cr);
+ format %{ "encode_heap_oop $dst,$src" %}
+ ins_encode %{
+ Register s = $src$$Register;
+ Register d = $dst$$Register;
+ if (s != d) {
+ __ movq(d, s);
+ }
+ __ encode_heap_oop(d);
+ %}
+ ins_pipe(ialu_reg_long);
+%}
+
+instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
+ match(Set dst (DecodeN src));
+ effect(KILL cr);
+ format %{ "decode_heap_oop $dst,$src" %}
+ ins_encode %{
+ Register s = $src$$Register;
+ Register d = $dst$$Register;
+ if (s != d) {
+ __ movq(d, s);
+ }
+ __ decode_heap_oop(d);
+ %}
+ ins_pipe(ialu_reg_long);
+%}
+
+
//----------Conditional Move---------------------------------------------------
// Jump
// dummy instruction for generating temp registers
@@ -7521,6 +7742,28 @@
%}
+instruct compareAndSwapN(rRegI res,
+ memory mem_ptr,
+ rax_RegN oldval, rRegN newval,
+ rFlagsReg cr) %{
+ match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
+ effect(KILL cr, KILL oldval);
+
+ format %{ "cmpxchgl $mem_ptr,$newval\t# "
+ "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
+ "sete $res\n\t"
+ "movzbl $res, $res" %}
+ opcode(0x0F, 0xB1);
+ ins_encode(lock_prefix,
+ REX_reg_mem(newval, mem_ptr),
+ OpcP, OpcS,
+ reg_mem(newval, mem_ptr),
+ REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
+ REX_reg_breg(res, res), // movzbl
+ Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
+ ins_pipe( pipe_cmpxchg );
+%}
+
//----------Subtraction Instructions-------------------------------------------
// Integer Subtraction Instructions
@@ -10771,6 +11014,14 @@
ins_pipe(ialu_cr_reg_imm);
%}
+instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{
+ match(Set cr (CmpN src zero));
+
+ format %{ "testl $src, $src" %}
+ ins_encode %{ __ testl($src$$Register, $src$$Register); %}
+ ins_pipe(ialu_cr_reg_imm);
+%}
+
// Yanked all unsigned pointer compare operations.
// Pointer compares are done with CmpP which is already unsigned.
@@ -11018,6 +11269,7 @@
rdi_RegP result)
%{
match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
+ predicate(!UseCompressedOops); // decoding oop kills condition codes
effect(KILL rcx, KILL result);
ins_cost(1000);
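The encodeHeapOop/decodeHeapOop nodes above and the dtrace helpers below rely on the same arithmetic: a narrow oop is the object's heap offset shifted right by 3 (objects are 8-byte aligned), and decoding shifts left and adds the heap base back. A plain C++ sketch of that mapping; heap_base here stands for the value read from Universe, not a literal field name guaranteed by this patch:

// Sketch of the compressed-oop mapping used throughout this change.
static inline uint32_t  encode_heap_oop(uintptr_t heap_base, uintptr_t oop_addr) {
  return (uint32_t)((oop_addr - heap_base) >> 3);   // drop the 3 alignment bits
}
static inline uintptr_t decode_heap_oop(uintptr_t heap_base, uint32_t narrow) {
  return heap_base + ((uintptr_t)narrow << 3);      // restore full address
}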
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -196,7 +196,7 @@
printf("\n");
GEN_VALUE(OFFSET_HeapBlockHeader_used, offset_of(HeapBlock::Header, _used));
- GEN_OFFS(oopDesc, _klass);
+ GEN_OFFS(oopDesc, _metadata);
printf("\n");
GEN_VALUE(AccessFlags_NATIVE, JVM_ACC_NATIVE);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/os/solaris/dtrace/jhelper.d
--- a/hotspot/src/os/solaris/dtrace/jhelper.d Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os/solaris/dtrace/jhelper.d Sun Apr 13 17:43:42 2008 -0400
@@ -46,6 +46,7 @@
extern pointer __1cJCodeCacheF_heap_;
extern pointer __1cIUniverseP_methodKlassObj_;
extern pointer __1cIUniverseO_collectedHeap_;
+extern pointer __1cIUniverseK_heap_base_;
extern pointer __1cHnmethodG__vtbl_;
extern pointer __1cKBufferBlobG__vtbl_;
@@ -107,7 +108,7 @@
copyin_offset(OFFSET_constantPoolOopDesc_pool_holder);
copyin_offset(OFFSET_HeapBlockHeader_used);
- copyin_offset(OFFSET_oopDesc_klass);
+ copyin_offset(OFFSET_oopDesc_metadata);
copyin_offset(OFFSET_symbolOopDesc_length);
copyin_offset(OFFSET_symbolOopDesc_body);
@@ -150,6 +151,7 @@
this->Universe_methodKlassOop = copyin_ptr(&``__1cIUniverseP_methodKlassObj_);
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
+ this->Universe_heap_base = copyin_ptr(&``__1cIUniverseK_heap_base_);
/* Reading volatile values */
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
@@ -293,10 +295,27 @@
dtrace:helper:ustack:
/!this->done && this->vtbl == this->BufferBlob_vtbl &&
+this->Universe_heap_base == NULL &&
this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
{
MARK_LINE;
- this->klass = copyin_ptr(this->methodOopPtr + OFFSET_oopDesc_klass);
+ this->klass = copyin_ptr(this->methodOopPtr + OFFSET_oopDesc_metadata);
+ this->methodOop = this->klass == this->Universe_methodKlassOop;
+ this->done = !this->methodOop;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->vtbl == this->BufferBlob_vtbl &&
+this->Universe_heap_base != NULL &&
+this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
+{
+ MARK_LINE;
+ /*
+ * Read compressed pointer and decode heap oop, same as oop.inline.hpp
+ */
+ this->cklass = copyin_uint32(this->methodOopPtr + OFFSET_oopDesc_metadata);
+ this->klass = (uint64_t)((uintptr_t)this->Universe_heap_base +
+ ((uintptr_t)this->cklass << 3));
this->methodOop = this->klass == this->Universe_methodKlassOop;
this->done = !this->methodOop;
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/os/solaris/dtrace/libjvm_db.c
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c Sun Apr 13 17:43:42 2008 -0400
@@ -148,9 +148,11 @@
uint64_t Universe_methodKlassObj_address;
uint64_t CodeCache_heap_address;
+ uint64_t Universe_heap_base_address;
/* Volatiles */
uint64_t Universe_methodKlassObj;
+ uint64_t Universe_heap_base;
uint64_t CodeCache_low;
uint64_t CodeCache_high;
uint64_t CodeCache_segmap_low;
@@ -166,7 +168,6 @@
Frame_t curr_fr;
};
-
static int
read_string(struct ps_prochandle *P,
char *buf, /* caller's buffer */
@@ -185,6 +186,14 @@
return -1;
}
+static int read_compressed_pointer(jvm_agent_t* J, uint64_t base, uint32_t *ptr) {
+ int err = -1;
+ uint32_t ptr32;
+ err = ps_pread(J->P, base, &ptr32, sizeof(uint32_t));
+ *ptr = ptr32;
+ return err;
+}
+
static int read_pointer(jvm_agent_t* J, uint64_t base, uint64_t* ptr) {
int err = -1;
uint32_t ptr32;
@@ -270,6 +279,9 @@
if (strcmp("_methodKlassObj", vmp->fieldName) == 0) {
J->Universe_methodKlassObj_address = vmp->address;
}
+ if (strcmp("_heap_base", vmp->fieldName) == 0) {
+ J->Universe_heap_base_address = vmp->address;
+ }
}
CHECK_FAIL(err);
@@ -292,6 +304,8 @@
err = read_pointer(J, J->Universe_methodKlassObj_address, &J->Universe_methodKlassObj);
CHECK_FAIL(err);
+ err = read_pointer(J, J->Universe_heap_base_address, &J->Universe_heap_base);
+ CHECK_FAIL(err);
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
OFFSET_VirtualSpace_low, &J->CodeCache_low);
CHECK_FAIL(err);
@@ -444,7 +458,17 @@
static int is_methodOop(jvm_agent_t* J, uint64_t methodOopPtr) {
uint64_t klass;
int err;
- err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_klass, &klass);
+ // If heap_base is nonnull, this was a compressed oop.
+ if (J->Universe_heap_base != NULL) {
+ uint32_t cklass;
+ err = read_compressed_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata,
+ &cklass);
+ // decode heap oop, same as oop.inline.hpp
+ klass = (uint64_t)((uintptr_t)J->Universe_heap_base +
+ ((uintptr_t)cklass << 3));
+ } else {
+ err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata, &klass);
+ }
if (err != PS_OK) goto fail;
return klass == J->Universe_methodKlassObj;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/os/windows/vm/os_windows.cpp
--- a/hotspot/src/os/windows/vm/os_windows.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -3116,7 +3116,7 @@
// as reserve size, since on a 64-bit platform we'll run into that more
// often than running out of virtual memory space. We can use the
// lower value of the two calculations as the os_thread_limit.
- size_t max_address_space = ((size_t)1 << (BitsPerOop - 1)) - (200 * K * K);
+ size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
// at exit methods are called in the reverse order of their registration.
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s
--- a/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s Sun Apr 13 17:43:42 2008 -0400
@@ -33,7 +33,9 @@
!! by the .il "call", in some cases optimizing the code, completely eliding it,
!! or by moving the code from the "call site".
-
+ !! ASM better know we may use G6 for our own purposes
+ .register %g6, #ignore
+
.globl SafeFetch32
.align 32
.global Fetch32PFI, Fetch32Resume
@@ -106,6 +108,7 @@
.globl _raw_thread_id
.align 32
_raw_thread_id:
+ .register %g7, #scratch
retl
mov %g7, %o0
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/adlc/archDesc.cpp
--- a/hotspot/src/share/vm/adlc/archDesc.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/archDesc.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -867,6 +867,7 @@
Form *form = (Form*)_globalNames[result];
assert( form, "Result operand must be defined");
OperandForm *oper = form->is_operand();
+ if (oper == NULL) form->dump();
assert( oper, "Result must be an OperandForm");
return reg_mask( *oper );
}
@@ -908,6 +909,7 @@
switch( last_char ) {
case 'I': return "TypeInt::INT";
case 'P': return "TypePtr::BOTTOM";
+ case 'N': return "TypeNarrowOop::BOTTOM";
case 'F': return "Type::FLOAT";
case 'D': return "Type::DOUBLE";
case 'L': return "TypeLong::LONG";
@@ -944,7 +946,7 @@
// Create InstructForm and assign type for each ideal instruction.
for ( int j = _last_machine_leaf+1; j < _last_opcode; ++j) {
char *ident = (char *)NodeClassNames[j];
- if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") ||
+ if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") || !strcmp(ident, "ConN") ||
!strcmp(ident, "ConF") || !strcmp(ident, "ConD") ||
!strcmp(ident, "ConL") || !strcmp(ident, "Con" ) ||
!strcmp(ident, "Bool") ) {
@@ -1109,6 +1111,7 @@
if ( strcmp(idealName,"CmpI") == 0
|| strcmp(idealName,"CmpU") == 0
|| strcmp(idealName,"CmpP") == 0
+ || strcmp(idealName,"CmpN") == 0
|| strcmp(idealName,"CmpL") == 0
|| strcmp(idealName,"CmpD") == 0
|| strcmp(idealName,"CmpF") == 0
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/adlc/forms.cpp
--- a/hotspot/src/share/vm/adlc/forms.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/forms.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -211,6 +211,7 @@
if (strcmp(name,"ConI")==0) return Form::idealI;
if (strcmp(name,"ConP")==0) return Form::idealP;
+ if (strcmp(name,"ConN")==0) return Form::idealN;
if (strcmp(name,"ConL")==0) return Form::idealL;
if (strcmp(name,"ConF")==0) return Form::idealF;
if (strcmp(name,"ConD")==0) return Form::idealD;
@@ -256,6 +257,7 @@
if( strcmp(opType,"LoadPLocked")==0 ) return Form::idealP;
if( strcmp(opType,"LoadLLocked")==0 ) return Form::idealL;
if( strcmp(opType,"LoadP")==0 ) return Form::idealP;
+ if( strcmp(opType,"LoadN")==0 ) return Form::idealN;
if( strcmp(opType,"LoadRange")==0 ) return Form::idealI;
if( strcmp(opType,"LoadS")==0 ) return Form::idealS;
if( strcmp(opType,"Load16B")==0 ) return Form::idealB;
@@ -286,6 +288,7 @@
if( strcmp(opType,"StoreI")==0) return Form::idealI;
if( strcmp(opType,"StoreL")==0) return Form::idealL;
if( strcmp(opType,"StoreP")==0) return Form::idealP;
+ if( strcmp(opType,"StoreN")==0) return Form::idealN;
if( strcmp(opType,"Store16B")==0) return Form::idealB;
if( strcmp(opType,"Store8B")==0) return Form::idealB;
if( strcmp(opType,"Store4B")==0) return Form::idealB;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/adlc/forms.hpp
--- a/hotspot/src/share/vm/adlc/forms.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/forms.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -168,7 +168,8 @@
idealD = 5, // Double type
idealB = 6, // Byte type
idealC = 7, // Char type
- idealS = 8 // String type
+ idealS = 8, // String type
+ idealN = 9 // Narrow oop types
};
// Convert ideal name to a DataType, return DataType::none if not a 'ConX'
Form::DataType ideal_to_const_type(const char *ideal_type_name) const;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/adlc/formssel.cpp
--- a/hotspot/src/share/vm/adlc/formssel.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/formssel.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -726,6 +726,9 @@
if( _matrule && _matrule->_rChild &&
(!strcmp(_matrule->_rChild->_opType,"CastPP") || // new result type
!strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type
+ !strcmp(_matrule->_rChild->_opType,"DecodeN") ||
+ !strcmp(_matrule->_rChild->_opType,"EncodeP") ||
+ !strcmp(_matrule->_rChild->_opType,"LoadN") ||
!strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception
!strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true;
else if ( is_ideal_load() == Form::idealP ) return true;
@@ -2101,6 +2104,7 @@
if (strcmp(name,"RegF")==0) size = 1;
if (strcmp(name,"RegD")==0) size = 2;
if (strcmp(name,"RegL")==0) size = 2;
+ if (strcmp(name,"RegN")==0) size = 1;
if (strcmp(name,"RegP")==0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
if (size == 0) return false;
return size == reg_class->size();
@@ -2365,11 +2369,12 @@
void OperandForm::format_constant(FILE *fp, uint const_index, uint const_type) {
switch(const_type) {
- case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
- case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
- case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
- case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
- case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
+ case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
+ case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
+ case Form::idealN: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
+ case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
+ case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
+ case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
default:
assert( false, "ShouldNotReachHere()");
}
@@ -3300,9 +3305,9 @@
int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
static const char *needs_ideal_memory_list[] = {
- "StoreI","StoreL","StoreP","StoreD","StoreF" ,
+ "StoreI","StoreL","StoreP","StoreN","StoreD","StoreF" ,
"StoreB","StoreC","Store" ,"StoreFP",
- "LoadI" ,"LoadL", "LoadP" ,"LoadD" ,"LoadF" ,
+ "LoadI" ,"LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF" ,
"LoadB" ,"LoadC" ,"LoadS" ,"Load" ,
"Store4I","Store2I","Store2L","Store2D","Store4F","Store2F","Store16B",
"Store8B","Store4B","Store8C","Store4C","Store2C",
@@ -3311,7 +3316,7 @@
"LoadRange", "LoadKlass", "LoadL_unaligned", "LoadD_unaligned",
"LoadPLocked", "LoadLLocked",
"StorePConditional", "StoreLConditional",
- "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP",
+ "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
"StoreCM",
"ClearArray"
};
@@ -3712,6 +3717,7 @@
if( base_operand(position, globals, result, name, opType) &&
(strcmp(opType,"RegI")==0 ||
strcmp(opType,"RegP")==0 ||
+ strcmp(opType,"RegN")==0 ||
strcmp(opType,"RegL")==0 ||
strcmp(opType,"RegF")==0 ||
strcmp(opType,"RegD")==0 ||
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/adlc/output_c.cpp
--- a/hotspot/src/share/vm/adlc/output_c.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/output_c.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -1546,6 +1546,18 @@
// Build a mapping from operand index to input edges
fprintf(fp," unsigned idx0 = oper_input_base();\n");
+
+ // The order in which inputs are added to a node is very
+ // strange. Store nodes get a memory input before Expand is
+ // called and all other nodes get it afterwards so
+ // oper_input_base is wrong during expansion. This code adjusts
+ // it so that expansion will work correctly.

+ bool missing_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames) &&
+ node->is_ideal_store() == Form::none;
+ if (missing_memory_edge) {
+ fprintf(fp," idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
+ }
+
for( i = 0; i < node->num_opnds(); i++ ) {
fprintf(fp," unsigned idx%d = idx%d + num%d;\n",
i+1,i,i);
@@ -1600,8 +1612,10 @@
int node_mem_op = node->memory_operand(_globalNames);
assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND,
"expand rule member needs memory but top-level inst doesn't have any" );
- // Copy memory edge
- fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+ if (!missing_memory_edge) {
+ // Copy memory edge
+ fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+ }
}
// Iterate over the new instruction's operands
@@ -2363,6 +2377,8 @@
fprintf(fp,"uint %sNode::size(PhaseRegAlloc *ra_) const {\n",
inst._ident);
+ fprintf(fp, " assert(VerifyOops || MachNode::size(ra_) <= %s, \"bad fixed size\");\n", inst._size);
+
//(2)
// Print the size
fprintf(fp, " return (VerifyOops ? MachNode::size(ra_) : %s);\n", inst._size);
@@ -3426,6 +3442,8 @@
fprintf(fp, "_leaf->get_int()");
} else if ( (strcmp(optype,"ConP") == 0) ) {
fprintf(fp, "_leaf->bottom_type()->is_ptr()");
+ } else if ( (strcmp(optype,"ConN") == 0) ) {
+ fprintf(fp, "_leaf->bottom_type()->is_narrowoop()");
} else if ( (strcmp(optype,"ConF") == 0) ) {
fprintf(fp, "_leaf->getf()");
} else if ( (strcmp(optype,"ConD") == 0) ) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/adlc/output_h.cpp
--- a/hotspot/src/share/vm/adlc/output_h.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/output_h.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -203,6 +203,10 @@
if (i > 0) fprintf(fp,", ");
fprintf(fp," const TypePtr *_c%d;\n", i);
}
+ else if (!strcmp(type, "ConN")) {
+ if (i > 0) fprintf(fp,", ");
+ fprintf(fp," const TypeNarrowOop *_c%d;\n", i);
+ }
else if (!strcmp(type, "ConL")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp," jlong _c%d;\n", i);
@@ -235,6 +239,10 @@
fprintf(fp," const TypePtr *_c%d;\n", i);
i++;
}
+ else if (!strcmp(comp->base_type(globals), "ConN")) {
+ fprintf(fp," const TypePtr *_c%d;\n", i);
+ i++;
+ }
else if (!strcmp(comp->base_type(globals), "ConL")) {
fprintf(fp," jlong _c%d;\n", i);
i++;
@@ -280,6 +288,7 @@
fprintf(fp,is_ideal_bool ? "BoolTest::mask c%d" : "int32 c%d", i);
break;
}
+ case Form::idealN : { fprintf(fp,"const TypeNarrowOop *c%d", i); break; }
case Form::idealP : { fprintf(fp,"const TypePtr *c%d", i); break; }
case Form::idealL : { fprintf(fp,"jlong c%d", i); break; }
case Form::idealF : { fprintf(fp,"jfloat c%d", i); break; }
@@ -302,6 +311,11 @@
fprintf(fp,"const TypePtr *c%d", i);
i++;
}
+ else if (!strcmp(comp->base_type(globals), "ConN")) {
+ if (i > 0) fprintf(fp,", ");
+ fprintf(fp,"const TypePtr *c%d", i);
+ i++;
+ }
else if (!strcmp(comp->base_type(globals), "ConL")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"jlong c%d", i);
@@ -360,6 +374,10 @@
fprintf(fp," _c%d->dump_on(st);\n", i);
++i;
}
+ else if (!strcmp(ideal_type, "ConN")) {
+ fprintf(fp," _c%d->dump();\n", i);
+ ++i;
+ }
else if (!strcmp(ideal_type, "ConL")) {
fprintf(fp," st->print(\"#\" INT64_FORMAT, _c%d);\n", i);
++i;
@@ -417,8 +435,13 @@
// Replacement variable
const char *rep_var = oper._format->_rep_vars.iter();
// Check that it is a local name, and an operand
- OperandForm *op = oper._localNames[rep_var]->is_operand();
- assert( op, "replacement variable was not found in local names");
+ const Form* form = oper._localNames[rep_var];
+ if (form == NULL) {
+ globalAD->syntax_err(oper._linenum,
+ "\'%s\' not found in format for %s\n", rep_var, oper._ident);
+ assert(form, "replacement variable was not found in local names");
+ }
+ OperandForm *op = form->is_operand();
// Get index if register or constant
if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
idx = oper.register_position( globals, rep_var);
@@ -483,9 +506,14 @@
} else {
// Replacement variable
const char *rep_var = oper._format->_rep_vars.iter();
- // Check that it is a local name, and an operand
- OperandForm *op = oper._localNames[rep_var]->is_operand();
- assert( op, "replacement variable was not found in local names");
+ // Check that it is a local name, and an operand
+ const Form* form = oper._localNames[rep_var];
+ if (form == NULL) {
+ globalAD->syntax_err(oper._linenum,
+ "\'%s\' not found in format for %s\n", rep_var, oper._ident);
+ assert(form, "replacement variable was not found in local names");
+ }
+ OperandForm *op = form->is_operand();
// Get index if register or constant
if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
idx = oper.register_position( globals, rep_var);
@@ -1163,7 +1191,7 @@
if( type != NULL ) {
Form::DataType data_type = oper->is_base_constant(_globalNames);
// Check if we are an ideal pointer type
- if( data_type == Form::idealP ) {
+ if( data_type == Form::idealP || data_type == Form::idealN ) {
// Return the ideal type we already have:
fprintf(fp," return _c0;");
} else {
@@ -1291,6 +1319,16 @@
fprintf(fp, " return _c0->isa_oop_ptr();");
fprintf(fp, " }\n");
}
+ else if (!strcmp(oper->ideal_type(_globalNames), "ConN")) {
+ // Access the locally stored constant
+ fprintf(fp," virtual intptr_t constant() const {");
+ fprintf(fp, " return _c0->make_oopptr()->get_con();");
+ fprintf(fp, " }\n");
+ // Generate query to determine if this pointer is an oop
+ fprintf(fp," virtual bool constant_is_oop() const {");
+ fprintf(fp, " return _c0->make_oopptr()->isa_oop_ptr();");
+ fprintf(fp, " }\n");
+ }
else if (!strcmp(oper->ideal_type(_globalNames), "ConL")) {
fprintf(fp," virtual intptr_t constant() const {");
// We don't support addressing modes with > 4Gig offsets.
@@ -1748,6 +1786,7 @@
fprintf(fp," return TypeInt::make(opnd_array(1)->constant());\n");
break;
case Form::idealP:
+ case Form::idealN:
fprintf(fp," return opnd_array(1)->type();\n",result);
break;
case Form::idealD:
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/asm/codeBuffer.cpp
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -281,8 +281,10 @@
// Need to return a pc, doesn't matter what it is since it will be
// replaced during resolution later.
- // (Don't return NULL or badAddress, since branches shouldn't overflow.)
- return base;
+ // Don't return NULL or badAddress, since branches shouldn't overflow.
+ // Don't return base either because that could overflow displacements
+ // for shorter branches. It will get checked when bound.
+ return branch_pc;
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/c1/c1_Runtime1.cpp
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -1074,6 +1074,43 @@
JRT_END
+// Array copy return codes.
+enum {
+ ac_failed = -1, // arraycopy failed
+ ac_ok = 0 // arraycopy succeeded
+};
+
+
+template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
+ oopDesc* dst, T* dst_addr,
+ int length) {
+
+ // For performance reasons, we assume we are using a card marking write
+ // barrier. The assert will fail if this is not the case.
+ // Note that we use the non-virtual inlineable variant of write_ref_array.
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ assert(bs->has_write_ref_array_opt(),
+ "Barrier set must have ref array opt");
+ if (src == dst) {
+ // same object, no check
+ Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
+ bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
+ (HeapWord*)(dst_addr + length)));
+ return ac_ok;
+ } else {
+ klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
+ klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
+ if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
+ // Elements are guaranteed to be subtypes, so no check necessary
+ Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
+ bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
+ (HeapWord*)(dst_addr + length)));
+ return ac_ok;
+ }
+ }
+ return ac_failed;
+}
+
// fast and direct copy of arrays; returning -1, means that an exception may be thrown
// and we did not copy anything
JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
@@ -1081,11 +1118,6 @@
_generic_arraycopy_cnt++; // Slow-path oop array copy
#endif
- enum {
- ac_failed = -1, // arraycopy failed
- ac_ok = 0 // arraycopy succeeded
- };
-
if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
if (!dst->is_array() || !src->is_array()) return ac_failed;
if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
@@ -1105,30 +1137,14 @@
memmove(dst_addr, src_addr, length << l2es);
return ac_ok;
} else if (src->is_objArray() && dst->is_objArray()) {
- oop* src_addr = objArrayOop(src)->obj_at_addr(src_pos);
- oop* dst_addr = objArrayOop(dst)->obj_at_addr(dst_pos);
- // For performance reasons, we assume we are using a card marking write
- // barrier. The assert will fail if this is not the case.
- // Note that we use the non-virtual inlineable variant of write_ref_array.
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->has_write_ref_array_opt(),
- "Barrier set must have ref array opt");
- if (src == dst) {
- // same object, no check
- Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
- bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
- (HeapWord*)(dst_addr + length)));
- return ac_ok;
+ if (UseCompressedOops) { // will need for tiered
+ narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
+ narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
+ return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
} else {
- klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
- klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
- if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
- // Elements are guaranteed to be subtypes, so no check necessary
- Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
- bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
- (HeapWord*)(dst_addr + length)));
- return ac_ok;
- }
+ oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
+ oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
+ return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
}
}
return ac_failed;
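The refactoring above funnels both element widths through one template: the compressed path instantiates obj_arraycopy_work with narrowOop* addresses, the uncompressed path with oop*, and argument deduction keeps the two call sites identical. An illustrative-only sketch of that dispatch (the pointers are placeholders; real addresses come from objArrayOop::obj_at_addr<T>(pos) as in the patch):

// Sketch: T is deduced from the element-address type at each call site.
oopDesc* src = NULL;  oopDesc* dst = NULL;          // placeholder objArrays
narrowOop* nsrc = NULL;  narrowOop* ndst = NULL;    // compressed element addresses
oop*       wsrc = NULL;  oop*       wdst = NULL;    // full-width element addresses
int r_narrow = obj_arraycopy_work(src, nsrc, dst, ndst, 0);  // T = narrowOop
int r_wide   = obj_arraycopy_work(src, wsrc, dst, wdst, 0);  // T = oop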
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/ci/ciInstanceKlass.cpp
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -48,6 +48,7 @@
// Next line must follow and use the result of the previous line:
_is_linked = _is_initialized || ik->is_linked();
_nonstatic_field_size = ik->nonstatic_field_size();
+ _has_nonstatic_fields = ik->has_nonstatic_fields();
_nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
_nof_implementors = ik->nof_implementors();
@@ -93,6 +94,7 @@
_is_initialized = false;
_is_linked = false;
_nonstatic_field_size = -1;
+ _has_nonstatic_fields = false;
_nonstatic_fields = NULL;
_nof_implementors = -1;
_loader = loader;
@@ -201,7 +203,7 @@
assert(offset >= 0 && offset < layout_helper(), "offset must be tame");
#endif
- if (offset < (instanceOopDesc::header_size() * wordSize)) {
+ if (offset < instanceOopDesc::base_offset_in_bytes()) {
// All header offsets belong properly to java/lang/Object.
return CURRENT_ENV->Object_klass();
}
@@ -210,7 +212,8 @@
for (;;) {
assert(self->is_loaded(), "must be loaded to have size");
ciInstanceKlass* super = self->super();
- if (super == NULL || !super->contains_field_offset(offset)) {
+ if (super == NULL || super->nof_nonstatic_fields() == 0 ||
+ !super->contains_field_offset(offset)) {
return self;
} else {
self = super; // return super->get_canonical_holder(offset)
@@ -381,31 +384,28 @@
if (_nonstatic_fields != NULL)
return _nonstatic_fields->length();
- // Size in bytes of my fields, including inherited fields.
- // About equal to size_helper() - sizeof(oopDesc).
- int fsize = nonstatic_field_size() * wordSize;
- if (fsize == 0) { // easy shortcut
+ if (!has_nonstatic_fields()) {
Arena* arena = CURRENT_ENV->arena();
_nonstatic_fields = new (arena) GrowableArray<ciField*>(arena, 0, 0, NULL);
return 0;
}
assert(!is_java_lang_Object(), "bootstrap OK");
+ // Size in bytes of my fields, including inherited fields.
+ int fsize = nonstatic_field_size() * wordSize;
+
ciInstanceKlass* super = this->super();
- int super_fsize = 0;
- int super_flen = 0;
GrowableArray<ciField*>* super_fields = NULL;
- if (super != NULL) {
- super_fsize = super->nonstatic_field_size() * wordSize;
- super_flen = super->nof_nonstatic_fields();
+ if (super != NULL && super->has_nonstatic_fields()) {
+ int super_fsize = super->nonstatic_field_size() * wordSize;
+ int super_flen = super->nof_nonstatic_fields();
super_fields = super->_nonstatic_fields;
assert(super_flen == 0 || super_fields != NULL, "first get nof_fields");
- }
-
- // See if I am no larger than my super; if so, I can use his fields.
- if (fsize == super_fsize) {
- _nonstatic_fields = super_fields;
- return super_fields->length();
+ // See if I am no larger than my super; if so, I can use his fields.
+ if (fsize == super_fsize) {
+ _nonstatic_fields = super_fields;
+ return super_fields->length();
+ }
}
GrowableArray<ciField*>* fields = NULL;
@@ -425,11 +425,11 @@
// (In principle, they could mix with superclass fields.)
fields->sort(sort_field_by_offset);
#ifdef ASSERT
- int last_offset = sizeof(oopDesc);
+ int last_offset = instanceOopDesc::base_offset_in_bytes();
for (int i = 0; i < fields->length(); i++) {
ciField* field = fields->at(i);
int offset = field->offset_in_bytes();
- int size = (field->_type == NULL) ? oopSize : field->size_in_bytes();
+ int size = (field->_type == NULL) ? heapOopSize : field->size_in_bytes();
assert(last_offset <= offset, "no field overlap");
if (last_offset > (int)sizeof(oopDesc))
assert((offset - last_offset) < BytesPerLong, "no big holes");
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/ci/ciInstanceKlass.hpp
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -35,15 +35,16 @@
friend class ciBytecodeStream;
private:
- bool _is_shared;
-
jobject _loader;
jobject _protection_domain;
+ bool _is_shared;
bool _is_initialized;
bool _is_linked;
bool _has_finalizer;
bool _has_subklass;
+ bool _has_nonstatic_fields;
+
ciFlags _flags;
jint _nonstatic_field_size;
jint _nonstatic_oop_map_size;
@@ -132,6 +133,9 @@
jint nonstatic_field_size() {
assert(is_loaded(), "must be loaded");
return _nonstatic_field_size; }
+ jint has_nonstatic_fields() {
+ assert(is_loaded(), "must be loaded");
+ return _has_nonstatic_fields; }
jint nonstatic_oop_map_size() {
assert(is_loaded(), "must be loaded");
return _nonstatic_oop_map_size; }
@@ -164,8 +168,7 @@
bool has_finalizable_subclass();
bool contains_field_offset(int offset) {
- return (offset/wordSize) >= instanceOopDesc::header_size()
- && (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size();
+ return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size());
}
// Get the instance of java.lang.Class corresponding to
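contains_field_offset now delegates to instanceOopDesc so that the header boundary agrees with the compressed-oops object layout instead of being recomputed here in word units. A hedged sketch of what the delegated check amounts to; the helper in instanceOop.hpp may scale by heapOopSize rather than wordSize, and base_offset_in_bytes() follows the usage seen earlier in ciInstanceKlass.cpp:

// Sketch: a field offset belongs to this klass if it lies between the end of
// the object header and the end of the declared nonstatic fields.
static bool contains_field_offset_sketch(int offset, int nonstatic_field_size) {
  int base = instanceOopDesc::base_offset_in_bytes();   // end of object header
  return offset >= base && (offset - base) < nonstatic_field_size * wordSize;
}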
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/ci/ciObjectFactory.cpp
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -121,7 +121,7 @@
for (int i = T_BOOLEAN; i <= T_CONFLICT; i++) {
BasicType t = (BasicType)i;
- if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY) {
+ if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY && t != T_NARROWOOP) {
ciType::_basic_types[t] = new (_arena) ciType(t);
init_ident_of(ciType::_basic_types[t]);
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/classfile/classFileParser.cpp
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -2341,7 +2341,7 @@
// Incrementing next_nonstatic_oop_offset here advances the
// location where the real java fields are placed.
const int extra = java_lang_Class::number_of_fake_oop_fields;
- (*next_nonstatic_oop_offset_ptr) += (extra * wordSize);
+ (*next_nonstatic_oop_offset_ptr) += (extra * heapOopSize);
}
@@ -2647,7 +2647,7 @@
align_object_offset(vtable_size) +
align_object_offset(itable_size)) * wordSize;
next_static_double_offset = next_static_oop_offset +
- (fac.static_oop_count * oopSize);
+ (fac.static_oop_count * heapOopSize);
if ( fac.static_double_count &&
(Universe::field_type_should_be_aligned(T_DOUBLE) ||
Universe::field_type_should_be_aligned(T_LONG)) ) {
@@ -2687,6 +2687,14 @@
int nonstatic_byte_count = fac.nonstatic_byte_count;
int nonstatic_oop_count = fac.nonstatic_oop_count;
+ bool super_has_nonstatic_fields =
+ (super_klass() != NULL && super_klass->has_nonstatic_fields());
+ bool has_nonstatic_fields = super_has_nonstatic_fields ||
+ ((nonstatic_double_count + nonstatic_word_count +
+ nonstatic_short_count + nonstatic_byte_count +
+ nonstatic_oop_count) != 0);
+
+
// Prepare list of oops for oop maps generation.
u2* nonstatic_oop_offsets;
u2* nonstatic_oop_length;
@@ -2703,7 +2711,7 @@
java_lang_Class_fix_post(&next_nonstatic_field_offset);
nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset;
int fake_oop_count = (( next_nonstatic_field_offset -
- first_nonstatic_field_offset ) / oopSize);
+ first_nonstatic_field_offset ) / heapOopSize);
nonstatic_oop_length [0] = (u2)fake_oop_count;
nonstatic_oop_map_count = 1;
nonstatic_oop_count -= fake_oop_count;
@@ -2715,7 +2723,7 @@
#ifndef PRODUCT
if( PrintCompactFieldsSavings ) {
next_nonstatic_double_offset = next_nonstatic_field_offset +
- (nonstatic_oop_count * oopSize);
+ (nonstatic_oop_count * heapOopSize);
if ( nonstatic_double_count > 0 ) {
next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong);
}
@@ -2749,7 +2757,15 @@
class_name() == vmSymbols::java_lang_ref_SoftReference() ||
class_name() == vmSymbols::java_lang_StackTraceElement() ||
class_name() == vmSymbols::java_lang_String() ||
- class_name() == vmSymbols::java_lang_Throwable()) ) {
+ class_name() == vmSymbols::java_lang_Throwable() ||
+ class_name() == vmSymbols::java_lang_Boolean() ||
+ class_name() == vmSymbols::java_lang_Character() ||
+ class_name() == vmSymbols::java_lang_Float() ||
+ class_name() == vmSymbols::java_lang_Double() ||
+ class_name() == vmSymbols::java_lang_Byte() ||
+ class_name() == vmSymbols::java_lang_Short() ||
+ class_name() == vmSymbols::java_lang_Integer() ||
+ class_name() == vmSymbols::java_lang_Long())) {
allocation_style = 0; // Allocate oops first
compact_fields = false; // Don't compact fields
}
@@ -2758,7 +2774,7 @@
// Fields order: oops, longs/doubles, ints, shorts/chars, bytes
next_nonstatic_oop_offset = next_nonstatic_field_offset;
next_nonstatic_double_offset = next_nonstatic_oop_offset +
- (nonstatic_oop_count * oopSize);
+ (nonstatic_oop_count * heapOopSize);
} else if( allocation_style == 1 ) {
// Fields order: longs/doubles, ints, shorts/chars, bytes, oops
next_nonstatic_double_offset = next_nonstatic_field_offset;
@@ -2775,8 +2791,18 @@
int nonstatic_short_space_offset;
int nonstatic_byte_space_offset;
- if( nonstatic_double_count > 0 ) {
- int offset = next_nonstatic_double_offset;
+ bool compact_into_header = (UseCompressedOops &&
+ allocation_style == 1 && compact_fields &&
+ !super_has_nonstatic_fields);
+
+ if( compact_into_header || nonstatic_double_count > 0 ) {
+ int offset;
+ // Pack something in with the header if no super klass has done so.
+ if (compact_into_header) {
+ offset = oopDesc::klass_gap_offset_in_bytes();
+ } else {
+ offset = next_nonstatic_double_offset;
+ }
next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
if( compact_fields && offset != next_nonstatic_double_offset ) {
// Allocate available fields into the gap before double field.
@@ -2804,12 +2830,13 @@
}
// Allocate oop field in the gap if there are no other fields for that.
nonstatic_oop_space_offset = offset;
- if( length >= oopSize && nonstatic_oop_count > 0 &&
+ if(!compact_into_header && length >= heapOopSize &&
+ nonstatic_oop_count > 0 &&
allocation_style != 0 ) { // when oop fields not first
nonstatic_oop_count -= 1;
nonstatic_oop_space_count = 1; // Only one will fit
- length -= oopSize;
- offset += oopSize;
+ length -= heapOopSize;
+ offset += heapOopSize;
}
}
}
@@ -2828,9 +2855,9 @@
next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
if( nonstatic_oop_count > 0 ) {
notaligned_offset = next_nonstatic_oop_offset;
- next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, oopSize);
+ next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
}
- notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * oopSize);
+ notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
}
next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
@@ -2846,7 +2873,7 @@
switch (atype) {
case STATIC_OOP:
real_offset = next_static_oop_offset;
- next_static_oop_offset += oopSize;
+ next_static_oop_offset += heapOopSize;
break;
case STATIC_BYTE:
real_offset = next_static_byte_offset;
@@ -2868,16 +2895,16 @@
case NONSTATIC_OOP:
if( nonstatic_oop_space_count > 0 ) {
real_offset = nonstatic_oop_space_offset;
- nonstatic_oop_space_offset += oopSize;
+ nonstatic_oop_space_offset += heapOopSize;
nonstatic_oop_space_count -= 1;
} else {
real_offset = next_nonstatic_oop_offset;
- next_nonstatic_oop_offset += oopSize;
+ next_nonstatic_oop_offset += heapOopSize;
}
// Update oop maps
if( nonstatic_oop_map_count > 0 &&
nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
- (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * oopSize) ) {
+ (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * heapOopSize) ) {
// Extend current oop map
nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1;
} else {
@@ -2970,6 +2997,7 @@
//this_klass->set_super(super_klass());
this_klass->set_class_loader(class_loader());
this_klass->set_nonstatic_field_size(nonstatic_field_size);
+ this_klass->set_has_nonstatic_fields(has_nonstatic_fields);
this_klass->set_static_oop_field_size(fac.static_oop_count);
cp->set_pool_holder(this_klass());
this_klass->set_constants(cp());
@@ -3128,7 +3156,7 @@
OopMapBlock* first_map = super->start_of_nonstatic_oop_maps();
OopMapBlock* last_map = first_map + map_size - 1;
- int next_offset = last_map->offset() + (last_map->length() * oopSize);
+ int next_offset = last_map->offset() + (last_map->length() * heapOopSize);
if (next_offset == first_nonstatic_oop_offset) {
// There is no gap between superklass's last oop field and first
// local oop field, merge maps.
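The compact_into_header logic in the classFileParser hunks above starts field layout at oopDesc::klass_gap_offset_in_bytes() when compressed oops leave a 4-byte hole after the narrow klass pointer and no superclass has already claimed it. A standalone arithmetic sketch of that idea (the header sizes are assumptions for LP64 with UseCompressedOops, not values read from the VM):

#include <cstdio>

int main() {
  const int markWordSize        = 8;  // LP64 mark word
  const int compressedKlassSize = 4;  // narrow klass pointer
  const int heapOopSize         = 4;  // compressed reference field

  // The header ends 4 bytes short of an 8-byte boundary...
  int klass_gap_offset = markWordSize + compressedKlassSize;       // 12

  // ...so the first int-sized or oop-sized field can fill the gap.
  int first_field_offset = klass_gap_offset;                       // 12
  int next_free_offset   = first_field_offset + heapOopSize;       // 16

  printf("first field at %d, next free offset %d (8-byte aligned again)\n",
         first_field_offset, next_free_offset);
  return 0;
}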
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/classfile/javaClasses.cpp
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -520,16 +520,12 @@
JavaThread* java_lang_Thread::thread(oop java_thread) {
- return (JavaThread*) java_thread->obj_field(_eetop_offset);
+ return (JavaThread*)java_thread->address_field(_eetop_offset);
}
void java_lang_Thread::set_thread(oop java_thread, JavaThread* thread) {
- // We are storing a JavaThread* (malloc'ed data) into a long field in the thread
- // object. The store has to be 64-bit wide so we use a pointer store, but we
- // cannot call oopDesc::obj_field_put since it includes a write barrier!
- oop* addr = java_thread->obj_field_addr(_eetop_offset);
- *addr = (oop) thread;
+ java_thread->address_field_put(_eetop_offset, (address)thread);
}
@@ -1038,8 +1034,8 @@
if (_dirty && _methods != NULL) {
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
- bs->write_ref_array(MemRegion((HeapWord*)_methods->obj_at_addr(0),
- _methods->length() * HeapWordsPerOop));
+ bs->write_ref_array(MemRegion((HeapWord*)_methods->base(),
+ _methods->array_size()));
_dirty = false;
}
}
@@ -1083,8 +1079,9 @@
method = mhandle();
}
- // _methods->obj_at_put(_index, method);
- *_methods->obj_at_addr(_index) = method;
+ _methods->obj_at_put(_index, method);
+ // bad for UseCompressedOops
+ // *_methods->obj_at_addr(_index) = method;
_bcis->ushort_at_put(_index, bci);
_index++;
_dirty = true;
@@ -1973,39 +1970,30 @@
// Support for java_lang_ref_Reference
-
-void java_lang_ref_Reference::set_referent(oop ref, oop value) {
- ref->obj_field_put(referent_offset, value);
-}
-
-oop* java_lang_ref_Reference::referent_addr(oop ref) {
- return ref->obj_field_addr(referent_offset);
-}
-
-void java_lang_ref_Reference::set_next(oop ref, oop value) {
- ref->obj_field_put(next_offset, value);
-}
-
-oop* java_lang_ref_Reference::next_addr(oop ref) {
- return ref->obj_field_addr(next_offset);
+oop java_lang_ref_Reference::pending_list_lock() {
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+ char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset);
+ if (UseCompressedOops) {
+ return oopDesc::load_decode_heap_oop((narrowOop *)addr);
+ } else {
+ return oopDesc::load_decode_heap_oop((oop*)addr);
+ }
}
-void java_lang_ref_Reference::set_discovered(oop ref, oop value) {
- ref->obj_field_put(discovered_offset, value);
-}
-
-oop* java_lang_ref_Reference::discovered_addr(oop ref) {
- return ref->obj_field_addr(discovered_offset);
+HeapWord *java_lang_ref_Reference::pending_list_addr() {
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+ char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset);
+ // XXX This might not be HeapWord aligned; would almost rather this be char *.
+ return (HeapWord*)addr;
}
-oop* java_lang_ref_Reference::pending_list_lock_addr() {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
- return (oop*)(((char *)ik->start_of_static_fields()) + static_lock_offset);
-}
-
-oop* java_lang_ref_Reference::pending_list_addr() {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
- return (oop *)(((char *)ik->start_of_static_fields()) + static_pending_offset);
+oop java_lang_ref_Reference::pending_list() {
+ char *addr = (char *)pending_list_addr();
+ if (UseCompressedOops) {
+ return oopDesc::load_decode_heap_oop((narrowOop *)addr);
+ } else {
+ return oopDesc::load_decode_heap_oop((oop*)addr);
+ }
}
@@ -2291,8 +2279,11 @@
// Invoked before SystemDictionary::initialize, so pre-loaded classes
// are not available to determine the offset_of_static_fields.
void JavaClasses::compute_hard_coded_offsets() {
- const int x = wordSize;
- const int header = instanceOopDesc::header_size_in_bytes();
+ const int x = heapOopSize;
+ // With compressed oops, fields of these special classes are not allocated
+ // into the gap in the header, because hard coded offsets can't be conditional;
+ // base_offset_in_bytes() would be wrong here, so allocate after the header.
+ const int header = sizeof(instanceOopDesc);
// Do the String Class
java_lang_String::value_offset = java_lang_String::hc_value_offset * x + header;
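pending_list_lock() and pending_list() above now branch on UseCompressedOops and decode the static slot through oopDesc::load_decode_heap_oop on either a narrowOop* or an oop*. A simplified, self-contained sketch of that load/store shape (apart from the UseCompressedOops flag, the types and the 3-bit shift are stand-ins, not the VM's API):

#include <cassert>
#include <cstdint>
#include <cstring>

typedef uint32_t  narrowOop;
typedef uintptr_t oop;           // stand-in for a heap reference

bool UseCompressedOops = true;   // decided once at VM start

oop load_heap_oop_at(const char* base, int offset) {
  if (UseCompressedOops) {       // 4-byte slot: load, then decode
    narrowOop v;
    std::memcpy(&v, base + offset, sizeof(v));
    return (oop)v << 3;
  } else {                       // 8-byte slot: load as-is
    oop v;
    std::memcpy(&v, base + offset, sizeof(v));
    return v;
  }
}

void store_heap_oop_at(char* base, int offset, oop value) {
  if (UseCompressedOops) {
    narrowOop v = (narrowOop)(value >> 3);   // encode before the narrow store
    std::memcpy(base + offset, &v, sizeof(v));
  } else {
    std::memcpy(base + offset, &value, sizeof(value));
  }
}

int main() {
  char statics[32] = {0};
  store_heap_oop_at(statics, 8, (oop)0x12340);
  assert(load_heap_oop_at(statics, 8) == (oop)0x12340);
  return 0;
}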
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/classfile/javaClasses.hpp
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -691,24 +691,47 @@
static int number_of_fake_oop_fields;
// Accessors
- static oop referent(oop ref) { return *referent_addr(ref); }
- static void set_referent(oop ref, oop value);
- static oop* referent_addr(oop ref);
-
- static oop next(oop ref) { return *next_addr(ref); }
- static void set_next(oop ref, oop value);
- static oop* next_addr(oop ref);
+ static oop referent(oop ref) {
+ return ref->obj_field(referent_offset);
+ }
+ static void set_referent(oop ref, oop value) {
+ ref->obj_field_put(referent_offset, value);
+ }
+ static void set_referent_raw(oop ref, oop value) {
+ ref->obj_field_raw_put(referent_offset, value);
+ }
+ static HeapWord* referent_addr(oop ref) {
+ return ref->obj_field_addr(referent_offset);
+ }
+ static oop next(oop ref) {
+ return ref->obj_field(next_offset);
+ }
+ static void set_next(oop ref, oop value) {
+ ref->obj_field_put(next_offset, value);
+ }
+ static void set_next_raw(oop ref, oop value) {
+ ref->obj_field_raw_put(next_offset, value);
+ }
+ static HeapWord* next_addr(oop ref) {
+ return ref->obj_field_addr(next_offset);
+ }
+ static oop discovered(oop ref) {
+ return ref->obj_field(discovered_offset);
+ }
+ static void set_discovered(oop ref, oop value) {
+ ref->obj_field_put(discovered_offset, value);
+ }
+ static void set_discovered_raw(oop ref, oop value) {
+ ref->obj_field_raw_put(discovered_offset, value);
+ }
+ static HeapWord* discovered_addr(oop ref) {
+ return ref->obj_field_addr(discovered_offset);
+ }
+ // Accessors for statics
+ static oop pending_list_lock();
+ static oop pending_list();
- static oop discovered(oop ref) { return *discovered_addr(ref); }
- static void set_discovered(oop ref, oop value);
- static oop* discovered_addr(oop ref);
-
- // Accessors for statics
- static oop pending_list_lock() { return *pending_list_lock_addr(); }
- static oop pending_list() { return *pending_list_addr(); }
-
- static oop* pending_list_lock_addr();
- static oop* pending_list_addr();
+ static HeapWord* pending_list_addr();
};
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/compiler/oopMap.cpp
--- a/hotspot/src/share/vm/compiler/oopMap.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/compiler/oopMap.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -169,11 +169,8 @@
}
-void OopMap::set_dead(VMReg reg) {
- // At this time, we only need dead entries in our OopMap when ZapDeadCompiledLocals is active.
- if (ZapDeadCompiledLocals) {
- set_xxx(reg, OopMapValue::dead_value, VMRegImpl::Bad());
- }
+void OopMap::set_narrowoop(VMReg reg) {
+ set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}
@@ -305,7 +302,9 @@
}
class DoNothingClosure: public OopClosure {
-public: void do_oop(oop* p) {}
+ public:
+ void do_oop(oop* p) {}
+ void do_oop(narrowOop* p) {}
};
static DoNothingClosure do_nothing;
@@ -349,23 +348,21 @@
void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
// add derived oops to a table
- all_do(fr, reg_map, f, add_derived_oop, &do_nothing, &do_nothing);
+ all_do(fr, reg_map, f, add_derived_oop, &do_nothing);
}
void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
- OopClosure* value_fn, OopClosure* dead_fn) {
+ OopClosure* value_fn) {
CodeBlob* cb = fr->cb();
- {
- assert(cb != NULL, "no codeblob");
- }
+ assert(cb != NULL, "no codeblob");
NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)
OopMapSet* maps = cb->oop_maps();
- OopMap* map = cb->oop_map_for_return_address(fr->pc());
- assert(map != NULL, " no ptr map found");
+ OopMap* map = cb->oop_map_for_return_address(fr->pc());
+ assert(map != NULL, "no ptr map found");
// handle derived pointers first (otherwise base pointer may be
// changed before derived pointer offset has been collected)
@@ -393,8 +390,8 @@
}
}
- // We want dead, value and oop oop_types
- int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::dead_value;
+ // We want narrow oop, value and oop oop_types
+ int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::narrowoop_value;
{
for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) {
omv = oms.current();
@@ -402,11 +399,15 @@
if ( loc != NULL ) {
if ( omv.type() == OopMapValue::oop_value ) {
#ifdef ASSERT
- if (COMPILER2_PRESENT(!DoEscapeAnalysis &&) !Universe::heap()->is_in_or_null(*loc)) {
+ if (COMPILER2_PRESENT(!DoEscapeAnalysis &&)
+ (((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
+ !Universe::heap()->is_in_or_null(*loc)) {
tty->print_cr("# Found non oop pointer. Dumping state at failure");
// try to dump out some helpful debugging information
trace_codeblob_maps(fr, reg_map);
omv.print();
+ tty->print_cr("register r");
+ omv.reg()->print();
tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
// do the real assert.
assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
@@ -415,8 +416,17 @@
oop_fn->do_oop(loc);
} else if ( omv.type() == OopMapValue::value_value ) {
value_fn->do_oop(loc);
- } else if ( omv.type() == OopMapValue::dead_value ) {
- dead_fn->do_oop(loc);
+ } else if ( omv.type() == OopMapValue::narrowoop_value ) {
+ narrowOop *nl = (narrowOop*)loc;
+#ifndef VM_LITTLE_ENDIAN
+ if (!omv.reg()->is_stack()) {
+ // compressed oops in registers only take up 4 bytes of an
+ // 8 byte register but they are in the wrong part of the
+ // word so adjust loc to point at the right place.
+ nl = (narrowOop*)((address)nl + 4);
+ }
+#endif
+ oop_fn->do_oop(nl);
}
}
}
@@ -519,8 +529,8 @@
case OopMapValue::value_value:
st->print("Value" );
break;
- case OopMapValue::dead_value:
- st->print("Dead" );
+ case OopMapValue::narrowoop_value:
+ st->print("NarrowOop" );
break;
case OopMapValue::callee_saved_value:
st->print("Callers_" );
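The #ifndef VM_LITTLE_ENDIAN adjustment in the all_do hunk above exists because a compressed oop held in a register is saved into a full 8-byte slot; on a big-endian machine the 32-bit payload sits in the second half of that slot, so the narrowOop* must be advanced by 4. A standalone illustration (plain C++, nothing VM-specific):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint64_t slot   = 0;
  uint32_t narrow = 0x12345678u;
  slot = narrow;                        // value occupies the low-order 32 bits

  unsigned char bytes[8];
  std::memcpy(bytes, &slot, sizeof(slot));

  uint32_t at0, at4;
  std::memcpy(&at0, bytes + 0, sizeof(at0));
  std::memcpy(&at4, bytes + 4, sizeof(at4));

  // Little-endian: the payload starts at byte 0, so the narrowOop* is correct.
  // Big-endian: it starts at byte 4, hence the "+ 4" applied to register slots.
  printf("bytes 0..3 = 0x%08x, bytes 4..7 = 0x%08x\n", at0, at4);
  return 0;
}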
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/compiler/oopMap.hpp
--- a/hotspot/src/share/vm/compiler/oopMap.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/compiler/oopMap.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -61,7 +61,7 @@
unused_value =0, // powers of 2, for masking OopMapStream
oop_value = 1,
value_value = 2,
- dead_value = 4,
+ narrowoop_value = 4,
callee_saved_value = 8,
derived_oop_value= 16,
stack_obj = 32 };
@@ -90,14 +90,14 @@
// Querying
bool is_oop() { return mask_bits(value(), type_mask_in_place) == oop_value; }
bool is_value() { return mask_bits(value(), type_mask_in_place) == value_value; }
- bool is_dead() { return mask_bits(value(), type_mask_in_place) == dead_value; }
+ bool is_narrowoop() { return mask_bits(value(), type_mask_in_place) == narrowoop_value; }
bool is_callee_saved() { return mask_bits(value(), type_mask_in_place) == callee_saved_value; }
bool is_derived_oop() { return mask_bits(value(), type_mask_in_place) == derived_oop_value; }
bool is_stack_obj() { return mask_bits(value(), type_mask_in_place) == stack_obj; }
void set_oop() { set_value((value() & register_mask_in_place) | oop_value); }
void set_value() { set_value((value() & register_mask_in_place) | value_value); }
- void set_dead() { set_value((value() & register_mask_in_place) | dead_value); }
+ void set_narrowoop() { set_value((value() & register_mask_in_place) | narrowoop_value); }
void set_callee_saved() { set_value((value() & register_mask_in_place) | callee_saved_value); }
void set_derived_oop() { set_value((value() & register_mask_in_place) | derived_oop_value); }
void set_stack_obj() { set_value((value() & register_mask_in_place) | stack_obj); }
@@ -176,6 +176,7 @@
// slots to hold 4-byte values like ints and floats in the LP64 build.
void set_oop ( VMReg local);
void set_value( VMReg local);
+ void set_narrowoop(VMReg local);
void set_dead ( VMReg local);
void set_callee_saved( VMReg local, VMReg caller_machine_register );
void set_derived_oop ( VMReg local, VMReg derived_from_local_register );
@@ -245,7 +246,7 @@
static void all_do(const frame* fr, const RegisterMap* reg_map,
OopClosure* oop_fn,
void derived_oop_fn(oop* base, oop* derived),
- OopClosure* value_fn, OopClosure* dead_fn);
+ OopClosure* value_fn);
// Printing
void print_on(outputStream* st) const;
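Each OopMapValue type above is a distinct power of two, so a visitor can OR several types into one mask and let an OopMapStream skip everything else, as all_do does with oop_value | value_value | narrowoop_value. A small sketch of that filtering (the enum values are copied from the hunk above; the loop itself is illustrative only):

#include <cstdio>

enum OopTypes {
  unused_value       = 0,
  oop_value          = 1,
  value_value        = 2,
  narrowoop_value    = 4,
  callee_saved_value = 8,
  derived_oop_value  = 16,
  stack_obj          = 32
};

int main() {
  int mask = oop_value | value_value | narrowoop_value;
  for (int t = 1; t <= stack_obj; t <<= 1) {
    printf("type %2d: %s\n", t, (t & mask) ? "visited" : "skipped");
  }
  return 0;
}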
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -29,22 +29,34 @@
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
-template <class E> class GenericTaskQueue;
-typedef GenericTaskQueue<oop> OopTaskQueue;
-template <class E> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;
+// Decode the oop and call do_oop on it.
+#define DO_OOP_WORK_DEFN \
+ void do_oop(oop obj); \
+ template <class T> inline void do_oop_work(T* p) { \
+ T heap_oop = oopDesc::load_heap_oop(p); \
+ if (!oopDesc::is_null(heap_oop)) { \
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
+ do_oop(obj); \
+ } \
+ }
+
class MarkRefsIntoClosure: public OopsInGenClosure {
- const MemRegion _span;
- CMSBitMap* _bitMap;
- const bool _should_do_nmethods;
+ private:
+ const MemRegion _span;
+ CMSBitMap* _bitMap;
+ const bool _should_do_nmethods;
+ protected:
+ DO_OOP_WORK_DEFN
public:
MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
bool should_do_nmethods);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const {
return _should_do_nmethods;
@@ -57,15 +69,20 @@
// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
- const MemRegion _span;
- CMSBitMap* _verification_bm;
- CMSBitMap* _cms_bm;
- const bool _should_do_nmethods;
+ private:
+ const MemRegion _span;
+ CMSBitMap* _verification_bm;
+ CMSBitMap* _cms_bm;
+ const bool _should_do_nmethods;
+ protected:
+ DO_OOP_WORK_DEFN
public:
MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
CMSBitMap* cms_bm, bool should_do_nmethods);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const {
return _should_do_nmethods;
@@ -75,37 +92,40 @@
}
};
-
// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public OopClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bit_map;
- CMSBitMap* _mod_union_table;
- CMSMarkStack* _mark_stack;
- CMSMarkStack* _revisit_stack;
- bool _concurrent_precleaning;
- bool const _should_remember_klasses;
+ private:
+ CMSCollector* _collector;
+ MemRegion _span;
+ CMSBitMap* _bit_map;
+ CMSBitMap* _mod_union_table;
+ CMSMarkStack* _mark_stack;
+ CMSMarkStack* _revisit_stack;
+ bool _concurrent_precleaning;
+ bool const _should_remember_klasses;
+ protected:
+ DO_OOP_WORK_DEFN
public:
PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
- CMSMarkStack* mark_stack,
- CMSMarkStack* revisit_stack,
- bool concurrent_precleaning);
-
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop(p); }
+ CMSMarkStack* mark_stack,
+ CMSMarkStack* revisit_stack,
+ bool concurrent_precleaning);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
bool do_header() { return true; }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
- const bool should_remember_klasses() const {
+ virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
- void remember_klass(Klass* k);
+ virtual void remember_klass(Klass* k);
};
// In the parallel case, the revisit stack, the bit map and the
@@ -115,12 +135,15 @@
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public OopClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bit_map;
- OopTaskQueue* _work_queue;
- CMSMarkStack* _revisit_stack;
- bool const _should_remember_klasses;
+ private:
+ CMSCollector* _collector;
+ MemRegion _span;
+ CMSBitMap* _bit_map;
+ OopTaskQueue* _work_queue;
+ CMSMarkStack* _revisit_stack;
+ bool const _should_remember_klasses;
+ protected:
+ DO_OOP_WORK_DEFN
public:
Par_PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
@@ -128,43 +151,48 @@
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* revisit_stack);
-
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
bool do_header() { return true; }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
- const bool should_remember_klasses() const {
+ virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
- void remember_klass(Klass* k);
+ virtual void remember_klass(Klass* k);
};
-
// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
- MemRegion _span;
- CMSBitMap* _bit_map;
- CMSMarkStack* _mark_stack;
- PushAndMarkClosure _pushAndMarkClosure;
- CMSCollector* _collector;
- bool _yield;
+ private:
+ MemRegion _span;
+ CMSBitMap* _bit_map;
+ CMSMarkStack* _mark_stack;
+ PushAndMarkClosure _pushAndMarkClosure;
+ CMSCollector* _collector;
+ Mutex* _freelistLock;
+ bool _yield;
// Whether closure is being used for concurrent precleaning
- bool _concurrent_precleaning;
- Mutex* _freelistLock;
+ bool _concurrent_precleaning;
+ protected:
+ DO_OOP_WORK_DEFN
public:
MarkRefsIntoAndScanClosure(MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
- CMSMarkStack* mark_stack,
- CMSMarkStack* revisit_stack,
+ CMSMarkStack* mark_stack,
+ CMSMarkStack* revisit_stack,
CMSCollector* collector,
bool should_yield,
bool concurrent_precleaning);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const { return true; }
Prefetch::style prefetch_style() {
@@ -185,11 +213,14 @@
// synchronized. An OopTaskQueue structure, supporting efficient
// workstealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
- MemRegion _span;
- CMSBitMap* _bit_map;
- OopTaskQueue* _work_queue;
- const uint _low_water_mark;
- Par_PushAndMarkClosure _par_pushAndMarkClosure;
+ private:
+ MemRegion _span;
+ CMSBitMap* _bit_map;
+ OopTaskQueue* _work_queue;
+ const uint _low_water_mark;
+ Par_PushAndMarkClosure _par_pushAndMarkClosure;
+ protected:
+ DO_OOP_WORK_DEFN
public:
Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
MemRegion span,
@@ -197,8 +228,10 @@
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* revisit_stack);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const { return true; }
Prefetch::style prefetch_style() {
@@ -211,28 +244,34 @@
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public OopClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bitMap;
- CMSMarkStack* _markStack;
- CMSMarkStack* _revisitStack;
- HeapWord* const _finger;
- MarkFromRootsClosure* const _parent;
- bool const _should_remember_klasses;
+ private:
+ CMSCollector* _collector;
+ MemRegion _span;
+ CMSBitMap* _bitMap;
+ CMSMarkStack* _markStack;
+ CMSMarkStack* _revisitStack;
+ HeapWord* const _finger;
+ MarkFromRootsClosure* const
+ _parent;
+ bool const _should_remember_klasses;
+ protected:
+ DO_OOP_WORK_DEFN
public:
PushOrMarkClosure(CMSCollector* cms_collector,
MemRegion span,
CMSBitMap* bitMap,
- CMSMarkStack* markStack,
- CMSMarkStack* revisitStack,
- HeapWord* finger,
+ CMSMarkStack* markStack,
+ CMSMarkStack* revisitStack,
+ HeapWord* finger,
MarkFromRootsClosure* parent);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop(p); }
- const bool should_remember_klasses() const {
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
+ virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
- void remember_klass(Klass* k);
+ virtual void remember_klass(Klass* k);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
private:
@@ -244,6 +283,7 @@
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _whole_span;
MemRegion _span; // local chunk
@@ -253,24 +293,29 @@
CMSMarkStack* _revisit_stack;
HeapWord* const _finger;
HeapWord** const _global_finger_addr;
- Par_MarkFromRootsClosure* const _parent;
- bool const _should_remember_klasses;
+ Par_MarkFromRootsClosure* const
+ _parent;
+ bool const _should_remember_klasses;
+ protected:
+ DO_OOP_WORK_DEFN
public:
Par_PushOrMarkClosure(CMSCollector* cms_collector,
- MemRegion span,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue,
- CMSMarkStack* mark_stack,
- CMSMarkStack* revisit_stack,
- HeapWord* finger,
- HeapWord** global_finger_addr,
- Par_MarkFromRootsClosure* parent);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop(p); }
- const bool should_remember_klasses() const {
+ MemRegion span,
+ CMSBitMap* bit_map,
+ OopTaskQueue* work_queue,
+ CMSMarkStack* mark_stack,
+ CMSMarkStack* revisit_stack,
+ HeapWord* finger,
+ HeapWord** global_finger_addr,
+ Par_MarkFromRootsClosure* parent);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+ virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
- void remember_klass(Klass* k);
+ virtual void remember_klass(Klass* k);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
private:
@@ -282,10 +327,13 @@
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step.
class CMSKeepAliveClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _span;
CMSMarkStack* _mark_stack;
CMSBitMap* _bit_map;
+ protected:
+ DO_OOP_WORK_DEFN
public:
CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, CMSMarkStack* mark_stack):
@@ -293,16 +341,20 @@
_span(span),
_bit_map(bit_map),
_mark_stack(mark_stack) { }
-
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};
class CMSInnerParMarkAndPushClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _span;
OopTaskQueue* _work_queue;
CMSBitMap* _bit_map;
+ protected:
+ DO_OOP_WORK_DEFN
public:
CMSInnerParMarkAndPushClosure(CMSCollector* collector,
MemRegion span, CMSBitMap* bit_map,
@@ -311,24 +363,32 @@
_span(span),
_bit_map(bit_map),
_work_queue(work_queue) { }
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _span;
OopTaskQueue* _work_queue;
CMSBitMap* _bit_map;
- CMSInnerParMarkAndPushClosure _mark_and_push;
+ CMSInnerParMarkAndPushClosure
+ _mark_and_push;
const uint _low_water_mark;
void trim_queue(uint max);
+ protected:
+ DO_OOP_WORK_DEFN
public:
CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, OopTaskQueue* work_queue);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};
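DO_OOP_WORK_DEFN above gives every CMS closure one templated do_oop_work that handles both slot widths, null-checks, decodes, and funnels into a single do_oop(oop) holding the closure's real logic, while the virtual do_oop(oop*) and do_oop(narrowOop*) overrides simply forward to it. A self-contained sketch of the same shape (simplified types assuming LP64; not the VM's oopDesc interface):

#include <cstdint>
#include <cstdio>

typedef uint32_t  narrowOop;
typedef uintptr_t oop;                                  // assumes LP64

inline oop decode(narrowOop v) { return (oop)v << 3; }  // assumed 3-bit shift
inline oop decode(oop v)       { return v; }            // already full width

struct CountLiveClosure {
  int live;
  CountLiveClosure() : live(0) {}

  void do_oop(oop obj) { if (obj != 0) live++; }        // the closure's real work

  template <class T>
  void do_oop_work(T* p) {
    T heap_oop = *p;                  // load either a 4- or an 8-byte slot
    if (heap_oop != 0) {
      do_oop(decode(heap_oop));       // decode only when non-null
    }
  }
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

int main() {
  CountLiveClosure cl;
  oop       wide   = 0x1000;
  narrowOop narrow = 0x200;
  cl.do_oop(&wide);
  cl.do_oop(&narrow);
  printf("live slots visited: %d\n", cl.live);   // prints 2
  return 0;
}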
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -177,7 +177,7 @@
assert(q->forwardee() == NULL, "should be forwarded to NULL");
}
- debug_only(MarkSweep::register_live_oop(q, adjusted_size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
compact_top += adjusted_size;
// we need to update the offset table so that the beginnings of objects can be
@@ -1211,7 +1211,7 @@
return fc;
}
-oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
+oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
assert_locked();
@@ -2116,7 +2116,6 @@
splitBirth(to2);
}
-
void CompactibleFreeListSpace::print() const {
tty->print(" CompactibleFreeListSpace");
Space::print();
@@ -2130,6 +2129,7 @@
}
class VerifyAllBlksClosure: public BlkClosure {
+ private:
const CompactibleFreeListSpace* _sp;
const MemRegion _span;
@@ -2137,7 +2137,7 @@
VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
MemRegion span) : _sp(sp), _span(span) { }
- size_t do_blk(HeapWord* addr) {
+ virtual size_t do_blk(HeapWord* addr) {
size_t res;
if (_sp->block_is_obj(addr)) {
oop p = oop(addr);
@@ -2160,12 +2160,54 @@
};
class VerifyAllOopsClosure: public OopClosure {
+ private:
const CMSCollector* _collector;
const CompactibleFreeListSpace* _sp;
const MemRegion _span;
const bool _past_remark;
const CMSBitMap* _bit_map;
+ protected:
+ void do_oop(void* p, oop obj) {
+ if (_span.contains(obj)) { // the interior oop points into CMS heap
+ if (!_span.contains(p)) { // reference from outside CMS heap
+ // Should be a valid object; the first disjunct below allows
+ // us to sidestep an assertion in block_is_obj() that insists
+ // that p be in _sp. Note that several generations (and spaces)
+ // are spanned by _span (CMS heap) above.
+ guarantee(!_sp->is_in_reserved(obj) ||
+ _sp->block_is_obj((HeapWord*)obj),
+ "Should be an object");
+ guarantee(obj->is_oop(), "Should be an oop");
+ obj->verify();
+ if (_past_remark) {
+ // Remark has been completed, the object should be marked
+ _bit_map->isMarked((HeapWord*)obj);
+ }
+ } else { // reference within CMS heap
+ if (_past_remark) {
+ // Remark has been completed -- so the referent should have
+ // been marked, if referring object is.
+ if (_bit_map->isMarked(_collector->block_start(p))) {
+ guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
+ }
+ }
+ }
+ } else if (_sp->is_in_reserved(p)) {
+ // the reference is from FLS, and points out of FLS
+ guarantee(obj->is_oop(), "Should be an oop");
+ obj->verify();
+ }
+ }
+
+ template <class T> void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ do_oop(p, obj);
+ }
+ }
+
public:
VerifyAllOopsClosure(const CMSCollector* collector,
const CompactibleFreeListSpace* sp, MemRegion span,
@@ -2173,40 +2215,8 @@
OopClosure(), _collector(collector), _sp(sp), _span(span),
_past_remark(past_remark), _bit_map(bit_map) { }
- void do_oop(oop* ptr) {
- oop p = *ptr;
- if (p != NULL) {
- if (_span.contains(p)) { // the interior oop points into CMS heap
- if (!_span.contains(ptr)) { // reference from outside CMS heap
- // Should be a valid object; the first disjunct below allows
- // us to sidestep an assertion in block_is_obj() that insists
- // that p be in _sp. Note that several generations (and spaces)
- // are spanned by _span (CMS heap) above.
- guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
- "Should be an object");
- guarantee(p->is_oop(), "Should be an oop");
- p->verify();
- if (_past_remark) {
- // Remark has been completed, the object should be marked
- _bit_map->isMarked((HeapWord*)p);
- }
- }
- else { // reference within CMS heap
- if (_past_remark) {
- // Remark has been completed -- so the referent should have
- // been marked, if referring object is.
- if (_bit_map->isMarked(_collector->block_start(ptr))) {
- guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
- }
- }
- }
- } else if (_sp->is_in_reserved(ptr)) {
- // the reference is from FLS, and points out of FLS
- guarantee(p->is_oop(), "Should be an oop");
- p->verify();
- }
- }
- }
+ virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
};
void CompactibleFreeListSpace::verify(bool ignored) const {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -540,7 +540,7 @@
HeapWord* allocate(size_t size);
HeapWord* par_allocate(size_t size);
- oop promote(oop obj, size_t obj_size, oop* ref);
+ oop promote(oop obj, size_t obj_size);
void gc_prologue();
void gc_epilogue();
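promote() drops its oop* ref parameter here and in ConcurrentMarkSweepGeneration below; with compressed oops the referring slot may be a narrowOop*, so it is presumably left to the caller, which knows the slot's static width, to write the forwarded value back itself. A standalone sketch of that caller-side pattern (all names and the encoding shift are illustrative assumptions, not the CMS code):

#include <cstdint>
#include <cstdio>

typedef uint32_t  narrowOop;
typedef uintptr_t oop;

inline oop  decode(narrowOop v)        { return (oop)v << 3; }
inline oop  decode(oop v)              { return v; }
inline void store(narrowOop* p, oop v) { *p = (narrowOop)(v >> 3); }
inline void store(oop* p, oop v)       { *p = v; }

// Stand-in for copying an object to the old generation and returning its
// new address; it no longer needs to know anything about the referring slot.
oop promote(oop obj) { return obj + 0x100; }

template <class T>
void copy_and_update(T* slot) {
  oop obj = decode(*slot);
  store(slot, promote(obj));   // the caller re-encodes in the slot's own width
}

int main() {
  narrowOop n = 0x20;          // decodes to 0x100
  oop       w = 0x1000;
  copy_and_update(&n);
  copy_and_update(&w);
  printf("narrow slot -> 0x%x, wide slot -> 0x%lx\n",
         (unsigned)n, (unsigned long)w);
  return 0;
}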
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -1226,7 +1226,7 @@
return NULL;
}
-oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
+oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
// allocate, copy and if necessary update promoinfo --
// delegate to underlying space.
@@ -1238,7 +1238,7 @@
}
#endif // #ifndef PRODUCT
- oop res = _cmsSpace->promote(obj, obj_size, ref);
+ oop res = _cmsSpace->promote(obj, obj_size);
if (res == NULL) {
// expand and retry
size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
@@ -1249,7 +1249,7 @@
assert(next_gen() == NULL, "assumption, based upon which no attempt "
"is made to pass on a possibly failing "
"promotion to next generation");
- res = _cmsSpace->promote(obj, obj_size, ref);
+ res = _cmsSpace->promote(obj, obj_size);
}
if (res != NULL) {
// See comment in allocate() about when objects should
@@ -3922,13 +3922,15 @@
}
class Par_ConcMarkingClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bit_map;
CMSMarkStack* _overflow_stack;
CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
OopTaskQueue* _work_queue;
-
+ protected:
+ DO_OOP_WORK_DEFN
public:
Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
@@ -3937,8 +3939,8 @@
_work_queue(work_queue),
_bit_map(bit_map),
_overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
-
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
void trim_queue(size_t max);
void handle_stack_overflow(HeapWord* lost);
};
@@ -3947,11 +3949,9 @@
// the salient assumption here is that stolen oops must
// always be initialized, so we do not need to check for
// uninitialized objects before scanning here.
-void Par_ConcMarkingClosure::do_oop(oop* p) {
- oop this_oop = *p;
- assert(this_oop->is_oop_or_null(),
- "expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+void Par_ConcMarkingClosure::do_oop(oop obj) {
+ assert(obj->is_oop_or_null(), "expected an oop or NULL");
+ HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -3970,7 +3970,7 @@
}
)
if (simulate_overflow ||
- !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+ !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
// stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
@@ -3987,6 +3987,9 @@
}
}
+void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+
void Par_ConcMarkingClosure::trim_queue(size_t max) {
while (_work_queue->size() > max) {
oop new_oop;
@@ -4086,8 +4089,8 @@
//
// Tony 2006.06.29
for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
+ ConcurrentMarkSweepThread::should_yield() &&
+ !CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
@@ -6048,8 +6051,8 @@
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
+ ConcurrentMarkSweepThread::should_yield() &&
+ !CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
@@ -6362,18 +6365,18 @@
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
-void MarkRefsIntoClosure::do_oop(oop* p) {
+void MarkRefsIntoClosure::do_oop(oop obj) {
// if p points into _span, then mark corresponding bit in _markBitMap
- oop thisOop = *p;
- if (thisOop != NULL) {
- assert(thisOop->is_oop(), "expected an oop");
- HeapWord* addr = (HeapWord*)thisOop;
- if (_span.contains(addr)) {
- // this should be made more efficient
- _bitMap->mark(addr);
- }
- }
-}
+ assert(obj->is_oop(), "expected an oop");
+ HeapWord* addr = (HeapWord*)obj;
+ if (_span.contains(addr)) {
+ // this should be made more efficient
+ _bitMap->mark(addr);
+ }
+}
+
+void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
+void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
@@ -6387,22 +6390,22 @@
assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}
-void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
+void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
// if p points into _span, then mark corresponding bit in _markBitMap
- oop this_oop = *p;
- if (this_oop != NULL) {
- assert(this_oop->is_oop(), "expected an oop");
- HeapWord* addr = (HeapWord*)this_oop;
- if (_span.contains(addr)) {
- _verification_bm->mark(addr);
- if (!_cms_bm->isMarked(addr)) {
- oop(addr)->print();
- gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
- fatal("... aborting");
- }
- }
- }
-}
+ assert(obj->is_oop(), "expected an oop");
+ HeapWord* addr = (HeapWord*)obj;
+ if (_span.contains(addr)) {
+ _verification_bm->mark(addr);
+ if (!_cms_bm->isMarked(addr)) {
+ oop(addr)->print();
+ gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
+ fatal("... aborting");
+ }
+ }
+}
+
+void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
@@ -6438,13 +6441,13 @@
// The marks are made in the marking bit map and the marking stack is
// used for keeping the (newly) grey objects during the scan.
// The parallel version (Par_...) appears further below.
-void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
- oop this_oop = *p;
- if (this_oop != NULL) {
- assert(this_oop->is_oop(), "expected an oop");
- HeapWord* addr = (HeapWord*)this_oop;
- assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
- assert(_collector->overflow_list_is_empty(), "should be empty");
+void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+ if (obj != NULL) {
+ assert(obj->is_oop(), "expected an oop");
+ HeapWord* addr = (HeapWord*)obj;
+ assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
+ assert(_collector->overflow_list_is_empty(),
+ "overflow list should be empty");
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// mark bit map (object is now grey)
@@ -6452,7 +6455,7 @@
// push on marking stack (stack should be empty), and drain the
// stack by applying this closure to the oops in the oops popped
// from the stack (i.e. blacken the grey objects)
- bool res = _mark_stack->push(this_oop);
+ bool res = _mark_stack->push(obj);
assert(res, "Should have space to push on empty stack");
do {
oop new_oop = _mark_stack->pop();
@@ -6488,6 +6491,9 @@
}
}
+void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
void MarkRefsIntoAndScanClosure::do_yield_work() {
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"CMS thread should hold CMS token");
@@ -6506,9 +6512,11 @@
_collector->icms_wait();
// See the comment in coordinator_yield()
- for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
+ for (unsigned i = 0;
+ i < CMSYieldSleepCount &&
+ ConcurrentMarkSweepThread::should_yield() &&
+ !CMSCollector::foregroundGCIsActive();
+ ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
@@ -6545,13 +6553,12 @@
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
- oop this_oop = *p;
- if (this_oop != NULL) {
+void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+ if (obj != NULL) {
// Ignore mark word because this could be an already marked oop
// that may be chained at the end of the overflow list.
- assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop");
- HeapWord* addr = (HeapWord*)this_oop;
+ assert(obj->is_oop(), "expected an oop");
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// mark bit map (object will become grey):
@@ -6565,7 +6572,7 @@
// queue to an appropriate length by applying this closure to
// the oops in the oops popped from the stack (i.e. blacken the
// grey objects)
- bool res = _work_queue->push(this_oop);
+ bool res = _work_queue->push(obj);
assert(res, "Low water mark should be less than capacity?");
trim_queue(_low_water_mark);
} // Else, another thread claimed the object
@@ -6573,6 +6580,9 @@
}
}
+void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
@@ -6675,8 +6685,8 @@
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
+ ConcurrentMarkSweepThread::should_yield() &&
+ !CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
@@ -6928,13 +6938,13 @@
assert(_markStack->isEmpty(),
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
- oop this_oop = oop(ptr);
+ oop obj = oop(ptr);
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
- assert(this_oop->is_oop(true), "should be an oop");
+ assert(obj->is_oop(true), "should be an oop");
assert(_finger <= ptr, "_finger runneth ahead");
// advance the finger to right end of this object
- _finger = ptr + this_oop->size();
+ _finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
@@ -6980,7 +6990,7 @@
_span, _bitMap, _markStack,
_revisitStack,
_finger, this);
- bool res = _markStack->push(this_oop);
+ bool res = _markStack->push(obj);
assert(res, "Empty non-zero size stack should have space for single push");
while (!_markStack->isEmpty()) {
oop new_oop = _markStack->pop();
@@ -7052,13 +7062,13 @@
assert(_work_queue->size() == 0,
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
- oop this_oop = oop(ptr);
+ oop obj = oop(ptr);
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
- assert(this_oop->is_oop(true), "should be an oop");
+ assert(obj->is_oop(true), "should be an oop");
assert(_finger <= ptr, "_finger runneth ahead");
// advance the finger to right end of this object
- _finger = ptr + this_oop->size();
+ _finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
@@ -7106,7 +7116,7 @@
_revisit_stack,
_finger,
gfa, this);
- bool res = _work_queue->push(this_oop); // overflow could occur here
+ bool res = _work_queue->push(obj); // overflow could occur here
assert(res, "Will hold once we use workqueues");
while (true) {
oop new_oop;
@@ -7176,15 +7186,15 @@
assert(_mark_stack->isEmpty(),
"should drain stack to limit stack usage");
// convert addr to an oop preparatory to scanning
- oop this_oop = oop(addr);
- assert(this_oop->is_oop(), "should be an oop");
+ oop obj = oop(addr);
+ assert(obj->is_oop(), "should be an oop");
assert(_finger <= addr, "_finger runneth ahead");
// advance the finger to right end of this object
- _finger = addr + this_oop->size();
+ _finger = addr + obj->size();
assert(_finger > addr, "we just incremented it above");
// Note: the finger doesn't advance while we drain
// the stack below.
- bool res = _mark_stack->push(this_oop);
+ bool res = _mark_stack->push(obj);
assert(res, "Empty non-zero size stack should have space for single push");
while (!_mark_stack->isEmpty()) {
oop new_oop = _mark_stack->pop();
@@ -7207,6 +7217,8 @@
_mark_stack(mark_stack)
{ }
+void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
+void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
@@ -7219,20 +7231,20 @@
_mark_stack->expand(); // expand the stack if possible
}
-void PushAndMarkVerifyClosure::do_oop(oop* p) {
- oop this_oop = *p;
- assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+void PushAndMarkVerifyClosure::do_oop(oop obj) {
+ assert(obj->is_oop_or_null(), "expected an oop or NULL");
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
_verification_bm->mark(addr); // now grey
if (!_cms_bm->isMarked(addr)) {
oop(addr)->print();
- gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
+ gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
+ addr);
fatal("... aborting");
}
- if (!_mark_stack->push(this_oop)) { // stack overflow
+ if (!_mark_stack->push(obj)) { // stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _mark_stack->capacity());
@@ -7285,7 +7297,6 @@
_should_remember_klasses(collector->should_unload_classes())
{ }
-
void CMSCollector::lower_restart_addr(HeapWord* low) {
assert(_span.contains(low), "Out of bounds addr");
if (_restart_addr == NULL) {
@@ -7321,12 +7332,10 @@
_overflow_stack->expand(); // expand the stack if possible
}
-
-void PushOrMarkClosure::do_oop(oop* p) {
- oop thisOop = *p;
+void PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
- assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
- HeapWord* addr = (HeapWord*)thisOop;
+ assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
_bitMap->mark(addr); // now grey
@@ -7342,7 +7351,7 @@
simulate_overflow = true;
}
)
- if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
+ if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _markStack->capacity());
@@ -7358,11 +7367,13 @@
}
}
-void Par_PushOrMarkClosure::do_oop(oop* p) {
- oop this_oop = *p;
+void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
+void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
+
+void Par_PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
- assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+ assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+ HeapWord* addr = (HeapWord*)obj;
if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
// We read the global_finger (volatile read) strictly after marking oop
@@ -7391,7 +7402,7 @@
}
)
if (simulate_overflow ||
- !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+ !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
// stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
@@ -7408,6 +7419,8 @@
}
}
+void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
@@ -7432,16 +7445,11 @@
// Grey object rescan during pre-cleaning and second checkpoint phases --
// the non-parallel version (the parallel version appears further below.)
-void PushAndMarkClosure::do_oop(oop* p) {
- oop this_oop = *p;
- // Ignore mark word verification. If during concurrent precleaning
- // the object monitor may be locked. If during the checkpoint
- // phases, the object may already have been reached by a different
- // path and may be at the end of the global overflow list (so
- // the mark word may be NULL).
- assert(this_oop->is_oop_or_null(true/* ignore mark word */),
+void PushAndMarkClosure::do_oop(oop obj) {
+ // If _concurrent_precleaning, ignore mark word verification
+ assert(obj->is_oop_or_null(_concurrent_precleaning),
"expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+ HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -7456,7 +7464,7 @@
simulate_overflow = true;
}
)
- if (simulate_overflow || !_mark_stack->push(this_oop)) {
+ if (simulate_overflow || !_mark_stack->push(obj)) {
if (_concurrent_precleaning) {
// During precleaning we can just dirty the appropriate card
// in the mod union table, thus ensuring that the object remains
@@ -7468,7 +7476,7 @@
} else {
// During the remark phase, we need to remember this oop
// in the overflow list.
- _collector->push_on_overflow_list(this_oop);
+ _collector->push_on_overflow_list(obj);
_collector->_ser_pmc_remark_ovflw++;
}
}
@@ -7492,10 +7500,12 @@
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}
+void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
+void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
+
// Grey object rescan during second checkpoint phase --
// the parallel version.
-void Par_PushAndMarkClosure::do_oop(oop* p) {
- oop this_oop = *p;
+void Par_PushAndMarkClosure::do_oop(oop obj) {
// In the assert below, we ignore the mark word because
// this oop may point to an already visited object that is
// on the overflow stack (in which case the mark word has
@@ -7507,9 +7517,9 @@
// value, by the time we get to examine this failing assert in
// the debugger, is_oop_or_null(false) may subsequently start
// to hold.
- assert(this_oop->is_oop_or_null(true),
+ assert(obj->is_oop_or_null(true),
"expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+ HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -7527,14 +7537,17 @@
simulate_overflow = true;
}
)
- if (simulate_overflow || !_work_queue->push(this_oop)) {
- _collector->par_push_on_overflow_list(this_oop);
+ if (simulate_overflow || !_work_queue->push(obj)) {
+ _collector->par_push_on_overflow_list(obj);
_collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
}
} // Else, some other thread got there first
}
}
+void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+
void PushAndMarkClosure::remember_klass(Klass* k) {
if (!_revisit_stack->push(oop(k))) {
fatal("Revisit stack overflowed in PushAndMarkClosure");
@@ -8228,9 +8241,8 @@
}
// CMSKeepAliveClosure: the serial version
-void CMSKeepAliveClosure::do_oop(oop* p) {
- oop this_oop = *p;
- HeapWord* addr = (HeapWord*)this_oop;
+void CMSKeepAliveClosure::do_oop(oop obj) {
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
_bit_map->mark(addr);
@@ -8242,26 +8254,28 @@
simulate_overflow = true;
}
)
- if (simulate_overflow || !_mark_stack->push(this_oop)) {
- _collector->push_on_overflow_list(this_oop);
+ if (simulate_overflow || !_mark_stack->push(obj)) {
+ _collector->push_on_overflow_list(obj);
_collector->_ser_kac_ovflw++;
}
}
}
+void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+
// CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but (may be) available for stealing by other threads.
-void CMSParKeepAliveClosure::do_oop(oop* p) {
- oop this_oop = *p;
- HeapWord* addr = (HeapWord*)this_oop;
+void CMSParKeepAliveClosure::do_oop(oop obj) {
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// In general, during recursive tracing, several threads
// may be concurrently getting here; the first one to
// "tag" it, claims it.
if (_bit_map->par_mark(addr)) {
- bool res = _work_queue->push(this_oop);
+ bool res = _work_queue->push(obj);
assert(res, "Low water mark should be much less than capacity");
// Do a recursive trim in the hope that this will keep
// stack usage lower, but leave some oops for potential stealers
@@ -8270,6 +8284,9 @@
}
}
+void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+
void CMSParKeepAliveClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop new_oop;
@@ -8285,9 +8302,8 @@
}
}
-void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
- oop this_oop = *p;
- HeapWord* addr = (HeapWord*)this_oop;
+void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
if (_bit_map->par_mark(addr)) {
@@ -8299,14 +8315,17 @@
simulate_overflow = true;
}
)
- if (simulate_overflow || !_work_queue->push(this_oop)) {
- _collector->par_push_on_overflow_list(this_oop);
+ if (simulate_overflow || !_work_queue->push(obj)) {
+ _collector->par_push_on_overflow_list(obj);
_collector->_par_kac_ovflw++;
}
} // Else another thread got there already
}
}
+void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+
//////////////////////////////////////////////////////////////////
// CMSExpansionCause /////////////////////////////
//////////////////////////////////////////////////////////////////
@@ -8337,12 +8356,12 @@
while (!_mark_stack->isEmpty() ||
// if stack is empty, check the overflow list
_collector->take_from_overflow_list(num, _mark_stack)) {
- oop this_oop = _mark_stack->pop();
- HeapWord* addr = (HeapWord*)this_oop;
+ oop obj = _mark_stack->pop();
+ HeapWord* addr = (HeapWord*)obj;
assert(_span.contains(addr), "Should be within span");
assert(_bit_map->isMarked(addr), "Should be marked");
- assert(this_oop->is_oop(), "Should be an oop");
- this_oop->oop_iterate(_keep_alive);
+ assert(obj->is_oop(), "Should be an oop");
+ obj->oop_iterate(_keep_alive);
}
}
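The CMS closure changes above all follow one pattern: the virtual do_oop(oop*) and do_oop(narrowOop*) entry points forward to a shared do_oop_work template that decodes the (possibly compressed) slot into a full oop before marking or pushing it. A minimal sketch of that pattern, with a hypothetical closure name and the marking body elided:

    class ExampleMarkClosure: public OopClosure {
     protected:
      // T is either oop or narrowOop; the helper widens a compressed slot
      // back into a full oop before the closure works on it.
      template <class T> void do_oop_work(T* p) {
        oop obj = oopDesc::load_decode_heap_oop_not_null(p);
        // ... mark obj / push obj on the work stack ...
      }
     public:
      virtual void do_oop(oop* p)       { do_oop_work(p); }
      virtual void do_oop(narrowOop* p) { do_oop_work(p); }
    };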
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -1138,7 +1138,7 @@
// Allocation support
HeapWord* allocate(size_t size, bool tlab);
HeapWord* have_lock_and_allocate(size_t size, bool tlab);
- oop promote(oop obj, size_t obj_size, oop* ref);
+ oop promote(oop obj, size_t obj_size);
HeapWord* par_allocate(size_t size, bool tlab) {
return allocate(size, tlab);
}
@@ -1301,9 +1301,8 @@
// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
public:
- void do_oop(oop* p) {
- guarantee(false, "Should be an empty set");
- }
+ void do_oop(oop* p) { guarantee(false, "Should be an empty set"); }
+ void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};
// This closure is used to do concurrent marking from the roots
@@ -1380,6 +1379,12 @@
CMSBitMap* _verification_bm;
CMSBitMap* _cms_bm;
CMSMarkStack* _mark_stack;
+ protected:
+ void do_oop(oop p);
+ template <class T> inline void do_oop_work(T *p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ do_oop(obj);
+ }
public:
PushAndMarkVerifyClosure(CMSCollector* cms_collector,
MemRegion span,
@@ -1387,6 +1392,7 @@
CMSBitMap* cms_bm,
CMSMarkStack* mark_stack);
void do_oop(oop* p);
+ void do_oop(narrowOop* p);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
};
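For reference, a narrowOop is a 32-bit heap offset; decoding shifts it left by the object-alignment bits and adds the heap base, and encoding reverses that. This is a rough sketch of the idea only -- the real helpers live on oopDesc and read the base and shift from VM globals rather than taking them as parameters:

    // narrowOop is a 32-bit unsigned heap offset in the sources.
    inline oop decode(narrowOop v, address base, int shift) {
      return (oop)(base + ((uintptr_t)v << shift));
    }
    inline narrowOop encode(oop o, address base, int shift) {
      return (narrowOop)(((uintptr_t)(address)o - (uintptr_t)base) >> shift);
    }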
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew Sun Apr 13 17:43:42 2008 -0400
@@ -19,7 +19,7 @@
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions.
-//
+//
//
asParNewGeneration.hpp adaptiveSizePolicy.hpp
@@ -66,8 +66,8 @@
parNewGeneration.cpp handles.inline.hpp
parNewGeneration.cpp java.hpp
parNewGeneration.cpp objArrayOop.hpp
+parNewGeneration.cpp oop.inline.hpp
parNewGeneration.cpp oop.pcgc.inline.hpp
-parNewGeneration.cpp oop.inline.hpp
parNewGeneration.cpp parGCAllocBuffer.hpp
parNewGeneration.cpp parNewGeneration.hpp
parNewGeneration.cpp parOopClosures.inline.hpp
@@ -80,3 +80,8 @@
parNewGeneration.hpp defNewGeneration.hpp
parNewGeneration.hpp parGCAllocBuffer.hpp
parNewGeneration.hpp taskqueue.hpp
+
+parOopClosures.hpp genOopClosures.hpp
+
+parOopClosures.inline.hpp parNewGeneration.hpp
+parOopClosures.inline.hpp parOopClosures.hpp
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge Sun Apr 13 17:43:42 2008 -0400
@@ -19,7 +19,7 @@
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions.
-//
+//
//
// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
@@ -279,6 +279,7 @@
psParallelCompact.hpp objectStartArray.hpp
psParallelCompact.hpp oop.hpp
psParallelCompact.hpp parMarkBitMap.hpp
+psParallelCompact.hpp psCompactionManager.hpp
psParallelCompact.hpp sharedHeap.hpp
psOldGen.cpp psAdaptiveSizePolicy.hpp
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -32,18 +32,19 @@
_allocated(0), _wasted(0)
{
assert (min_size() > AlignmentReserve, "Inconsistency!");
+ // arrayOopDesc::header_size depends on command line initialization.
+ FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
+ AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
}
-const size_t ParGCAllocBuffer::FillerHeaderSize =
- align_object_size(arrayOopDesc::header_size(T_INT));
+size_t ParGCAllocBuffer::FillerHeaderSize;
// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
-const size_t ParGCAllocBuffer::AlignmentReserve =
- oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
+size_t ParGCAllocBuffer::AlignmentReserve;
void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
assert(!retain || end_of_gc, "Can only retain at GC end.");
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -41,8 +41,8 @@
size_t _allocated; // in HeapWord units
size_t _wasted; // in HeapWord units
char tail[32];
- static const size_t FillerHeaderSize;
- static const size_t AlignmentReserve;
+ static size_t FillerHeaderSize;
+ static size_t AlignmentReserve;
public:
// Initializes the buffer to be empty, but with the given "word_sz".
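FillerHeaderSize and AlignmentReserve lose their 'static const' initializers because arrayOopDesc::header_size() now depends on UseCompressedOops, which is only known after argument parsing; the values are assigned in the constructor instead. The same pattern recurs below for PSPromotionLAB::filler_header_size. A sketch of the pattern, with hypothetical names:

    struct LabSizes {
      static size_t filler_header_words;   // was a load-time 'static const'
      static void initialize() {           // call once VM flags are parsed
        filler_header_words = align_object_size(arrayOopDesc::header_size(T_INT));
      }
    };
    size_t LabSizes::filler_header_words = 0;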
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -104,16 +104,15 @@
// must be removed.
arrayOop(old)->set_length(end);
}
+
// process our set of indices (include header in first chunk)
- oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr(start);
- oop* end_addr = obj->base() + end; // obj_at_addr(end) asserts end < length
- MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
+ // should make sure end is even (aligned to HeapWord in case of compressed oops)
if ((HeapWord *)obj < young_old_boundary()) {
// object is in to_space
- obj->oop_iterate(&_to_space_closure, mr);
+ obj->oop_iterate_range(&_to_space_closure, start, end);
} else {
// object is in old generation
- obj->oop_iterate(&_old_gen_closure, mr);
+ obj->oop_iterate_range(&_old_gen_closure, start, end);
}
}
@@ -319,7 +318,6 @@
}
}
-
ParScanClosure::ParScanClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
@@ -328,11 +326,25 @@
_boundary = _g->reserved().end();
}
+void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
+void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
+
+void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
+void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
+
+void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
+void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
+
+void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
+void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
+
ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state)
: ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
-{
-}
+{}
+
+void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
+void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
@@ -475,51 +487,66 @@
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
-void
-// ParNewGeneration::
-ParKeepAliveClosure::do_oop(oop* p) {
- // We never expect to see a null reference being processed
- // as a weak reference.
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+template <class T>
+void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+ {
+ assert(!oopDesc::is_null(*p), "expected non-null ref");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ // We never expect to see a null reference being processed
+ // as a weak reference.
+ assert(obj->is_oop(), "expected an oop while scanning weak refs");
+ }
+#endif // ASSERT
_par_cl->do_oop_nv(p);
if (Universe::heap()->is_in_reserved(p)) {
- _rs->write_ref_field_gc_par(p, *p);
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ _rs->write_ref_field_gc_par(p, obj);
}
}
+void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p) { ParKeepAliveClosure::do_oop_work(p); }
+void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }
+
// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
DefNewGeneration::KeepAliveClosure(cl) {}
-void
-// ParNewGeneration::
-KeepAliveClosure::do_oop(oop* p) {
- // We never expect to see a null reference being processed
- // as a weak reference.
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+template <class T>
+void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+ {
+ assert(!oopDesc::is_null(*p), "expected non-null ref");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ // We never expect to see a null reference being processed
+ // as a weak reference.
+ assert(obj->is_oop(), "expected an oop while scanning weak refs");
+ }
+#endif // ASSERT
_cl->do_oop_nv(p);
if (Universe::heap()->is_in_reserved(p)) {
- _rs->write_ref_field_gc_par(p, *p);
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ _rs->write_ref_field_gc_par(p, obj);
}
}
-void ScanClosureWithParBarrier::do_oop(oop* p) {
- oop obj = *p;
- // Should we copy the obj?
- if (obj != NULL) {
+void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClosure::do_oop_work(p); }
+void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
+
+template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
- if (obj->is_forwarded()) {
- *p = obj->forwardee();
- } else {
- *p = _g->DefNewGeneration::copy_to_survivor_space(obj, p);
- }
+ oop new_obj = obj->is_forwarded()
+ ? obj->forwardee()
+ : _g->DefNewGeneration::copy_to_survivor_space(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
if (_gc_barrier) {
// If p points to a younger generation, mark the card.
@@ -530,6 +557,9 @@
}
}
+void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
+void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
+
class ParNewRefProcTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -33,7 +33,6 @@
// but they must be here to allow ParScanClosure::do_oop_work to be defined
// in genOopClosures.inline.hpp.
-
typedef OopTaskQueue ObjToScanQueue;
typedef OopTaskQueueSet ObjToScanQueueSet;
@@ -41,15 +40,20 @@
const int PAR_STATS_ENABLED = 0;
class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
+ private:
ParScanWeakRefClosure* _par_cl;
+ protected:
+ template <class T> void do_oop_work(T* p);
public:
ParKeepAliveClosure(ParScanWeakRefClosure* cl);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
// The state needed by thread performing parallel young-gen collection.
class ParScanThreadState {
friend class ParScanThreadStateSet;
+ private:
ObjToScanQueue *_work_queue;
ParGCAllocBuffer _to_space_alloc_buffer;
@@ -111,7 +115,7 @@
ObjToScanQueueSet* work_queue_set_, size_t desired_plab_sz_,
ParallelTaskTerminator& term_);
-public:
+ public:
ageTable* age_table() {return &_ageTable;}
ObjToScanQueue* work_queue() { return _work_queue; }
@@ -195,13 +199,13 @@
double elapsed() {
return os::elapsedTime() - _start;
}
-
};
class ParNewGenTask: public AbstractGangTask {
- ParNewGeneration* _gen;
- Generation* _next_gen;
- HeapWord* _young_old_boundary;
+ private:
+ ParNewGeneration* _gen;
+ Generation* _next_gen;
+ HeapWord* _young_old_boundary;
class ParScanThreadStateSet* _state_set;
public:
@@ -216,35 +220,44 @@
};
class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
+ protected:
+ template <class T> void do_oop_work(T* p);
public:
KeepAliveClosure(ScanWeakRefClosure* cl);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class EvacuateFollowersClosureGeneral: public VoidClosure {
- GenCollectedHeap* _gch;
- int _level;
- OopsInGenClosure* _scan_cur_or_nonheap;
- OopsInGenClosure* _scan_older;
- public:
- EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
- OopsInGenClosure* cur,
- OopsInGenClosure* older);
- void do_void();
+ private:
+ GenCollectedHeap* _gch;
+ int _level;
+ OopsInGenClosure* _scan_cur_or_nonheap;
+ OopsInGenClosure* _scan_older;
+ public:
+ EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+ OopsInGenClosure* cur,
+ OopsInGenClosure* older);
+ virtual void do_void();
};
// Closure for scanning ParNewGeneration.
// Same as ScanClosure, except does parallel GC barrier.
class ScanClosureWithParBarrier: public ScanClosure {
-public:
+ protected:
+ template <class T> void do_oop_work(T* p);
+ public:
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
-public:
-
+ private:
+ ParNewGeneration& _generation;
+ ParScanThreadStateSet& _state_set;
+ public:
ParNewRefProcTaskExecutor(ParNewGeneration& generation,
ParScanThreadStateSet& state_set)
: _generation(generation), _state_set(state_set)
@@ -255,9 +268,6 @@
virtual void execute(EnqueueTask& task);
// Switch to single threaded mode.
virtual void set_single_threaded_mode();
-private:
- ParNewGeneration& _generation;
- ParScanThreadStateSet& _state_set;
};
@@ -269,6 +279,7 @@
friend class ParNewRefProcTaskExecutor;
friend class ParScanThreadStateSet;
+ private:
// XXX use a global constant instead of 64!
struct ObjToScanQueuePadded {
ObjToScanQueue work_queue;
@@ -314,7 +325,7 @@
// the details of the policy.
virtual void adjust_desired_tenuring_threshold();
-public:
+ public:
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
~ParNewGeneration() {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -26,70 +26,77 @@
class ParScanThreadState;
class ParNewGeneration;
-template<class E> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<oop> ObjToScanQueueSet;
+typedef OopTaskQueueSet ObjToScanQueueSet;
class ParallelTaskTerminator;
class ParScanClosure: public OopsInGenClosure {
-protected:
+ protected:
ParScanThreadState* _par_scan_state;
- ParNewGeneration* _g;
- HeapWord* _boundary;
- void do_oop_work(oop* p,
- bool gc_barrier,
- bool root_scan);
-
- void par_do_barrier(oop* p);
-
-public:
+ ParNewGeneration* _g;
+ HeapWord* _boundary;
+ template <class T> void inline par_do_barrier(T* p);
+ template <class T> void inline do_oop_work(T* p,
+ bool gc_barrier,
+ bool root_scan);
+ public:
ParScanClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state);
};
class ParScanWithBarrierClosure: public ParScanClosure {
-public:
- void do_oop(oop* p) { do_oop_work(p, true, false); }
- void do_oop_nv(oop* p) { do_oop_work(p, true, false); }
+ public:
ParScanWithBarrierClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
ParScanClosure(g, par_scan_state) {}
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
};
class ParScanWithoutBarrierClosure: public ParScanClosure {
-public:
+ public:
ParScanWithoutBarrierClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
ParScanClosure(g, par_scan_state) {}
- void do_oop(oop* p) { do_oop_work(p, false, false); }
- void do_oop_nv(oop* p) { do_oop_work(p, false, false); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
};
class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure {
-public:
+ public:
ParRootScanWithBarrierTwoGensClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
ParScanClosure(g, par_scan_state) {}
- void do_oop(oop* p) { do_oop_work(p, true, true); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class ParRootScanWithoutBarrierClosure: public ParScanClosure {
-public:
+ public:
ParRootScanWithoutBarrierClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
ParScanClosure(g, par_scan_state) {}
- void do_oop(oop* p) { do_oop_work(p, false, true); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class ParScanWeakRefClosure: public ScanWeakRefClosure {
-protected:
+ protected:
ParScanThreadState* _par_scan_state;
-public:
+ template <class T> inline void do_oop_work(T* p);
+ public:
ParScanWeakRefClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state);
- void do_oop(oop* p);
- void do_oop_nv(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
};
class ParEvacuateFollowersClosure: public VoidClosure {
+ private:
ParScanThreadState* _par_scan_state;
ParScanThreadState* par_scan_state() { return _par_scan_state; }
@@ -121,8 +128,7 @@
ParallelTaskTerminator* _terminator;
ParallelTaskTerminator* terminator() { return _terminator; }
-
-public:
+ public:
ParEvacuateFollowersClosure(
ParScanThreadState* par_scan_state_,
ParScanWithoutBarrierClosure* to_space_closure_,
@@ -132,5 +138,5 @@
ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
ObjToScanQueueSet* task_queues_,
ParallelTaskTerminator* terminator_);
- void do_void();
+ virtual void do_void();
};
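The closures above declare both virtual do_oop and inline do_oop_nv variants: the virtual forms serve generic iteration, while specialized, templated iterators bind the non-virtual forms statically and avoid a virtual call per scanned field. A hedged sketch of how such an iterator might invoke the closure (the iterator itself is hypothetical):

    template <class OopClosureType, class T>
    inline void scan_one_field(OopClosureType* cl, T* field_addr) {
      cl->do_oop_nv(field_addr);   // statically bound; no virtual dispatch
    }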
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -22,10 +22,9 @@
*
*/
-inline void ParScanWeakRefClosure::do_oop(oop* p)
-{
- oop obj = *p;
- assert (obj != NULL, "null weak reference?");
+template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
+ assert (!oopDesc::is_null(*p), "null weak reference?");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// weak references are sometimes scanned twice; must check
// that to-space doesn't already contain this object
if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
@@ -33,41 +32,43 @@
// ParScanClosure::do_oop_work).
klassOop objK = obj->klass();
markOop m = obj->mark();
+ oop new_obj;
if (m->is_marked()) { // Contains forwarding pointer.
- *p = ParNewGeneration::real_forwardee(obj);
+ new_obj = ParNewGeneration::real_forwardee(obj);
} else {
size_t obj_sz = obj->size_given_klass(objK->klass_part());
- *p = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
- obj, obj_sz, m);
+ new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
+ obj, obj_sz, m);
}
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
}
-inline void ParScanWeakRefClosure::do_oop_nv(oop* p)
-{
- ParScanWeakRefClosure::do_oop(p);
-}
+inline void ParScanWeakRefClosure::do_oop_nv(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
+inline void ParScanWeakRefClosure::do_oop_nv(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
-inline void ParScanClosure::par_do_barrier(oop* p) {
+template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
- oop obj = *p;
- assert(obj != NULL, "expected non-null object");
+ assert(!oopDesc::is_null(*p), "expected non-null object");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < gen_boundary()) {
rs()->write_ref_field_gc_par(p, obj);
}
}
-inline void ParScanClosure::do_oop_work(oop* p,
+template <class T>
+inline void ParScanClosure::do_oop_work(T* p,
bool gc_barrier,
bool root_scan) {
- oop obj = *p;
assert((!Universe::heap()->is_in_reserved(p) ||
generation()->is_in_reserved(p))
&& (generation()->level() == 0 || gc_barrier),
"The gen must be right, and we must be doing the barrier "
"in older generations.");
- if (obj != NULL) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
// OK, we need to ensure that it is copied.
@@ -78,11 +79,14 @@
// forwarded.
klassOop objK = obj->klass();
markOop m = obj->mark();
+ oop new_obj;
if (m->is_marked()) { // Contains forwarding pointer.
- *p = ParNewGeneration::real_forwardee(obj);
+ new_obj = ParNewGeneration::real_forwardee(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
} else {
size_t obj_sz = obj->size_given_klass(objK->klass_part());
- *p = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
+ new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (root_scan) {
// This may have pushed an object. If we have a root
// category with a lot of roots, can't let the queue get too
@@ -97,3 +101,9 @@
}
}
}
+
+inline void ParScanWithBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
+inline void ParScanWithBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
+
+inline void ParScanWithoutBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
+inline void ParScanWithoutBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
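Both do_oop_work and the weak-ref variant above share the same scavenge step: load the slot, decode it, and if the object is still young either reuse its forwarding pointer or copy it, then store the new location back into the slot. A simplified sketch, with the boundary and copy routine treated as placeholders:

    template <class T>
    void scavenge_slot(T* p, HeapWord* young_boundary /* placeholder */) {
      T v = oopDesc::load_heap_oop(p);
      if (!oopDesc::is_null(v)) {
        oop obj = oopDesc::decode_heap_oop_not_null(v);
        if ((HeapWord*)obj < young_boundary) {          // object may still move
          oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                            : copy_to_survivor_space(obj);  // placeholder
          // stores the new location, re-compressing it when T is narrowOop
          oopDesc::encode_store_heap_oop_not_null(p, new_obj);
        }
      }
    }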
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -28,17 +28,16 @@
// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
- PSYoungGen* _young_gen;
+ private:
+ PSYoungGen* _young_gen;
CardTableExtension* _card_table;
- HeapWord* _unmarked_addr;
- jbyte* _unmarked_card;
+ HeapWord* _unmarked_addr;
+ jbyte* _unmarked_card;
- public:
- CheckForUnmarkedOops( PSYoungGen* young_gen, CardTableExtension* card_table ) :
- _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
-
- virtual void do_oop(oop* p) {
- if (_young_gen->is_in_reserved(*p) &&
+ protected:
+ template <class T> void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ if (_young_gen->is_in_reserved(obj) &&
!_card_table->addr_is_marked_imprecise(p)) {
// Don't overwrite the first missing card mark
if (_unmarked_addr == NULL) {
@@ -48,6 +47,13 @@
}
}
+ public:
+ CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
+ _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
+
+ virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }
+
bool has_unmarked_oop() {
return _unmarked_addr != NULL;
}
@@ -56,7 +62,8 @@
// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
- PSYoungGen* _young_gen;
+ private:
+ PSYoungGen* _young_gen;
CardTableExtension* _card_table;
public:
@@ -75,7 +82,7 @@
// we test for missing precise marks first. If any are found, we don't
// fail unless the object head is also unmarked.
virtual void do_object(oop obj) {
- CheckForUnmarkedOops object_check( _young_gen, _card_table );
+ CheckForUnmarkedOops object_check(_young_gen, _card_table);
obj->oop_iterate(&object_check);
if (object_check.has_unmarked_oop()) {
assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
@@ -85,19 +92,25 @@
// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
- PSYoungGen* _young_gen;
+ private:
+ PSYoungGen* _young_gen;
CardTableExtension* _card_table;
+ protected:
+ template <class T> void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ if (_young_gen->is_in_reserved(obj)) {
+ assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
+ _card_table->set_card_newgen(p);
+ }
+ }
+
public:
CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
_young_gen(young_gen), _card_table(card_table) { }
- virtual void do_oop(oop* p) {
- if (_young_gen->is_in_reserved(*p)) {
- assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
- _card_table->set_card_newgen(p);
- }
- }
+ virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};
// We get passed the space_top value to prevent us from traversing into
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -80,7 +80,7 @@
static bool card_is_verify(int value) { return value == verify_card; }
// Card marking
- void inline_write_ref_field_gc(oop* field, oop new_val) {
+ void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = byte_for(field);
*byte = youngergen_card;
}
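inline_write_ref_field_gc now takes void* because a field may be an oop* or a narrowOop*, and the card index depends only on the field's address. A rough sketch of the address-to-card mapping (the 512-byte card size matches HotSpot's default, but treat the constant as illustrative):

    inline jbyte* card_for(const void* field, jbyte* byte_map_base) {
      return byte_map_base + ((uintptr_t)field >> 9);   // 2^9 = 512-byte cards
    }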
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -146,7 +146,7 @@
{
ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
- TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
+ ChunkTaskQueueSet* qset = ParCompactionManager::chunk_array();
ParallelTaskTerminator terminator(parallel_gc_threads, qset);
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<parallel_gc_threads; i++) {
- inline oop* push_and_pop(oop* p) {
- Prefetch::write((*p)->mark_addr(), 0);
+ template <class T> inline void* push_and_pop(T* p) {
+ oop o = oopDesc::load_decode_heap_oop_not_null(p);
+ Prefetch::write(o->mark_addr(), 0);
// This prefetch is intended to make sure the size field of array
// oops is in cache. It assumes that the object layout is
// mark -> klass -> size, and that mark and klass are heapword
// sized. If this should change, this prefetch will need updating!
- Prefetch::write((*p)->mark_addr() + (HeapWordSize*2), 0);
+ Prefetch::write(o->mark_addr() + (HeapWordSize*2), 0);
_prefetch_queue[_prefetch_index++] = p;
_prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
return _prefetch_queue[_prefetch_index];
}
// Stores a NULL pointer in the pop'd location.
- inline oop* pop() {
+ inline void* pop() {
_prefetch_queue[_prefetch_index++] = NULL;
_prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
return _prefetch_queue[_prefetch_index];
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -168,7 +168,7 @@
start_array->allocate_block(compact_top);
}
- debug_only(MarkSweep::register_live_oop(oop(q), size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
compact_top += size;
assert(compact_top <= dest->space()->end(),
"Exceeding space in destination");
@@ -234,7 +234,7 @@
start_array->allocate_block(compact_top);
}
- debug_only(MarkSweep::register_live_oop(oop(q), sz));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
compact_top += sz;
assert(compact_top <= dest->space()->end(),
"Exceeding space in destination");
@@ -326,15 +326,11 @@
HeapWord* end = _first_dead;
while (q < end) {
- debug_only(MarkSweep::track_interior_pointers(oop(q)));
-
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
-
- debug_only(MarkSweep::check_interior_pointers());
-
- debug_only(MarkSweep::validate_live_oop(oop(q), size));
-
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
q += size;
}
@@ -354,11 +350,11 @@
Prefetch::write(q, interval);
if (oop(q)->is_gc_marked()) {
// q is alive
- debug_only(MarkSweep::track_interior_pointers(oop(q)));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
- debug_only(MarkSweep::check_interior_pointers());
- debug_only(MarkSweep::validate_live_oop(oop(q), size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
debug_only(prev_q = q);
q += size;
} else {
@@ -392,7 +388,7 @@
while (q < end) {
size_t size = oop(q)->size();
assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
- debug_only(MarkSweep::live_oop_moved_to(q, size, q));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));
debug_only(prev_q = q);
q += size;
}
@@ -427,7 +423,7 @@
Prefetch::write(compaction_top, copy_interval);
// copy object and reinit its mark
- debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top));
assert(q != compaction_top, "everything in this pass should be moving");
Copy::aligned_conjoint_words(q, compaction_top, size);
oop(compaction_top)->init_mark();
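The debug_only(...) wrappers around the mark-sweep validation hooks become VALIDATE_MARK_SWEEP_ONLY(...), so the bookkeeping compiles away unless that validation feature is built in, rather than being tied to debug builds. The macro presumably expands along these lines (sketch only):

    #ifdef VALIDATE_MARK_SWEEP
    #define VALIDATE_MARK_SWEEP_ONLY(code) code
    #else
    #define VALIDATE_MARK_SWEEP_ONLY(code)
    #endif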
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -81,14 +81,14 @@
#endif // #ifdef ASSERT
#ifdef VALIDATE_MARK_SWEEP
-GrowableArray<oop*>* PSParallelCompact::_root_refs_stack = NULL;
+GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
size_t PSParallelCompact::_live_oops_index = 0;
size_t PSParallelCompact::_live_oops_index_at_perm = 0;
-GrowableArray<oop*>* PSParallelCompact::_other_refs_stack = NULL;
-GrowableArray<oop*>* PSParallelCompact::_adjusted_pointers = NULL;
+GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
+GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
bool PSParallelCompact::_pointer_tracking = false;
bool PSParallelCompact::_root_tracking = true;
@@ -811,46 +811,23 @@
ParallelCompactData PSParallelCompact::_summary_data;
PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
+
+void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
+bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
+
+void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
+void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
+
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
-void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) {
-#ifdef VALIDATE_MARK_SWEEP
- if (ValidateMarkSweep) {
- if (!Universe::heap()->is_in_reserved(p)) {
- _root_refs_stack->push(p);
- } else {
- _other_refs_stack->push(p);
- }
- }
-#endif
- mark_and_push(_compaction_manager, p);
-}
-
-void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
- oop* p) {
- assert(Universe::heap()->is_in_reserved(p),
- "we should only be traversing objects here");
- oop m = *p;
- if (m != NULL && mark_bitmap()->is_unmarked(m)) {
- if (mark_obj(m)) {
- m->follow_contents(cm); // Follow contents of the marked object
- }
- }
-}
-
-// Anything associated with this variable is temporary.
-
-void PSParallelCompact::mark_and_push_internal(ParCompactionManager* cm,
- oop* p) {
- // Push marked object, contents will be followed later
- oop m = *p;
- if (mark_obj(m)) {
- // This thread marked the object and
- // owns the subsequent processing of it.
- cm->save_for_scanning(m);
- }
-}
+void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
+void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+
+void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
+
+void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
+void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
void PSParallelCompact::post_initialize() {
ParallelScavengeHeap* heap = gc_heap();
@@ -2751,23 +2728,6 @@
young_gen->move_and_update(cm);
}
-void PSParallelCompact::follow_root(ParCompactionManager* cm, oop* p) {
- assert(!Universe::heap()->is_in_reserved(p),
- "roots shouldn't be things within the heap");
-#ifdef VALIDATE_MARK_SWEEP
- if (ValidateMarkSweep) {
- guarantee(!_root_refs_stack->contains(p), "should only be in here once");
- _root_refs_stack->push(p);
- }
-#endif
- oop m = *p;
- if (m != NULL && mark_bitmap()->is_unmarked(m)) {
- if (mark_obj(m)) {
- m->follow_contents(cm); // Follow contents of the marked object
- }
- }
- follow_stack(cm);
-}
void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
while(!cm->overflow_stack()->is_empty()) {
@@ -2807,7 +2767,7 @@
#ifdef VALIDATE_MARK_SWEEP
-void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
+void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
if (!ValidateMarkSweep)
return;
@@ -2821,7 +2781,7 @@
if (index != -1) {
int l = _root_refs_stack->length();
if (l > 0 && l - 1 != index) {
- oop* last = _root_refs_stack->pop();
+ void* last = _root_refs_stack->pop();
assert(last != p, "should be different");
_root_refs_stack->at_put(index, last);
} else {
@@ -2832,7 +2792,7 @@
}
-void PSParallelCompact::check_adjust_pointer(oop* p) {
+void PSParallelCompact::check_adjust_pointer(void* p) {
_adjusted_pointers->push(p);
}
@@ -2840,7 +2800,8 @@
class AdjusterTracker: public OopClosure {
public:
AdjusterTracker() {};
- void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
+ void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
+ void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
};
@@ -2948,25 +2909,6 @@
}
#endif //VALIDATE_MARK_SWEEP
-void PSParallelCompact::adjust_pointer(oop* p, bool isroot) {
- oop obj = *p;
- VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
- if (obj != NULL) {
- oop new_pointer = (oop) summary_data().calc_new_pointer(obj);
- assert(new_pointer != NULL || // is forwarding ptr?
- obj->is_shared(), // never forwarded?
- "should have a new location");
- // Just always do the update unconditionally?
- if (new_pointer != NULL) {
- *p = new_pointer;
- assert(Universe::heap()->is_in_reserved(new_pointer),
- "should be in object space");
- VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
- }
- }
- VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
-}
-
// Update interior oops in the ranges of chunks [beg_chunk, end_chunk).
void
PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -80,11 +80,11 @@
static const size_t ChunkSize;
static const size_t ChunkSizeBytes;
- // Mask for the bits in a size_t to get an offset within a chunk.
+ // Mask for the bits in a size_t to get an offset within a chunk.
static const size_t ChunkSizeOffsetMask;
- // Mask for the bits in a pointer to get an offset within a chunk.
+ // Mask for the bits in a pointer to get an offset within a chunk.
static const size_t ChunkAddrOffsetMask;
- // Mask for the bits in a pointer to get the address of the start of a chunk.
+ // Mask for the bits in a pointer to get the address of the start of a chunk.
static const size_t ChunkAddrMask;
static const size_t Log2BlockSize;
@@ -229,7 +229,7 @@
// 1 bit marks the end of an object.
class BlockData
{
- public:
+ public:
typedef short int blk_ofs_t;
blk_ofs_t offset() const { return _offset >= 0 ? _offset : -_offset; }
@@ -269,7 +269,7 @@
return !_first_is_start_bit;
}
- private:
+ private:
blk_ofs_t _offset;
// This is temporary until the mark_bitmap is separated into
// a start bit array and an end bit array.
@@ -277,7 +277,7 @@
#ifdef ASSERT
short _set_phase;
static short _cur_phase;
- public:
+ public:
static void set_cur_phase(short v) { _cur_phase = v; }
#endif
};
@@ -729,48 +729,51 @@
} SpaceId;
public:
- // In line closure decls
+ // Inline closure decls
//
-
class IsAliveClosure: public BoolObjectClosure {
public:
- void do_object(oop p) { assert(false, "don't call"); }
- bool do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
+ virtual void do_object(oop p);
+ virtual bool do_object_b(oop p);
};
class KeepAliveClosure: public OopClosure {
+ private:
ParCompactionManager* _compaction_manager;
+ protected:
+ template <class T> inline void do_oop_work(T* p);
public:
- KeepAliveClosure(ParCompactionManager* cm) {
- _compaction_manager = cm;
- }
- void do_oop(oop* p);
+ KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
- class FollowRootClosure: public OopsInGenClosure{
+ // Currently unused
+ class FollowRootClosure: public OopsInGenClosure {
+ private:
ParCompactionManager* _compaction_manager;
public:
- FollowRootClosure(ParCompactionManager* cm) {
- _compaction_manager = cm;
- }
- void do_oop(oop* p) { follow_root(_compaction_manager, p); }
+ FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};
class FollowStackClosure: public VoidClosure {
+ private:
ParCompactionManager* _compaction_manager;
public:
- FollowStackClosure(ParCompactionManager* cm) {
- _compaction_manager = cm;
- }
- void do_void() { follow_stack(_compaction_manager); }
+ FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+ virtual void do_void();
};
class AdjustPointerClosure: public OopsInGenClosure {
+ private:
bool _is_root;
public:
- AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
- void do_oop(oop* p) { adjust_pointer(p, _is_root); }
+ AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
// Closure for verifying update of pointers. Does not
@@ -805,8 +808,6 @@
friend class instanceKlassKlass;
friend class RefProcTaskProxy;
- static void mark_and_push_internal(ParCompactionManager* cm, oop* p);
-
private:
static elapsedTimer _accumulated_time;
static unsigned int _total_invocations;
@@ -838,9 +839,9 @@
private:
// Closure accessors
- static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
+ static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
- static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
+ static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
static void initialize_space_info();
@@ -859,10 +860,11 @@
static void follow_stack(ParCompactionManager* cm);
static void follow_weak_klass_links(ParCompactionManager* cm);
- static void adjust_pointer(oop* p, bool is_root);
+ template <class T> static inline void adjust_pointer(T* p, bool is_root);
static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
- static void follow_root(ParCompactionManager* cm, oop* p);
+ template <class T>
+ static inline void follow_root(ParCompactionManager* cm, T* p);
// Compute the dense prefix for the designated space. This is an experimental
// implementation currently not used in production.
@@ -971,14 +973,14 @@
protected:
#ifdef VALIDATE_MARK_SWEEP
- static GrowableArray<oop*>* _root_refs_stack;
+ static GrowableArray<void*>* _root_refs_stack;
static GrowableArray<oop> * _live_oops;
static GrowableArray<oop> * _live_oops_moved_to;
static GrowableArray<size_t>* _live_oops_size;
static size_t _live_oops_index;
static size_t _live_oops_index_at_perm;
- static GrowableArray<oop*>* _other_refs_stack;
- static GrowableArray<oop*>* _adjusted_pointers;
+ static GrowableArray<void*>* _other_refs_stack;
+ static GrowableArray<void*>* _adjusted_pointers;
static bool _pointer_tracking;
static bool _root_tracking;
@@ -999,12 +1001,12 @@
public:
class MarkAndPushClosure: public OopClosure {
+ private:
ParCompactionManager* _compaction_manager;
public:
- MarkAndPushClosure(ParCompactionManager* cm) {
- _compaction_manager = cm;
- }
- void do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
+ MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};
@@ -1038,21 +1040,9 @@
// Marking support
static inline bool mark_obj(oop obj);
- static bool mark_obj(oop* p) {
- if (*p != NULL) {
- return mark_obj(*p);
- } else {
- return false;
- }
- }
- static void mark_and_push(ParCompactionManager* cm, oop* p) {
- // Check mark and maybe push on
- // marking stack
- oop m = *p;
- if (m != NULL && mark_bitmap()->is_unmarked(m)) {
- mark_and_push_internal(cm, p);
- }
- }
+ // Check mark and maybe push on marking stack
+ template <class T> static inline void mark_and_push(ParCompactionManager* cm,
+ T* p);
// Compaction support.
// Return true if p is in the range [beg_addr, end_addr).
@@ -1127,13 +1117,17 @@
static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
// Mark pointer and follow contents.
- static void mark_and_follow(ParCompactionManager* cm, oop* p);
+ template <class T>
+ static inline void mark_and_follow(ParCompactionManager* cm, T* p);
static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
static ParallelCompactData& summary_data() { return _summary_data; }
- static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
- static inline void adjust_pointer(oop* p,
+ static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
+ static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
+
+ template <class T>
+ static inline void adjust_pointer(T* p,
HeapWord* beg_addr,
HeapWord* end_addr);
@@ -1147,8 +1141,8 @@
static jlong millis_since_last_gc();
#ifdef VALIDATE_MARK_SWEEP
- static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
- static void check_adjust_pointer(oop* p); // Adjust this pointer
+ static void track_adjusted_pointer(void* p, bool isroot);
+ static void check_adjust_pointer(void* p);
static void track_interior_pointers(oop obj);
static void check_interior_pointers();
@@ -1185,7 +1179,7 @@
#endif // #ifdef ASSERT
};
-bool PSParallelCompact::mark_obj(oop obj) {
+inline bool PSParallelCompact::mark_obj(oop obj) {
const int obj_size = obj->size();
if (mark_bitmap()->mark_obj(obj, obj_size)) {
_summary_data.add_obj(obj, obj_size);
@@ -1195,13 +1189,94 @@
}
}
-inline bool PSParallelCompact::print_phases()
-{
+template <class T>
+inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
+ assert(!Universe::heap()->is_in_reserved(p),
+ "roots shouldn't be things within the heap");
+#ifdef VALIDATE_MARK_SWEEP
+ if (ValidateMarkSweep) {
+ guarantee(!_root_refs_stack->contains(p), "should only be in here once");
+ _root_refs_stack->push(p);
+ }
+#endif
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (mark_bitmap()->is_unmarked(obj)) {
+ if (mark_obj(obj)) {
+ obj->follow_contents(cm);
+ }
+ }
+ }
+ follow_stack(cm);
+}
+
+template <class T>
+inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
+ T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (mark_bitmap()->is_unmarked(obj)) {
+ if (mark_obj(obj)) {
+ obj->follow_contents(cm);
+ }
+ }
+ }
+}
+
+template <class T>
+inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (mark_bitmap()->is_unmarked(obj)) {
+ if (mark_obj(obj)) {
+ // This thread marked the object and owns the subsequent processing of it.
+ cm->save_for_scanning(obj);
+ }
+ }
+ }
+}
+
+template <class T>
+inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ oop new_obj = (oop)summary_data().calc_new_pointer(obj);
+ assert(new_obj != NULL || // is forwarding ptr?
+ obj->is_shared(), // never forwarded?
+ "should be forwarded");
+ // Just always do the update unconditionally?
+ if (new_obj != NULL) {
+ assert(Universe::heap()->is_in_reserved(new_obj),
+ "should be in object space");
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+ }
+ }
+ VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
+}
+
+template <class T>
+inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
+#ifdef VALIDATE_MARK_SWEEP
+ if (ValidateMarkSweep) {
+ if (!Universe::heap()->is_in_reserved(p)) {
+ _root_refs_stack->push(p);
+ } else {
+ _other_refs_stack->push(p);
+ }
+ }
+#endif
+ mark_and_push(_compaction_manager, p);
+}
+
+inline bool PSParallelCompact::print_phases() {
return _print_phases;
}
-inline double PSParallelCompact::normal_distribution(double density)
-{
+inline double PSParallelCompact::normal_distribution(double density) {
assert(_dwl_initialized, "uninitialized");
const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
@@ -1257,10 +1332,11 @@
return ((HeapWord*) k) >= dense_prefix(perm_space_id);
}
-inline void PSParallelCompact::adjust_pointer(oop* p,
+template <class T>
+inline void PSParallelCompact::adjust_pointer(T* p,
HeapWord* beg_addr,
HeapWord* end_addr) {
- if (is_in(p, beg_addr, end_addr)) {
+ if (is_in((HeapWord*)p, beg_addr, end_addr)) {
adjust_pointer(p);
}
}
@@ -1332,18 +1408,18 @@
inline void do_addr(HeapWord* addr);
};
-inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
+inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
+{
_start_array->allocate_block(addr);
oop(addr)->update_contents(compaction_manager());
}
class FillClosure: public ParMarkBitMapClosure {
-public:
- FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id):
+ public:
+ FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
_space_id(space_id),
- _start_array(PSParallelCompact::start_array(space_id))
- {
+ _start_array(PSParallelCompact::start_array(space_id)) {
assert(_space_id == PSParallelCompact::perm_space_id ||
_space_id == PSParallelCompact::old_space_id,
"cannot use FillClosure in the young gen");
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -25,7 +25,7 @@
#include "incls/_precompiled.incl"
#include "incls/_psPromotionLAB.cpp.incl"
-const size_t PSPromotionLAB::filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));
+size_t PSPromotionLAB::filler_header_size;
// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
@@ -41,6 +41,10 @@
set_end(end);
set_top(bottom);
+ // Initialize after VM starts up because header_size depends on compressed
+ // oops.
+ filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));
+
// We can be initialized to a zero size!
if (free() > 0) {
if (ZapUnusedHeapArea) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -32,7 +32,7 @@
class PSPromotionLAB : public CHeapObj {
protected:
- static const size_t filler_header_size;
+ static size_t filler_header_size;
enum LabState {
needs_flush,
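The two hunks above turn filler_header_size from a compile-time constant into a value computed during LAB initialization, because typeArrayOopDesc::header_size(T_INT) now depends on whether compressed oops are in use, which is only known after argument parsing. A stand-alone sketch of that initialization pattern (the names and byte counts below are illustrative, not HotSpot's):

#include <cstddef>

// A value that depends on a runtime flag cannot be a namespace-scope constant;
// it is left unset and filled in once the flag is known.
struct PromotionLABSketch {
  static std::size_t filler_header_size;   // bytes reserved for an int[] filler header
  static void initialize(bool use_compressed_oops) {
    // assumed layout: mark word + klass (+ length), smaller when compressed
    filler_header_size = use_compressed_oops ? 16 : 24;
  }
};
std::size_t PromotionLABSketch::filler_header_size = 0;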
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -182,7 +182,7 @@
claimed_stack_depth()->initialize();
queue_size = claimed_stack_depth()->max_elems();
// We want the overflow stack to be permanent
- _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<oop*>(10, true);
+ _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
_overflow_stack_breadth = NULL;
} else {
claimed_stack_breadth()->initialize();
@@ -240,6 +240,7 @@
#endif // PS_PM_STATS
}
+
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
assert(depth_first(), "invariant");
assert(overflow_stack_depth() != NULL, "invariant");
@@ -254,13 +255,15 @@
#endif /* ASSERT */
do {
- oop* p;
+ StarTask p;
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while(!overflow_stack_depth()->is_empty()) {
- p = overflow_stack_depth()->pop();
- process_popped_location_depth(p);
+ // linux compiler wants different overloaded operator= in taskqueue to
+ // assign to p that the other compilers don't like.
+ StarTask ptr = overflow_stack_depth()->pop();
+ process_popped_location_depth(ptr);
}
if (totally_drain) {
@@ -365,7 +368,7 @@
//
oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
- assert(PSScavenge::should_scavenge(o), "Sanity");
+ assert(PSScavenge::should_scavenge(&o), "Sanity");
oop new_obj = NULL;
@@ -530,16 +533,30 @@
// This code must come after the CAS test, or it will print incorrect
// information.
if (TraceScavenge) {
- gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}",
- PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring",
+ gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
+ PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
-
}
#endif
return new_obj;
}
+template <class T> void PSPromotionManager::process_array_chunk_work(
+ oop obj,
+ int start, int end) {
+ assert(start < end, "invariant");
+ T* const base = (T*)objArrayOop(obj)->base();
+ T* p = base + start;
+ T* const chunk_end = base + end;
+ while (p < chunk_end) {
+ if (PSScavenge::should_scavenge(p)) {
+ claim_or_forward_depth(p);
+ }
+ ++p;
+ }
+}
+
void PSPromotionManager::process_array_chunk(oop old) {
assert(PSChunkLargeArrays, "invariant");
assert(old->is_objArray(), "invariant");
@@ -569,15 +586,10 @@
arrayOop(old)->set_length(actual_length);
}
- assert(start < end, "invariant");
- oop* const base = objArrayOop(obj)->base();
- oop* p = base + start;
- oop* const chunk_end = base + end;
- while (p < chunk_end) {
- if (PSScavenge::should_scavenge(*p)) {
- claim_or_forward_depth(p);
- }
- ++p;
+ if (UseCompressedOops) {
+ process_array_chunk_work<narrowOop>(obj, start, end);
+ } else {
+ process_array_chunk_work<oop>(obj, start, end);
}
}
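The chunked-array scan above becomes a template so one loop body serves both full-width and compressed reference slots, with the caller picking the instantiation from UseCompressedOops. A self-contained sketch of that width dispatch (the slot types and the per-slot action are stand-ins, not HotSpot types):

#include <cstdint>
#include <cstdio>

typedef uintptr_t wide_slot;    // stand-in for oop
typedef uint32_t  narrow_slot;  // stand-in for narrowOop

template <class T>
void scan_chunk_work(T* base, int start, int end) {
  for (T* p = base + start; p < base + end; ++p) {
    std::printf("visiting slot %p\n", (void*)p);  // stand-in for claim_or_forward_depth(p)
  }
}

void scan_chunk(void* base, int start, int end, bool use_compressed_oops) {
  if (use_compressed_oops) {
    scan_chunk_work((narrow_slot*)base, start, end);
  } else {
    scan_chunk_work((wide_slot*)base, start, end);
  }
}

int main() {
  wide_slot slots[8] = {0};
  scan_chunk(slots, 2, 6, false);   // visits slots 2..5 at full width
}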
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -42,8 +42,6 @@
class PSOldGen;
class ParCompactionManager;
-#define PS_CHUNKED_ARRAY_OOP_MASK 1
-
#define PS_PM_STATS 0
class PSPromotionManager : public CHeapObj {
@@ -80,7 +78,7 @@
PrefetchQueue _prefetch_queue;
OopStarTaskQueue _claimed_stack_depth;
- GrowableArray<oop*>* _overflow_stack_depth;
+ GrowableArray<StarTask>* _overflow_stack_depth;
OopTaskQueue _claimed_stack_breadth;
GrowableArray<oop>* _overflow_stack_breadth;
@@ -92,13 +90,15 @@
uint _min_array_size_for_chunking;
// Accessors
- static PSOldGen* old_gen() { return _old_gen; }
- static MutableSpace* young_space() { return _young_space; }
+ static PSOldGen* old_gen() { return _old_gen; }
+ static MutableSpace* young_space() { return _young_space; }
inline static PSPromotionManager* manager_array(int index);
+ template <class T> inline void claim_or_forward_internal_depth(T* p);
+ template <class T> inline void claim_or_forward_internal_breadth(T* p);
- GrowableArray<oop*>* overflow_stack_depth() { return _overflow_stack_depth; }
- GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
+ GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
+ GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
// On the task queues we push reference locations as well as
// partially-scanned arrays (in the latter case, we push an oop to
@@ -116,27 +116,37 @@
// (oop). We do all the necessary casting in the mask / unmask
// methods to avoid sprinkling the rest of the code with more casts.
- bool is_oop_masked(oop* p) {
- return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
+ // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
+ // future masks) can't conflict with COMPRESSED_OOP_MASK
+#define PS_CHUNKED_ARRAY_OOP_MASK 0x2
+
+ bool is_oop_masked(StarTask p) {
+ // If something is marked chunked it's always treated like wide oop*
+ return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
+ PS_CHUNKED_ARRAY_OOP_MASK;
}
oop* mask_chunked_array_oop(oop obj) {
assert(!is_oop_masked((oop*) obj), "invariant");
- oop* ret = (oop*) ((intptr_t) obj | PS_CHUNKED_ARRAY_OOP_MASK);
+ oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
assert(is_oop_masked(ret), "invariant");
return ret;
}
- oop unmask_chunked_array_oop(oop* p) {
+ oop unmask_chunked_array_oop(StarTask p) {
assert(is_oop_masked(p), "invariant");
- oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
+ assert(!p.is_narrow(), "chunked array oops cannot be narrow");
+ oop *chunk = (oop*)p; // cast p to oop (uses conversion operator)
+ oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
assert(!is_oop_masked((oop*) ret), "invariant");
return ret;
}
+ template <class T> void process_array_chunk_work(oop obj,
+ int start, int end);
void process_array_chunk(oop old);
- void push_depth(oop* p) {
+ template <class T> void push_depth(T* p) {
assert(depth_first(), "pre-condition");
#if PS_PM_STATS
@@ -175,7 +185,7 @@
}
protected:
- static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
+ static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
static OopTaskQueueSet* stack_array_breadth() { return _stack_array_breadth; }
public:
@@ -227,6 +237,7 @@
drain_stacks_breadth(totally_drain);
}
}
+ public:
void drain_stacks_cond_depth() {
if (claimed_stack_depth()->size() > _target_stack_size) {
drain_stacks_depth(false);
@@ -256,15 +267,11 @@
return _depth_first;
}
- inline void process_popped_location_depth(oop* p);
+ inline void process_popped_location_depth(StarTask p);
inline void flush_prefetch_queue();
-
- inline void claim_or_forward_depth(oop* p);
- inline void claim_or_forward_internal_depth(oop* p);
-
- inline void claim_or_forward_breadth(oop* p);
- inline void claim_or_forward_internal_breadth(oop* p);
+ template <class T> inline void claim_or_forward_depth(T* p);
+ template <class T> inline void claim_or_forward_breadth(T* p);
#if PS_PM_STATS
void increment_steals(oop* p = NULL) {
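The mask above moves to 0x2 so the "partially scanned array" marker cannot collide with a low bit the task queue uses to tag compressed-oop locations; both rely on heap pointers being at least 8-byte aligned. A stand-alone sketch of that kind of low-bit tagging (the bit assignments mirror the idea but are not the VM's exact encoding):

#include <cassert>
#include <cstdint>

const uintptr_t NARROW_BIT  = 0x1;   // assumed: marks a narrowOop* task
const uintptr_t CHUNKED_BIT = 0x2;   // assumed: marks a partially scanned array

inline void* tag_chunked(void* obj) {
  assert(((uintptr_t)obj & (NARROW_BIT | CHUNKED_BIT)) == 0 && "low bits must be free");
  return (void*)((uintptr_t)obj | CHUNKED_BIT);
}
inline bool  is_chunked(void* p)    { return ((uintptr_t)p & CHUNKED_BIT) != 0; }
inline void* untag_chunked(void* p) { return (void*)((uintptr_t)p & ~CHUNKED_BIT); }

int main() {
  alignas(8) static int obj;          // heap objects are at least 8-byte aligned
  void* tagged = tag_chunked(&obj);
  assert(is_chunked(tagged) && untag_chunked(tagged) == &obj);
}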
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -28,64 +28,68 @@
return _manager_array[index];
}
-inline void PSPromotionManager::claim_or_forward_internal_depth(oop* p) {
- if (p != NULL) {
- oop o = *p;
+template <class T>
+inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
+ if (p != NULL) { // XXX: error if p != NULL here
+ oop o = oopDesc::load_decode_heap_oop_not_null(p);
if (o->is_forwarded()) {
o = o->forwardee();
-
// Card mark
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
}
- *p = o;
+ oopDesc::encode_store_heap_oop_not_null(p, o);
} else {
push_depth(p);
}
}
}
-inline void PSPromotionManager::claim_or_forward_internal_breadth(oop* p) {
- if (p != NULL) {
- oop o = *p;
+template <class T>
+inline void PSPromotionManager::claim_or_forward_internal_breadth(T* p) {
+ if (p != NULL) { // XXX: error if p != NULL here
+ oop o = oopDesc::load_decode_heap_oop_not_null(p);
if (o->is_forwarded()) {
o = o->forwardee();
} else {
o = copy_to_survivor_space(o, false);
}
-
// Card mark
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
}
- *p = o;
+ oopDesc::encode_store_heap_oop_not_null(p, o);
}
}
inline void PSPromotionManager::flush_prefetch_queue() {
assert(!depth_first(), "invariant");
- for (int i=0; i<_prefetch_queue.length(); i++) {
- claim_or_forward_internal_breadth(_prefetch_queue.pop());
+ for (int i = 0; i < _prefetch_queue.length(); i++) {
+ claim_or_forward_internal_breadth((oop*)_prefetch_queue.pop());
}
}
-inline void PSPromotionManager::claim_or_forward_depth(oop* p) {
+template <class T>
+inline void PSPromotionManager::claim_or_forward_depth(T* p) {
assert(depth_first(), "invariant");
- assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
- assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
+ assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
+ "Sanity");
assert(Universe::heap()->is_in(p), "pointer outside heap");
claim_or_forward_internal_depth(p);
}
-inline void PSPromotionManager::claim_or_forward_breadth(oop* p) {
+template <class T>
+inline void PSPromotionManager::claim_or_forward_breadth(T* p) {
assert(!depth_first(), "invariant");
- assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
- assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
+ assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
+ "Sanity");
assert(Universe::heap()->is_in(p), "pointer outside heap");
if (UsePrefetchQueue) {
- claim_or_forward_internal_breadth(_prefetch_queue.push_and_pop(p));
+ claim_or_forward_internal_breadth((T*)_prefetch_queue.push_and_pop(p));
} else {
// This option is used for testing. The use of the prefetch
// queue can delay the processing of the objects and thus
@@ -106,12 +110,16 @@
}
}
-inline void PSPromotionManager::process_popped_location_depth(oop* p) {
+inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
if (is_oop_masked(p)) {
assert(PSChunkLargeArrays, "invariant");
oop const old = unmask_chunked_array_oop(p);
process_array_chunk(old);
} else {
- PSScavenge::copy_and_push_safe_barrier(this, p);
+ if (p.is_narrow()) {
+ PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
+ } else {
+ PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
+ }
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -65,16 +65,18 @@
assert(_promotion_manager != NULL, "Sanity");
}
- void do_oop(oop* p) {
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+ template <class T> void do_oop_work(T* p) {
+ assert (!oopDesc::is_null(*p), "expected non-null ref");
+ assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
+ "expected an oop while scanning weak refs");
- oop obj = oop(*p);
// Weak refs may be visited more than once.
- if (PSScavenge::should_scavenge(obj, _to_space)) {
+ if (PSScavenge::should_scavenge(p, _to_space)) {
PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
}
}
+ virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};
class PSEvacuateFollowersClosure: public VoidClosure {
@@ -83,7 +85,7 @@
public:
PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
- void do_void() {
+ virtual void do_void() {
assert(_promotion_manager != NULL, "Sanity");
_promotion_manager->drain_stacks(true);
guarantee(_promotion_manager->stacks_empty(),
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -116,16 +116,16 @@
// If an attempt to promote fails, this method is invoked
static void oop_promotion_failed(oop obj, markOop obj_mark);
- static inline bool should_scavenge(oop p);
+ template <class T> static inline bool should_scavenge(T* p);
// These call should_scavenge() above and, if it returns true, also check that
// the object was not newly copied into to_space. The version with the bool
// argument is a convenience wrapper that fetches the to_space pointer from
// the heap and calls the other version (if the arg is true).
- static inline bool should_scavenge(oop p, MutableSpace* to_space);
- static inline bool should_scavenge(oop p, bool check_to_space);
+ template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
+ template <class T> static inline bool should_scavenge(T* p, bool check_to_space);
- inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, oop* p);
+ template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
// Is an object in the young generation
// This assumes that the HeapWord argument is in the heap,
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -22,28 +22,33 @@
*
*/
-
inline void PSScavenge::save_to_space_top_before_gc() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
_to_space_top_before_gc = heap->young_gen()->to_space()->top();
}
-inline bool PSScavenge::should_scavenge(oop p) {
- return p == NULL ? false : PSScavenge::is_obj_in_young((HeapWord*) p);
+template <class T> inline bool PSScavenge::should_scavenge(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (oopDesc::is_null(heap_oop)) return false;
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ return PSScavenge::is_obj_in_young((HeapWord*)obj);
}
-inline bool PSScavenge::should_scavenge(oop p, MutableSpace* to_space) {
+template <class T>
+inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
if (should_scavenge(p)) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// Skip objects copied to to_space since the scavenge started.
- HeapWord* const addr = (HeapWord*) p;
+ HeapWord* const addr = (HeapWord*)obj;
return addr < to_space_top_before_gc() || addr >= to_space->end();
}
return false;
}
-inline bool PSScavenge::should_scavenge(oop p, bool check_to_space) {
+template <class T>
+inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
if (check_to_space) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*) Universe::heap();
+ ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
return should_scavenge(p, heap->young_gen()->to_space());
}
return should_scavenge(p);
@@ -52,24 +57,23 @@
// Attempt to "claim" oop at p via CAS, push the new obj if successful
// This version tests the oop* to make sure it is within the heap before
// attempting marking.
+template <class T>
inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
- oop* p) {
- assert(should_scavenge(*p, true), "revisiting object?");
+ T* p) {
+ assert(should_scavenge(p, true), "revisiting object?");
- oop o = *p;
- if (o->is_forwarded()) {
- *p = o->forwardee();
- } else {
- *p = pm->copy_to_survivor_space(o, pm->depth_first());
- }
+ oop o = oopDesc::load_decode_heap_oop_not_null(p);
+ oop new_obj = o->is_forwarded()
+ ? o->forwardee()
+ : pm->copy_to_survivor_space(o, pm->depth_first());
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
// We cannot mark without test, as some code passes us pointers
// that are outside the heap.
- if ((!PSScavenge::is_obj_in_young((HeapWord*) p)) &&
+ if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
Universe::heap()->is_in_reserved(p)) {
- o = *p;
- if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
- card_table()->inline_write_ref_field_gc(p, o);
+ if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
+ card_table()->inline_write_ref_field_gc(p, new_obj);
}
}
}
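The templates above all funnel through the same load/decode and encode/store helpers: a reference slot holds either a raw pointer or a 32-bit offset from the heap base, scaled by the object alignment. A simplified, self-contained version of that encoding (the base and shift here are assumptions for illustration; the VM derives them from the reserved heap):

#include <cassert>
#include <cstdint>

static char*     heap_base = nullptr;   // would be the start of the reserved heap
static const int oop_shift = 3;         // log2 of 8-byte object alignment (assumed)

inline void* decode(uint32_t narrow) {
  return narrow == 0 ? nullptr : heap_base + ((uintptr_t)narrow << oop_shift);
}
inline uint32_t encode(void* obj) {
  return obj == nullptr ? 0u
                        : (uint32_t)(((char*)obj - heap_base) >> oop_shift);
}

// Store through either slot width, mirroring encode_store_heap_oop_not_null.
inline void store_obj(uint32_t* slot, void* obj) { *slot = encode(obj); }
inline void store_obj(void**    slot, void* obj) { *slot = obj; }

int main() {
  static char heap[1 << 12];
  heap_base = heap;
  void* obj = heap + 64;                // pretend an object sits at offset 64
  uint32_t narrow_slot = 0;
  store_obj(&narrow_slot, obj);
  assert(decode(narrow_slot) == obj);
}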
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -34,15 +34,17 @@
private:
PSPromotionManager* _promotion_manager;
- public:
- PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
-
- virtual void do_oop(oop* p) {
- if (PSScavenge::should_scavenge(*p)) {
+ protected:
+ template <class T> void do_oop_work(T *p) {
+ if (PSScavenge::should_scavenge(p)) {
// We never card mark roots, maybe call a func without test?
PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
}
}
+ public:
+ PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
+ void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); }
+ void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
};
void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
@@ -135,7 +137,7 @@
int random_seed = 17;
if (pm->depth_first()) {
while(true) {
- oop* p;
+ StarTask p;
if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
#if PS_PM_STATS
pm->increment_steals(p);
@@ -164,8 +166,7 @@
}
}
}
- guarantee(pm->stacks_empty(),
- "stacks should be empty at this point");
+ guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}
//
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -36,16 +36,16 @@
ReferenceProcessor* MarkSweep::_ref_processor = NULL;
#ifdef VALIDATE_MARK_SWEEP
-GrowableArray<oop*>* MarkSweep::_root_refs_stack = NULL;
+GrowableArray<void*>* MarkSweep::_root_refs_stack = NULL;
GrowableArray<oop> * MarkSweep::_live_oops = NULL;
GrowableArray<oop> * MarkSweep::_live_oops_moved_to = NULL;
GrowableArray<size_t>* MarkSweep::_live_oops_size = NULL;
size_t MarkSweep::_live_oops_index = 0;
size_t MarkSweep::_live_oops_index_at_perm = 0;
-GrowableArray<oop*>* MarkSweep::_other_refs_stack = NULL;
-GrowableArray<oop*>* MarkSweep::_adjusted_pointers = NULL;
-bool MarkSweep::_pointer_tracking = false;
-bool MarkSweep::_root_tracking = true;
+GrowableArray<void*>* MarkSweep::_other_refs_stack = NULL;
+GrowableArray<void*>* MarkSweep::_adjusted_pointers = NULL;
+bool MarkSweep::_pointer_tracking = false;
+bool MarkSweep::_root_tracking = true;
GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops = NULL;
GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops_moved_to = NULL;
@@ -59,7 +59,6 @@
_revisit_klass_stack->push(k);
}
-
void MarkSweep::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point.
// Update and follow all subklass, sibling and implementor links.
@@ -69,44 +68,15 @@
follow_stack();
}
+MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
-void MarkSweep::mark_and_follow(oop* p) {
- assert(Universe::heap()->is_in_reserved(p),
- "we should only be traversing objects here");
- oop m = *p;
- if (m != NULL && !m->mark()->is_marked()) {
- mark_object(m);
- m->follow_contents(); // Follow contents of the marked object
- }
-}
-
-void MarkSweep::_mark_and_push(oop* p) {
- // Push marked object, contents will be followed later
- oop m = *p;
- mark_object(m);
- _marking_stack->push(m);
-}
+void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
+void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
-void MarkSweep::follow_root(oop* p) {
- assert(!Universe::heap()->is_in_reserved(p),
- "roots shouldn't be things within the heap");
-#ifdef VALIDATE_MARK_SWEEP
- if (ValidateMarkSweep) {
- guarantee(!_root_refs_stack->contains(p), "should only be in here once");
- _root_refs_stack->push(p);
- }
-#endif
- oop m = *p;
- if (m != NULL && !m->mark()->is_marked()) {
- mark_object(m);
- m->follow_contents(); // Follow contents of the marked object
- }
- follow_stack();
-}
-
-MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
+void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); }
+void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
void MarkSweep::follow_stack() {
while (!_marking_stack->is_empty()) {
@@ -118,6 +88,7 @@
MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;
+void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
// We preserve the mark which should be replaced at the end and the location that it
// will go. Note that the object that this markOop belongs to isn't currently at that
@@ -142,6 +113,9 @@
MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true);
MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false);
+void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
+void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+
void MarkSweep::adjust_marks() {
assert(_preserved_oop_stack == NULL ||
_preserved_oop_stack->length() == _preserved_mark_stack->length(),
@@ -187,7 +161,7 @@
#ifdef VALIDATE_MARK_SWEEP
-void MarkSweep::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
+void MarkSweep::track_adjusted_pointer(void* p, bool isroot) {
if (!ValidateMarkSweep)
return;
@@ -201,7 +175,7 @@
if (index != -1) {
int l = _root_refs_stack->length();
if (l > 0 && l - 1 != index) {
- oop* last = _root_refs_stack->pop();
+ void* last = _root_refs_stack->pop();
assert(last != p, "should be different");
_root_refs_stack->at_put(index, last);
} else {
@@ -211,19 +185,17 @@
}
}
-
-void MarkSweep::check_adjust_pointer(oop* p) {
+void MarkSweep::check_adjust_pointer(void* p) {
_adjusted_pointers->push(p);
}
-
class AdjusterTracker: public OopClosure {
public:
- AdjusterTracker() {};
- void do_oop(oop* o) { MarkSweep::check_adjust_pointer(o); }
+ AdjusterTracker() {}
+ void do_oop(oop* o) { MarkSweep::check_adjust_pointer(o); }
+ void do_oop(narrowOop* o) { MarkSweep::check_adjust_pointer(o); }
};
-
void MarkSweep::track_interior_pointers(oop obj) {
if (ValidateMarkSweep) {
_adjusted_pointers->clear();
@@ -234,7 +206,6 @@
}
}
-
void MarkSweep::check_interior_pointers() {
if (ValidateMarkSweep) {
_pointer_tracking = false;
@@ -242,7 +213,6 @@
}
}
-
void MarkSweep::reset_live_oop_tracking(bool at_perm) {
if (ValidateMarkSweep) {
guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
@@ -250,7 +220,6 @@
}
}
-
void MarkSweep::register_live_oop(oop p, size_t size) {
if (ValidateMarkSweep) {
_live_oops->push(p);
@@ -283,7 +252,6 @@
}
}
-
void MarkSweep::compaction_complete() {
if (RecordMarkSweepCompaction) {
GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
@@ -299,7 +267,6 @@
}
}
-
void MarkSweep::print_new_location_of_heap_address(HeapWord* q) {
if (!RecordMarkSweepCompaction) {
tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
@@ -318,7 +285,7 @@
HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
size_t offset = (q - old_oop);
tty->print_cr("Address " PTR_FORMAT, q);
- tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
+ tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset);
tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
return;
}
@@ -328,23 +295,16 @@
}
#endif //VALIDATE_MARK_SWEEP
-MarkSweep::IsAliveClosure MarkSweep::is_alive;
+MarkSweep::IsAliveClosure MarkSweep::is_alive;
-void MarkSweep::KeepAliveClosure::do_oop(oop* p) {
-#ifdef VALIDATE_MARK_SWEEP
- if (ValidateMarkSweep) {
- if (!Universe::heap()->is_in_reserved(p)) {
- _root_refs_stack->push(p);
- } else {
- _other_refs_stack->push(p);
- }
- }
-#endif
- mark_and_push(p);
-}
+void MarkSweep::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
+bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
MarkSweep::KeepAliveClosure MarkSweep::keep_alive;
+void MarkSweep::KeepAliveClosure::do_oop(oop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
+void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
+
void marksweep_init() { /* empty */ }
#ifndef PRODUCT
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -46,55 +46,59 @@
#define VALIDATE_MARK_SWEEP_ONLY(code)
#endif
-
// declared at end
class PreservedMark;
class MarkSweep : AllStatic {
//
- // In line closure decls
+ // Inline closure decls
//
-
- class FollowRootClosure: public OopsInGenClosure{
+ class FollowRootClosure: public OopsInGenClosure {
public:
- void do_oop(oop* p) { follow_root(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};
class MarkAndPushClosure: public OopClosure {
public:
- void do_oop(oop* p) { mark_and_push(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};
class FollowStackClosure: public VoidClosure {
public:
- void do_void() { follow_stack(); }
+ virtual void do_void();
};
class AdjustPointerClosure: public OopsInGenClosure {
+ private:
bool _is_root;
public:
AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
- void do_oop(oop* p) { _adjust_pointer(p, _is_root); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
// Used for java/lang/ref handling
class IsAliveClosure: public BoolObjectClosure {
public:
- void do_object(oop p) { assert(false, "don't call"); }
- bool do_object_b(oop p) { return p->is_gc_marked(); }
+ virtual void do_object(oop p);
+ virtual bool do_object_b(oop p);
};
class KeepAliveClosure: public OopClosure {
+ protected:
+ template <class T> void do_oop_work(T* p);
public:
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
//
// Friend decls
//
-
friend class AdjustPointerClosure;
friend class KeepAliveClosure;
friend class VM_MarkSweep;
@@ -120,14 +124,14 @@
static ReferenceProcessor* _ref_processor;
#ifdef VALIDATE_MARK_SWEEP
- static GrowableArray<oop*>* _root_refs_stack;
+ static GrowableArray<void*>* _root_refs_stack;
static GrowableArray<oop> * _live_oops;
static GrowableArray<oop> * _live_oops_moved_to;
static GrowableArray<size_t>* _live_oops_size;
static size_t _live_oops_index;
static size_t _live_oops_index_at_perm;
- static GrowableArray<oop*>* _other_refs_stack;
- static GrowableArray<oop*>* _adjusted_pointers;
+ static GrowableArray<void*>* _other_refs_stack;
+ static GrowableArray<void*>* _adjusted_pointers;
static bool _pointer_tracking;
static bool _root_tracking;
@@ -146,9 +150,8 @@
static GrowableArray<size_t>* _last_gc_live_oops_size;
#endif
-
// Non public closures
- static IsAliveClosure is_alive;
+ static IsAliveClosure is_alive;
static KeepAliveClosure keep_alive;
// Class unloading. Update subklass/sibling/implementor links at end of marking phase.
@@ -159,9 +162,9 @@
public:
// Public closures
- static FollowRootClosure follow_root_closure;
- static MarkAndPushClosure mark_and_push_closure;
- static FollowStackClosure follow_stack_closure;
+ static FollowRootClosure follow_root_closure;
+ static MarkAndPushClosure mark_and_push_closure;
+ static FollowStackClosure follow_stack_closure;
static AdjustPointerClosure adjust_root_pointer_closure;
static AdjustPointerClosure adjust_pointer_closure;
@@ -170,39 +173,29 @@
// Call backs for marking
static void mark_object(oop obj);
- static void follow_root(oop* p); // Mark pointer and follow contents. Empty marking
-
- // stack afterwards.
+ // Mark pointer and follow contents. Empty marking stack afterwards.
+ template <class T> static inline void follow_root(T* p);
+ // Mark pointer and follow contents.
+ template <class T> static inline void mark_and_follow(T* p);
+ // Check mark and maybe push on marking stack
+ template <class T> static inline void mark_and_push(T* p);
- static void mark_and_follow(oop* p); // Mark pointer and follow contents.
- static void _mark_and_push(oop* p); // Mark pointer and push obj on
- // marking stack.
-
+ static void follow_stack(); // Empty marking stack.
- static void mark_and_push(oop* p) { // Check mark and maybe push on
- // marking stack
- // assert(Universe::is_reserved_heap((oop)p), "we should only be traversing objects here");
- oop m = *p;
- if (m != NULL && !m->mark()->is_marked()) {
- _mark_and_push(p);
- }
- }
+ static void preserve_mark(oop p, markOop mark);
+ // Save the mark word so it can be restored later
+ static void adjust_marks(); // Adjust the pointers in the preserved marks table
+ static void restore_marks(); // Restore the marks that we saved in preserve_mark
- static void follow_stack(); // Empty marking stack.
-
+ template <class T> static inline void adjust_pointer(T* p, bool isroot);
- static void preserve_mark(oop p, markOop mark); // Save the mark word so it can be restored later
- static void adjust_marks(); // Adjust the pointers in the preserved marks table
- static void restore_marks(); // Restore the marks that we saved in preserve_mark
-
- static void _adjust_pointer(oop* p, bool isroot);
-
- static void adjust_root_pointer(oop* p) { _adjust_pointer(p, true); }
- static void adjust_pointer(oop* p) { _adjust_pointer(p, false); }
+ static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
+ static void adjust_pointer(oop* p) { adjust_pointer(p, false); }
+ static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
#ifdef VALIDATE_MARK_SWEEP
- static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
- static void check_adjust_pointer(oop* p); // Adjust this pointer
+ static void track_adjusted_pointer(void* p, bool isroot);
+ static void check_adjust_pointer(void* p);
static void track_interior_pointers(oop obj);
static void check_interior_pointers();
@@ -223,7 +216,6 @@
static void revisit_weak_klass_link(Klass* k); // Update subklass/sibling/implementor links at end of marking.
};
-
class PreservedMark VALUE_OBJ_CLASS_SPEC {
private:
oop _obj;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -22,32 +22,11 @@
*
*/
-inline void MarkSweep::_adjust_pointer(oop* p, bool isroot) {
- oop obj = *p;
- VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
- if (obj != NULL) {
- oop new_pointer = oop(obj->mark()->decode_pointer());
- assert(new_pointer != NULL || // is forwarding ptr?
- obj->mark() == markOopDesc::prototype() || // not gc marked?
- (UseBiasedLocking && obj->mark()->has_bias_pattern()) || // not gc marked?
- obj->is_shared(), // never forwarded?
- "should contain a forwarding pointer");
- if (new_pointer != NULL) {
- *p = new_pointer;
- assert(Universe::heap()->is_in_reserved(new_pointer),
- "should be in object space");
- VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
- }
- }
- VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
-}
-
inline void MarkSweep::mark_object(oop obj) {
-
#ifndef SERIALGC
if (UseParallelOldGC && VerifyParallelOldWithMarkSweep) {
assert(PSParallelCompact::mark_bitmap()->is_marked(obj),
- "Should be marked in the marking bitmap");
+ "Should be marked in the marking bitmap");
}
#endif // SERIALGC
@@ -60,3 +39,80 @@
preserve_mark(obj, mark);
}
}
+
+template <class T> inline void MarkSweep::follow_root(T* p) {
+ assert(!Universe::heap()->is_in_reserved(p),
+ "roots shouldn't be things within the heap");
+#ifdef VALIDATE_MARK_SWEEP
+ if (ValidateMarkSweep) {
+ guarantee(!_root_refs_stack->contains(p), "should only be in here once");
+ _root_refs_stack->push(p);
+ }
+#endif
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (!obj->mark()->is_marked()) {
+ mark_object(obj);
+ obj->follow_contents();
+ }
+ }
+ follow_stack();
+}
+
+template <class T> inline void MarkSweep::mark_and_follow(T* p) {
+// assert(Universe::heap()->is_in_reserved(p), "should be in object space");
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (!obj->mark()->is_marked()) {
+ mark_object(obj);
+ obj->follow_contents();
+ }
+ }
+}
+
+template <class T> inline void MarkSweep::mark_and_push(T* p) {
+// assert(Universe::heap()->is_in_reserved(p), "should be in object space");
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (!obj->mark()->is_marked()) {
+ mark_object(obj);
+ _marking_stack->push(obj);
+ }
+ }
+}
+
+template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ oop new_obj = oop(obj->mark()->decode_pointer());
+ assert(new_obj != NULL || // is forwarding ptr?
+ obj->mark() == markOopDesc::prototype() || // not gc marked?
+ (UseBiasedLocking && obj->mark()->has_bias_pattern()) ||
+ // not gc marked?
+ obj->is_shared(), // never forwarded?
+ "should be forwarded");
+ if (new_obj != NULL) {
+ assert(Universe::heap()->is_in_reserved(new_obj),
+ "should be in object space");
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+ }
+ }
+ VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
+}
+
+template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
+#ifdef VALIDATE_MARK_SWEEP
+ if (ValidateMarkSweep) {
+ if (!Universe::heap()->is_in_reserved(p)) {
+ _root_refs_stack->push(p);
+ } else {
+ _other_refs_stack->push(p);
+ }
+ }
+#endif
+ mark_and_push(p);
+}
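adjust_pointer above reads the forwarding pointer that compaction stored in the mark word and writes the new location back through the slot, in whichever width the slot uses. A toy version of that step, with the forwarding information held in a map instead of the mark word (all names are stand-ins, not the VM's):

#include <cassert>
#include <unordered_map>

struct Obj { int payload; };

// Stand-in for "forwarding pointer in the mark word": where each live object moves.
static std::unordered_map<Obj*, Obj*> forwardee;

template <class Slot, class DecodeFn, class EncodeFn>
void adjust_slot(Slot* p, DecodeFn decode, EncodeFn encode) {
  Obj* obj = decode(*p);
  if (obj == nullptr) return;                          // null slots are left alone
  auto it = forwardee.find(obj);
  if (it != forwardee.end()) *p = encode(it->second);  // point at the new copy
}

int main() {
  Obj from{1}, to{1};
  forwardee[&from] = &to;
  Obj* slot = &from;                                   // a full-width slot
  adjust_slot(&slot, [](Obj* v) { return v; }, [](Obj* v) { return v; });
  assert(slot == &to);
}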
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_interface/collectedHeap.cpp
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -35,7 +35,6 @@
CollectedHeap::CollectedHeap() :
_reserved(), _barrier_set(NULL), _is_gc_active(false),
_total_collections(0), _total_full_collections(0),
- _max_heap_capacity(0),
_gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
NOT_PRODUCT(_promotion_failure_alot_count = 0;)
NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_interface/collectedHeap.hpp
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -53,7 +53,6 @@
bool _is_gc_active;
unsigned int _total_collections; // ... started
unsigned int _total_full_collections; // ... started
- size_t _max_heap_capacity;
NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
@@ -149,10 +148,7 @@
virtual void post_initialize() = 0;
MemRegion reserved_region() const { return _reserved; }
-
- // Return the number of bytes currently reserved, committed, and used,
- // respectively, for holding objects.
- size_t reserved_obj_bytes() const { return _reserved.byte_size(); }
+ address base() const { return (address)reserved_region().start(); }
// Future cleanup here. The following functions should specify bytes or
// heapwords as part of their signature.
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -61,7 +61,10 @@
obj->set_klass(klass());
assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
"missing blueprint");
+}
+// Support for jvmti and dtrace
+inline void post_allocation_notify(KlassHandle klass, oop obj) {
// support for JVMTI VMObjectAlloc event (no-op if not enabled)
JvmtiExport::vm_object_alloc_event_collector(obj);
@@ -79,18 +82,22 @@
post_allocation_setup_common(klass, obj, size);
assert(Universe::is_bootstrapping() ||
!((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
+ // notify jvmti and dtrace
+ post_allocation_notify(klass, (oop)obj);
}
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
HeapWord* obj,
size_t size,
int length) {
- // Set array length before posting jvmti object alloc event
- // in post_allocation_setup_common()
assert(length >= 0, "length should be non-negative");
+ post_allocation_setup_common(klass, obj, size);
+ // Must set length after installing klass as set_klass zeros the length
+ // field in UseCompressedOops
((arrayOop)obj)->set_length(length);
- post_allocation_setup_common(klass, obj, size);
assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
+ // notify jvmti and dtrace (must be after length is set for dtrace)
+ post_allocation_notify(klass, (oop)obj);
}
HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {
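The reordering above matters only with compressed oops: the array length then shares the second header word with the narrow klass, so installing the klass clears the length field. A small model of that interaction (the layout is illustrative, not the exact HotSpot header):

#include <cassert>
#include <cstdint>

struct ArrayHeaderSketch {
  uint64_t mark;
  uint32_t narrow_klass;   // with compressed oops the klass is 32 bits...
  uint32_t length;         // ...and the array length packs into the same word
  void set_klass(uint32_t k) { narrow_klass = k; length = 0; }  // mimics zeroing the shared word
  void set_length(uint32_t n) { length = n; }
};

int main() {
  ArrayHeaderSketch bad{};
  bad.set_length(10);
  bad.set_klass(42);            // old order: the length write is lost
  assert(bad.length == 0);

  ArrayHeaderSketch good{};
  good.set_klass(42);           // new order: klass first...
  good.set_length(10);          // ...then the length survives
  assert(good.length == 10);
}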
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/includeDB_core
--- a/hotspot/src/share/vm/includeDB_core Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/includeDB_core Sun Apr 13 17:43:42 2008 -0400
@@ -191,7 +191,6 @@
arrayKlass.cpp arrayKlass.hpp
arrayKlass.cpp arrayKlassKlass.hpp
arrayKlass.cpp arrayOop.hpp
-arrayKlass.cpp collectedHeap.hpp
arrayKlass.cpp collectedHeap.inline.hpp
arrayKlass.cpp gcLocker.hpp
arrayKlass.cpp instanceKlass.hpp
@@ -211,6 +210,7 @@
arrayKlassKlass.cpp arrayKlassKlass.hpp
arrayKlassKlass.cpp handles.inline.hpp
arrayKlassKlass.cpp javaClasses.hpp
+arrayKlassKlass.cpp markSweep.inline.hpp
arrayKlassKlass.cpp oop.inline.hpp
arrayKlassKlass.hpp arrayKlass.hpp
@@ -250,7 +250,7 @@
assembler_.cpp assembler_.inline.hpp
assembler_.cpp biasedLocking.hpp
assembler_.cpp cardTableModRefBS.hpp
-assembler_.cpp collectedHeap.hpp
+assembler_.cpp collectedHeap.inline.hpp
assembler_.cpp interfaceSupport.hpp
assembler_.cpp interpreter.hpp
assembler_.cpp objectMonitor.hpp
@@ -331,9 +331,8 @@
bitMap.inline.hpp atomic.hpp
bitMap.inline.hpp bitMap.hpp
-blockOffsetTable.cpp blockOffsetTable.hpp
blockOffsetTable.cpp blockOffsetTable.inline.hpp
-blockOffsetTable.cpp collectedHeap.hpp
+blockOffsetTable.cpp collectedHeap.inline.hpp
blockOffsetTable.cpp iterator.hpp
blockOffsetTable.cpp java.hpp
blockOffsetTable.cpp oop.inline.hpp
@@ -990,6 +989,7 @@
codeCache.cpp mutexLocker.hpp
codeCache.cpp nmethod.hpp
codeCache.cpp objArrayOop.hpp
+codeCache.cpp oop.inline.hpp
codeCache.cpp pcDesc.hpp
codeCache.cpp resourceArea.hpp
@@ -1124,7 +1124,7 @@
compiledICHolderKlass.cpp compiledICHolderKlass.hpp
compiledICHolderKlass.cpp handles.inline.hpp
compiledICHolderKlass.cpp javaClasses.hpp
-compiledICHolderKlass.cpp markSweep.hpp
+compiledICHolderKlass.cpp markSweep.inline.hpp
compiledICHolderKlass.cpp oop.inline.hpp
compiledICHolderKlass.cpp oop.inline2.hpp
compiledICHolderKlass.cpp permGen.hpp
@@ -1192,6 +1192,7 @@
constMethodKlass.cpp gcLocker.hpp
constMethodKlass.cpp handles.inline.hpp
constMethodKlass.cpp interpreter.hpp
+constMethodKlass.cpp markSweep.inline.hpp
constMethodKlass.cpp oop.inline.hpp
constMethodKlass.cpp oop.inline2.hpp
constMethodKlass.cpp resourceArea.hpp
@@ -1210,6 +1211,8 @@
constantPoolKlass.cpp constantPoolKlass.hpp
constantPoolKlass.cpp constantPoolOop.hpp
constantPoolKlass.cpp handles.inline.hpp
+constantPoolKlass.cpp javaClasses.hpp
+constantPoolKlass.cpp markSweep.inline.hpp
constantPoolKlass.cpp oop.inline.hpp
constantPoolKlass.cpp oop.inline2.hpp
constantPoolKlass.cpp oopFactory.hpp
@@ -1261,7 +1264,8 @@
cpCacheKlass.cpp constantPoolOop.hpp
cpCacheKlass.cpp cpCacheKlass.hpp
cpCacheKlass.cpp handles.inline.hpp
-cpCacheKlass.cpp markSweep.hpp
+cpCacheKlass.cpp javaClasses.hpp
+cpCacheKlass.cpp markSweep.inline.hpp
cpCacheKlass.cpp oop.inline.hpp
cpCacheKlass.cpp permGen.hpp
@@ -1273,7 +1277,6 @@
cpCacheOop.cpp handles.inline.hpp
cpCacheOop.cpp interpreter.hpp
cpCacheOop.cpp jvmtiRedefineClassesTrace.hpp
-cpCacheOop.cpp markSweep.hpp
cpCacheOop.cpp markSweep.inline.hpp
cpCacheOop.cpp objArrayOop.hpp
cpCacheOop.cpp oop.inline.hpp
@@ -1385,7 +1388,6 @@
defNewGeneration.cpp collectorCounters.hpp
defNewGeneration.cpp copy.hpp
-defNewGeneration.cpp defNewGeneration.hpp
defNewGeneration.cpp defNewGeneration.inline.hpp
defNewGeneration.cpp gcLocker.inline.hpp
defNewGeneration.cpp gcPolicyCounters.hpp
@@ -1397,7 +1399,6 @@
defNewGeneration.cpp java.hpp
defNewGeneration.cpp oop.inline.hpp
defNewGeneration.cpp referencePolicy.hpp
-defNewGeneration.cpp space.hpp
defNewGeneration.cpp space.inline.hpp
defNewGeneration.cpp thread_<os_family>.inline.hpp
@@ -1406,6 +1407,7 @@
defNewGeneration.hpp generation.inline.hpp
defNewGeneration.hpp generationCounters.hpp
+defNewGeneration.inline.hpp cardTableRS.hpp
defNewGeneration.inline.hpp defNewGeneration.hpp
defNewGeneration.inline.hpp space.hpp
@@ -1956,6 +1958,7 @@
instanceKlass.cpp jvmti.h
instanceKlass.cpp jvmtiExport.hpp
instanceKlass.cpp jvmtiRedefineClassesTrace.hpp
+instanceKlass.cpp markSweep.inline.hpp
instanceKlass.cpp methodOop.hpp
instanceKlass.cpp mutexLocker.hpp
instanceKlass.cpp objArrayKlassKlass.hpp
@@ -1991,6 +1994,7 @@
instanceKlassKlass.cpp instanceRefKlass.hpp
instanceKlassKlass.cpp javaClasses.hpp
instanceKlassKlass.cpp jvmtiExport.hpp
+instanceKlassKlass.cpp markSweep.inline.hpp
instanceKlassKlass.cpp objArrayKlassKlass.hpp
instanceKlassKlass.cpp objArrayOop.hpp
instanceKlassKlass.cpp oop.inline.hpp
@@ -2012,7 +2016,7 @@
instanceRefKlass.cpp genOopClosures.inline.hpp
instanceRefKlass.cpp instanceRefKlass.hpp
instanceRefKlass.cpp javaClasses.hpp
-instanceRefKlass.cpp markSweep.hpp
+instanceRefKlass.cpp markSweep.inline.hpp
instanceRefKlass.cpp oop.inline.hpp
instanceRefKlass.cpp preserveException.hpp
instanceRefKlass.cpp systemDictionary.hpp
@@ -2492,7 +2496,7 @@
klassKlass.cpp instanceOop.hpp
klassKlass.cpp klassKlass.hpp
klassKlass.cpp klassOop.hpp
-klassKlass.cpp markSweep.hpp
+klassKlass.cpp markSweep.inline.hpp
klassKlass.cpp methodKlass.hpp
klassKlass.cpp objArrayKlass.hpp
klassKlass.cpp oop.inline.hpp
@@ -2519,7 +2523,7 @@
klassVtable.cpp jvmtiRedefineClassesTrace.hpp
klassVtable.cpp klassOop.hpp
klassVtable.cpp klassVtable.hpp
-klassVtable.cpp markSweep.hpp
+klassVtable.cpp markSweep.inline.hpp
klassVtable.cpp methodOop.hpp
klassVtable.cpp objArrayOop.hpp
klassVtable.cpp oop.inline.hpp
@@ -2632,6 +2636,9 @@
markOop.inline.hpp markOop.hpp
markSweep.cpp compileBroker.hpp
+
+markSweep.hpp collectedHeap.hpp
+
memRegion.cpp globals.hpp
memRegion.cpp memRegion.hpp
@@ -2731,7 +2738,7 @@
methodDataKlass.cpp gcLocker.hpp
methodDataKlass.cpp handles.inline.hpp
methodDataKlass.cpp klassOop.hpp
-methodDataKlass.cpp markSweep.hpp
+methodDataKlass.cpp markSweep.inline.hpp
methodDataKlass.cpp methodDataKlass.hpp
methodDataKlass.cpp methodDataOop.hpp
methodDataKlass.cpp oop.inline.hpp
@@ -2746,7 +2753,6 @@
methodDataOop.cpp deoptimization.hpp
methodDataOop.cpp handles.inline.hpp
methodDataOop.cpp linkResolver.hpp
-methodDataOop.cpp markSweep.hpp
methodDataOop.cpp markSweep.inline.hpp
methodDataOop.cpp methodDataOop.hpp
methodDataOop.cpp oop.inline.hpp
@@ -2764,7 +2770,7 @@
methodKlass.cpp interpreter.hpp
methodKlass.cpp javaClasses.hpp
methodKlass.cpp klassOop.hpp
-methodKlass.cpp markSweep.hpp
+methodKlass.cpp markSweep.inline.hpp
methodKlass.cpp methodDataOop.hpp
methodKlass.cpp methodKlass.hpp
methodKlass.cpp oop.inline.hpp
@@ -2941,6 +2947,7 @@
objArrayKlass.cpp universe.inline.hpp
objArrayKlass.cpp vmSymbols.hpp
+
objArrayKlass.hpp arrayKlass.hpp
objArrayKlass.hpp instanceKlass.hpp
objArrayKlass.hpp specialized_oop_closures.hpp
@@ -2948,6 +2955,7 @@
objArrayKlassKlass.cpp collectedHeap.inline.hpp
objArrayKlassKlass.cpp instanceKlass.hpp
objArrayKlassKlass.cpp javaClasses.hpp
+objArrayKlassKlass.cpp markSweep.inline.hpp
objArrayKlassKlass.cpp objArrayKlassKlass.hpp
objArrayKlassKlass.cpp oop.inline.hpp
objArrayKlassKlass.cpp oop.inline2.hpp
@@ -2956,6 +2964,7 @@
objArrayKlassKlass.hpp arrayKlassKlass.hpp
objArrayKlassKlass.hpp objArrayKlass.hpp
+objArrayOop.cpp objArrayKlass.hpp
objArrayOop.cpp objArrayOop.hpp
objArrayOop.cpp oop.inline.hpp
@@ -3005,7 +3014,6 @@
oop.inline.hpp klass.hpp
oop.inline.hpp klassOop.hpp
oop.inline.hpp markOop.inline.hpp
-oop.inline.hpp markSweep.hpp
oop.inline.hpp markSweep.inline.hpp
oop.inline.hpp oop.hpp
oop.inline.hpp os.hpp
@@ -4536,6 +4544,7 @@
vtableStubs.cpp instanceKlass.hpp
vtableStubs.cpp jvmtiExport.hpp
vtableStubs.cpp klassVtable.hpp
+vtableStubs.cpp oop.inline.hpp
vtableStubs.cpp mutexLocker.hpp
vtableStubs.cpp resourceArea.hpp
vtableStubs.cpp sharedRuntime.hpp
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -35,7 +35,10 @@
static methodOop method(JavaThread *thread) { return last_frame(thread).interpreter_frame_method(); }
static address bcp(JavaThread *thread) { return last_frame(thread).interpreter_frame_bcp(); }
static void set_bcp_and_mdp(address bcp, JavaThread*thread);
- static Bytecodes::Code code(JavaThread *thread) { return Bytecodes::code_at(bcp(thread)); }
+ static Bytecodes::Code code(JavaThread *thread) {
+ // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
+ return Bytecodes::code_at(bcp(thread), method(thread));
+ }
static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; }
static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); }
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/barrierSet.hpp
--- a/hotspot/src/share/vm/memory/barrierSet.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/barrierSet.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -54,9 +54,9 @@
// These functions indicate whether a particular access of the given
// kinds requires a barrier.
- virtual bool read_ref_needs_barrier(oop* field) = 0;
+ virtual bool read_ref_needs_barrier(void* field) = 0;
virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
- virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0;
+ virtual bool write_ref_needs_barrier(void* field, oop new_val) = 0;
virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes, juint val1, juint val2) = 0;
// The first four operations provide a direct implementation of the
@@ -64,7 +64,7 @@
// directly, as appropriate.
// Invoke the barrier, if any, necessary when reading the given ref field.
- virtual void read_ref_field(oop* field) = 0;
+ virtual void read_ref_field(void* field) = 0;
// Invoke the barrier, if any, necessary when reading the given primitive
// "field" of "bytes" bytes in "obj".
@@ -75,9 +75,9 @@
// (For efficiency reasons, this operation is specialized for certain
// barrier types. Semantically, it should be thought of as a call to the
// virtual "_work" function below, which must implement the barrier.)
- inline void write_ref_field(oop* field, oop new_val);
+ inline void write_ref_field(void* field, oop new_val);
protected:
- virtual void write_ref_field_work(oop* field, oop new_val) = 0;
+ virtual void write_ref_field_work(void* field, oop new_val) = 0;
public:
// Invoke the barrier, if any, necessary when writing the "bytes"-byte
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/barrierSet.inline.hpp
--- a/hotspot/src/share/vm/memory/barrierSet.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/barrierSet.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -26,7 +26,7 @@
// performance-critical calls when when the barrier is the most common
// card-table kind.
-void BarrierSet::write_ref_field(oop* field, oop new_val) {
+void BarrierSet::write_ref_field(void* field, oop new_val) {
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val);
} else {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/cardTableModRefBS.cpp
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -294,7 +294,7 @@
// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
-void CardTableModRefBS::write_ref_field_work(oop* field, oop newVal) {
+void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
inline_write_ref_field(field, newVal);
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/cardTableModRefBS.hpp
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -273,7 +273,7 @@
// *** Barrier set functions.
- inline bool write_ref_needs_barrier(oop* field, oop new_val) {
+ inline bool write_ref_needs_barrier(void* field, oop new_val) {
// Note that this assumes the perm gen is the highest generation
// in the address space
return new_val != NULL && !new_val->is_perm();
@@ -285,7 +285,7 @@
// these functions here for performance.
protected:
void write_ref_field_work(oop obj, size_t offset, oop newVal);
- void write_ref_field_work(oop* field, oop newVal);
+ void write_ref_field_work(void* field, oop newVal);
public:
bool has_write_ref_array_opt() { return true; }
@@ -315,7 +315,7 @@
// *** Card-table-barrier-specific things.
- inline void inline_write_ref_field(oop* field, oop newVal) {
+ inline void inline_write_ref_field(void* field, oop newVal) {
jbyte* byte = byte_for(field);
*byte = dirty_card;
}
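The barrier signatures above change from oop* to void* because card marking needs only the address of the updated slot, which may now be either an oop* or a narrowOop*. A compact sketch of such a card-marking write barrier (the card size and table sizing are assumptions for illustration):

#include <cassert>
#include <cstdint>
#include <vector>

const int CARD_SHIFT = 9;                          // 512-byte cards (assumed)
static char* heap_start = nullptr;                 // start of the covered heap
static std::vector<uint8_t> cards;                 // one byte per card, 1 = clean, 0 = dirty

inline uint8_t* byte_for(void* field) {
  return &cards[((char*)field - heap_start) >> CARD_SHIFT];
}
// Works for any slot width: only the slot's address matters.
inline void write_ref_field(void* field, void* /*new_val*/) {
  *byte_for(field) = 0;
}

int main() {
  static char heap[4 << CARD_SHIFT];               // four cards' worth of "heap"
  heap_start = heap;
  cards.assign(4, 1);
  write_ref_field(heap + 600, nullptr);            // this slot lives in card 1
  assert(cards[1] == 0 && cards[0] == 1);
}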
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/cardTableRS.cpp
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -191,7 +191,7 @@
// prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
-void CardTableRS::write_ref_field_gc_par(oop* field, oop new_val) {
+void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
jbyte* entry = ct_bs()->byte_for(field);
do {
jbyte entry_val = *entry;
@@ -290,28 +290,36 @@
class VerifyCleanCardClosure: public OopClosure {
- HeapWord* boundary;
- HeapWord* begin; HeapWord* end;
-public:
- void do_oop(oop* p) {
+private:
+ HeapWord* _boundary;
+ HeapWord* _begin;
+ HeapWord* _end;
+protected:
+ template <class T> void do_oop_work(T* p) {
HeapWord* jp = (HeapWord*)p;
- if (jp >= begin && jp < end) {
- guarantee(*p == NULL || (HeapWord*)p < boundary
- || (HeapWord*)(*p) >= boundary,
+ if (jp >= _begin && jp < _end) {
+ oop obj = oopDesc::load_decode_heap_oop(p);
+ guarantee(obj == NULL ||
+ (HeapWord*)p < _boundary ||
+ (HeapWord*)obj >= _boundary,
"pointer on clean card crosses boundary");
}
}
- VerifyCleanCardClosure(HeapWord* b, HeapWord* _begin, HeapWord* _end) :
- boundary(b), begin(_begin), end(_end) {}
+public:
+ VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
+ _boundary(b), _begin(begin), _end(end) {}
+ virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};
class VerifyCTSpaceClosure: public SpaceClosure {
+private:
CardTableRS* _ct;
HeapWord* _boundary;
public:
VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
_ct(ct), _boundary(boundary) {}
- void do_space(Space* s) { _ct->verify_space(s, _boundary); }
+ virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};
class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/cardTableRS.hpp
--- a/hotspot/src/share/vm/memory/cardTableRS.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/cardTableRS.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -106,18 +106,18 @@
// closure application.
void younger_refs_iterate(Generation* g, OopsInGenClosure* blk);
- void inline_write_ref_field_gc(oop* field, oop new_val) {
+ void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = _ct_bs.byte_for(field);
*byte = youngergen_card;
}
- void write_ref_field_gc_work(oop* field, oop new_val) {
+ void write_ref_field_gc_work(void* field, oop new_val) {
inline_write_ref_field_gc(field, new_val);
}
// Override. Might want to devirtualize this in the same fashion as
// above. Ensures that the value of the card for field says that it's
// a younger card in the current collection.
- virtual void write_ref_field_gc_par(oop* field, oop new_val);
+ virtual void write_ref_field_gc_par(void* field, oop new_val);
void resize_covered_region(MemRegion new_region);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/compactingPermGenGen.cpp
--- a/hotspot/src/share/vm/memory/compactingPermGenGen.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/compactingPermGenGen.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -49,9 +49,9 @@
// to prevent visiting any object twice.
class RecursiveAdjustSharedObjectClosure : public OopClosure {
-public:
- void do_oop(oop* o) {
- oop obj = *o;
+ protected:
+ template <class T> inline void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (obj->is_shared_readwrite()) {
if (obj->mark()->is_marked()) {
obj->init_mark(); // Don't revisit this object.
@@ -71,7 +71,10 @@
}
}
}
- };
+ }
+ public:
+ virtual void do_oop(oop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
};
@@ -86,9 +89,9 @@
// as doing so can cause hash codes to be computed, destroying
// forwarding pointers.
class TraversePlaceholdersClosure : public OopClosure {
- public:
- void do_oop(oop* o) {
- oop obj = *o;
+ protected:
+ template <class T> inline void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (obj->klass() == Universe::symbolKlassObj() &&
obj->is_shared_readonly()) {
symbolHandle sym((symbolOop) obj);
@@ -99,6 +102,10 @@
}
}
}
+ public:
+ virtual void do_oop(oop* p) { TraversePlaceholdersClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { TraversePlaceholdersClosure::do_oop_work(p); }
+
};
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/defNewGeneration.cpp
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -47,31 +47,9 @@
_rs = (CardTableRS*)rs;
}
-void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) {
- // We never expect to see a null reference being processed
- // as a weak reference.
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
- _cl->do_oop_nv(p);
+void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
+void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
- // Card marking is trickier for weak refs.
- // This oop is a 'next' field which was filled in while we
- // were discovering weak references. While we might not need
- // to take a special action to keep this reference alive, we
- // will need to dirty a card as the field was modified.
- //
- // Alternatively, we could create a method which iterates through
- // each generation, allowing them in turn to examine the modified
- // field.
- //
- // We could check that p is also in an older generation, but
- // dirty cards in the youngest gen are never scanned, so the
- // extra check probably isn't worthwhile.
- if (Universe::heap()->is_in_reserved(p)) {
- _rs->inline_write_ref_field_gc(p, *p);
- }
-}
DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
@@ -79,19 +57,8 @@
_boundary = g->reserved().end();
}
-void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) {
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
- _cl->do_oop_nv(p);
-
- // Optimized for Defnew generation if it's the youngest generation:
- // we set a younger_gen card if we have an older->youngest
- // generation pointer.
- if (((HeapWord*)(*p) < _boundary) && Universe::heap()->is_in_reserved(p)) {
- _rs->inline_write_ref_field_gc(p, *p);
- }
-}
+void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
+void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
@@ -132,6 +99,9 @@
_boundary = _g->reserved().end();
}
+void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
+void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
+
FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
@@ -139,6 +109,9 @@
_boundary = _g->reserved().end();
}
+void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
+void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
+
ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
OopClosure(g->ref_processor()), _g(g)
{
@@ -146,6 +119,11 @@
_boundary = _g->reserved().end();
}
+void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
+void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
+
+void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
+void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
size_t initial_size,
@@ -656,7 +634,7 @@
}
}
-oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
+oop DefNewGeneration::copy_to_survivor_space(oop old) {
assert(is_in_reserved(old) && !old->is_forwarded(),
"shouldn't be scavenging this oop");
size_t s = old->size();
@@ -669,7 +647,7 @@
// Otherwise try allocating obj tenured
if (obj == NULL) {
- obj = _next_gen->promote(old, s, from);
+ obj = _next_gen->promote(old, s);
if (obj == NULL) {
if (!HandlePromotionFailure) {
// A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
@@ -862,3 +840,69 @@
const char* DefNewGeneration::name() const {
return "def new generation";
}
+
+// Moved from inline file as they are not called inline
+CompactibleSpace* DefNewGeneration::first_compaction_space() const {
+ return eden();
+}
+
+HeapWord* DefNewGeneration::allocate(size_t word_size,
+ bool is_tlab) {
+ // This is the slow-path allocation for the DefNewGeneration.
+ // Most allocations are fast-path in compiled code.
+ // We try to allocate from the eden. If that works, we are happy.
+ // Note that since DefNewGeneration supports lock-free allocation, we
+ // have to use it here, as well.
+ HeapWord* result = eden()->par_allocate(word_size);
+ if (result != NULL) {
+ return result;
+ }
+ do {
+ HeapWord* old_limit = eden()->soft_end();
+ if (old_limit < eden()->end()) {
+ // Tell the next generation we reached a limit.
+ HeapWord* new_limit =
+ next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
+ if (new_limit != NULL) {
+ Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
+ } else {
+ assert(eden()->soft_end() == eden()->end(),
+ "invalid state after allocation_limit_reached returned null");
+ }
+ } else {
+ // The allocation failed and the soft limit is already equal to the hard limit,
+ // so there is no reason to attempt another allocation.
+ assert(old_limit == eden()->end(), "sanity check");
+ break;
+ }
+ // Keep trying to allocate until it succeeds or the soft limit can no longer be adjusted.
+ result = eden()->par_allocate(word_size);
+ } while (result == NULL);
+
+ // If the eden is full and the last collection bailed out, we are running
+ // out of heap space, and we try to allocate the from-space, too.
+ // allocate_from_space can't be inlined because that would introduce a
+ // circular dependency at compile time.
+ if (result == NULL) {
+ result = allocate_from_space(word_size);
+ }
+ return result;
+}
+
+HeapWord* DefNewGeneration::par_allocate(size_t word_size,
+ bool is_tlab) {
+ return eden()->par_allocate(word_size);
+}
+
+void DefNewGeneration::gc_prologue(bool full) {
+ // Ensure that _end and _soft_end are the same in eden space.
+ eden()->set_soft_end(eden()->end());
+}
+
+size_t DefNewGeneration::tlab_capacity() const {
+ return eden()->capacity();
+}
+
+size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
+ return unsafe_max_alloc_nogc();
+}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/defNewGeneration.hpp
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -24,6 +24,7 @@
class EdenSpace;
class ContiguousSpace;
+class ScanClosure;
// DefNewGeneration is a young generation containing eden, from- and
// to-space.
@@ -155,17 +156,21 @@
protected:
ScanWeakRefClosure* _cl;
CardTableRS* _rs;
+ template <class T> void do_oop_work(T* p);
public:
KeepAliveClosure(ScanWeakRefClosure* cl);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class FastKeepAliveClosure: public KeepAliveClosure {
protected:
HeapWord* _boundary;
+ template <class T> void do_oop_work(T* p);
public:
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class EvacuateFollowersClosure: public VoidClosure {
@@ -206,7 +211,7 @@
ContiguousSpace* from() const { return _from_space; }
ContiguousSpace* to() const { return _to_space; }
- inline CompactibleSpace* first_compaction_space() const;
+ virtual CompactibleSpace* first_compaction_space() const;
// Space enquiries
size_t capacity() const;
@@ -226,8 +231,8 @@
// Thread-local allocation buffers
bool supports_tlab_allocation() const { return true; }
- inline size_t tlab_capacity() const;
- inline size_t unsafe_max_tlab_alloc() const;
+ size_t tlab_capacity() const;
+ size_t unsafe_max_tlab_alloc() const;
// Grow the generation by the specified number of bytes.
// The size of bytes is assumed to be properly aligned.
@@ -265,13 +270,13 @@
return result;
}
- inline HeapWord* allocate(size_t word_size, bool is_tlab);
+ HeapWord* allocate(size_t word_size, bool is_tlab);
HeapWord* allocate_from_space(size_t word_size);
- inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
+ HeapWord* par_allocate(size_t word_size, bool is_tlab);
// Prologue & Epilogue
- inline virtual void gc_prologue(bool full);
+ virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
// Doesn't require additional work during GC prologue and epilogue
@@ -307,7 +312,7 @@
bool is_tlab,
bool parallel = false);
- oop copy_to_survivor_space(oop old, oop* from);
+ oop copy_to_survivor_space(oop old);
int tenuring_threshold() { return _tenuring_threshold; }
// Performance Counter support
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/defNewGeneration.inline.hpp
--- a/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -22,67 +22,60 @@
*
*/
-CompactibleSpace* DefNewGeneration::first_compaction_space() const {
- return eden();
+// Methods of protected closure types
+
+template <class T>
+inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+ {
+ // We never expect to see a null reference being processed
+ // as a weak reference.
+ assert (!oopDesc::is_null(*p), "expected non-null ref");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ assert (obj->is_oop(), "expected an oop while scanning weak refs");
+ }
+#endif // ASSERT
+
+ _cl->do_oop_nv(p);
+
+ // Card marking is trickier for weak refs.
+ // This oop is a 'next' field which was filled in while we
+ // were discovering weak references. While we might not need
+ // to take a special action to keep this reference alive, we
+ // will need to dirty a card as the field was modified.
+ //
+ // Alternatively, we could create a method which iterates through
+ // each generation, allowing them in turn to examine the modified
+ // field.
+ //
+ // We could check that p is also in an older generation, but
+ // dirty cards in the youngest gen are never scanned, so the
+ // extra check probably isn't worthwhile.
+ if (Universe::heap()->is_in_reserved(p)) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ _rs->inline_write_ref_field_gc(p, obj);
+ }
}
-HeapWord* DefNewGeneration::allocate(size_t word_size,
- bool is_tlab) {
- // This is the slow-path allocation for the DefNewGeneration.
- // Most allocations are fast-path in compiled code.
- // We try to allocate from the eden. If that works, we are happy.
- // Note that since DefNewGeneration supports lock-free allocation, we
- // have to use it here, as well.
- HeapWord* result = eden()->par_allocate(word_size);
- if (result != NULL) {
- return result;
+template <class T>
+inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+ {
+ // We never expect to see a null reference being processed
+ // as a weak reference.
+ assert (!oopDesc::is_null(*p), "expected non-null ref");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ assert (obj->is_oop(), "expected an oop while scanning weak refs");
}
- do {
- HeapWord* old_limit = eden()->soft_end();
- if (old_limit < eden()->end()) {
- // Tell the next generation we reached a limit.
- HeapWord* new_limit =
- next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
- if (new_limit != NULL) {
- Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
- } else {
- assert(eden()->soft_end() == eden()->end(),
- "invalid state after allocation_limit_reached returned null");
- }
- } else {
- // The allocation failed and the soft limit is equal to the hard limit,
- // there are no reasons to do an attempt to allocate
- assert(old_limit == eden()->end(), "sanity check");
- break;
- }
- // Try to allocate until succeeded or the soft limit can't be adjusted
- result = eden()->par_allocate(word_size);
- } while (result == NULL);
+#endif // ASSERT
+
+ _cl->do_oop_nv(p);
- // If the eden is full and the last collection bailed out, we are running
- // out of heap space, and we try to allocate the from-space, too.
- // allocate_from_space can't be inlined because that would introduce a
- // circular dependency at compile time.
- if (result == NULL) {
- result = allocate_from_space(word_size);
+ // Optimized for Defnew generation if it's the youngest generation:
+ // we set a younger_gen card if we have an older->youngest
+ // generation pointer.
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) {
+ _rs->inline_write_ref_field_gc(p, obj);
}
- return result;
-}
-
-HeapWord* DefNewGeneration::par_allocate(size_t word_size,
- bool is_tlab) {
- return eden()->par_allocate(word_size);
}
-
-void DefNewGeneration::gc_prologue(bool full) {
- // Ensure that _end and _soft_end are the same in eden space.
- eden()->set_soft_end(eden()->end());
-}
-
-size_t DefNewGeneration::tlab_capacity() const {
- return eden()->capacity();
-}
-
-size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
- return unsafe_max_alloc_nogc();
-}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/dump.cpp
--- a/hotspot/src/share/vm/memory/dump.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/dump.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -60,9 +60,9 @@
hash_offset = java_lang_String::hash_offset_in_bytes();
}
- void do_oop(oop* pobj) {
- if (pobj != NULL) {
- oop obj = *pobj;
+ void do_oop(oop* p) {
+ if (p != NULL) {
+ oop obj = *p;
if (obj->klass() == SystemDictionary::string_klass()) {
int hash;
@@ -79,6 +79,7 @@
}
}
}
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
@@ -121,9 +122,8 @@
class MarkObjectsOopClosure : public OopClosure {
public:
- void do_oop(oop* pobj) {
- mark_object(*pobj);
- }
+ void do_oop(oop* p) { mark_object(*p); }
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
@@ -136,6 +136,7 @@
mark_object(obj);
}
}
+ void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
};
@@ -554,6 +555,7 @@
}
}
}
+ void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
};
@@ -690,6 +692,8 @@
++top;
}
+ void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
+
void do_int(int* p) {
check_space();
*top = (oop)(intptr_t)*p;
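The dump.cpp closures above only walk VM roots and shared-space words and evidently never expect compressed slots, so they satisfy the now-mandatory narrowOop* overload with ShouldNotReachHere(). A minimal standalone sketch of that idiom (simplified types; a plain assert stands in for ShouldNotReachHere()):

    #include <cassert>
    #include <cstdint>

    typedef void*    oop;
    typedef uint32_t narrowOop;

    class OopClosureSketch {
     public:
      virtual void do_oop(oop* p) = 0;
      virtual void do_oop(narrowOop* p) = 0;
      virtual ~OopClosureSketch() {}
    };

    // Root-only closure: root slots are always full width, so the compressed
    // overload must never be reached.
    class RootOnlyClosure : public OopClosureSketch {
     public:
      int visited;
      RootOnlyClosure() : visited(0) {}
      virtual void do_oop(oop* p)     { if (*p != 0) visited++; }
      virtual void do_oop(narrowOop*) { assert(false && "compressed slot in a root walk?"); }
    };

    int main() {
      RootOnlyClosure cl;
      oop root = (oop)0x2000;
      cl.do_oop(&root);                 // full-width root slot: fine
      return cl.visited == 1 ? 0 : 1;
    }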
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/genCollectedHeap.cpp
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -624,6 +624,7 @@
void do_oop(oop* p) {
assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
}
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;
@@ -1300,8 +1301,7 @@
oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
oop obj,
- size_t obj_size,
- oop* ref) {
+ size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
HeapWord* result = NULL;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/genCollectedHeap.hpp
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -452,8 +452,7 @@
// gen; return the new location of obj if successful. Otherwise, return NULL.
oop handle_failed_promotion(Generation* gen,
oop obj,
- size_t obj_size,
- oop* ref);
+ size_t obj_size);
private:
// Accessor for memory state verification support
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/genMarkSweep.cpp
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -73,8 +73,7 @@
VALIDATE_MARK_SWEEP_ONLY(
if (ValidateMarkSweep) {
- guarantee(_root_refs_stack->length() == 0,
- "should be empty by now");
+ guarantee(_root_refs_stack->length() == 0, "should be empty by now");
}
)
@@ -165,9 +164,9 @@
#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
- _root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
- _other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
- _adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
+ _root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
+ _other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
+ _adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
_live_oops = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
_live_oops_moved_to = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
_live_oops_size = new (ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/genOopClosures.hpp
--- a/hotspot/src/share/vm/memory/genOopClosures.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genOopClosures.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -28,6 +28,11 @@
class CardTableModRefBS;
class DefNewGeneration;
+template <class E> class GenericTaskQueue;
+typedef GenericTaskQueue<oop> OopTaskQueue;
+template <class E> class GenericTaskQueueSet;
+typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
+
// Closure for iterating roots from a particular generation
// Note: all classes deriving from this MUST call this do_barrier
// method at the end of their own do_oop method!
@@ -35,13 +40,13 @@
class OopsInGenClosure : public OopClosure {
private:
- Generation* _orig_gen; // generation originally set in ctor
- Generation* _gen; // generation being scanned
+ Generation* _orig_gen; // generation originally set in ctor
+ Generation* _gen; // generation being scanned
protected:
// Some subtypes need access.
- HeapWord* _gen_boundary; // start of generation
- CardTableRS* _rs; // remembered set
+ HeapWord* _gen_boundary; // start of generation
+ CardTableRS* _rs; // remembered set
// For assertions
Generation* generation() { return _gen; }
@@ -49,7 +54,7 @@
// Derived classes that modify oops so that they might be old-to-young
// pointers must call the method below.
- void do_barrier(oop* p);
+ template <class T> void do_barrier(T* p);
public:
OopsInGenClosure() : OopClosure(NULL),
@@ -75,14 +80,17 @@
// This closure will perform barrier store calls for ALL
// pointers in scanned oops.
class ScanClosure: public OopsInGenClosure {
-protected:
+ protected:
DefNewGeneration* _g;
- HeapWord* _boundary;
- bool _gc_barrier;
-public:
+ HeapWord* _boundary;
+ bool _gc_barrier;
+ template <class T> inline void do_oop_work(T* p);
+ public:
ScanClosure(DefNewGeneration* g, bool gc_barrier);
- void do_oop(oop* p);
- void do_oop_nv(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
bool do_header() { return false; }
Prefetch::style prefetch_style() {
return Prefetch::do_write;
@@ -95,14 +103,17 @@
// pointers into the DefNewGeneration. This is less
// precise, but faster, than a ScanClosure
class FastScanClosure: public OopsInGenClosure {
-protected:
+ protected:
DefNewGeneration* _g;
- HeapWord* _boundary;
- bool _gc_barrier;
-public:
+ HeapWord* _boundary;
+ bool _gc_barrier;
+ template <class T> inline void do_oop_work(T* p);
+ public:
FastScanClosure(DefNewGeneration* g, bool gc_barrier);
- void do_oop(oop* p);
- void do_oop_nv(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
bool do_header() { return false; }
Prefetch::style prefetch_style() {
return Prefetch::do_write;
@@ -110,19 +121,27 @@
};
class FilteringClosure: public OopClosure {
- HeapWord* _boundary;
+ private:
+ HeapWord* _boundary;
OopClosure* _cl;
-public:
+ protected:
+ template <class T> inline void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if ((HeapWord*)obj < _boundary) {
+ _cl->do_oop(p);
+ }
+ }
+ }
+ public:
FilteringClosure(HeapWord* boundary, OopClosure* cl) :
OopClosure(cl->_ref_processor), _boundary(boundary),
_cl(cl) {}
- void do_oop(oop* p);
- void do_oop_nv(oop* p) {
- oop obj = *p;
- if ((HeapWord*)obj < _boundary && obj != NULL) {
- _cl->do_oop(p);
- }
- }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { FilteringClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
bool do_header() { return false; }
};
@@ -131,19 +150,26 @@
// OopsInGenClosure -- weak references are processed all
// at once, with no notion of which generation they were in.
class ScanWeakRefClosure: public OopClosure {
-protected:
- DefNewGeneration* _g;
- HeapWord* _boundary;
-public:
+ protected:
+ DefNewGeneration* _g;
+ HeapWord* _boundary;
+ template <class T> inline void do_oop_work(T* p);
+ public:
ScanWeakRefClosure(DefNewGeneration* g);
- void do_oop(oop* p);
- void do_oop_nv(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
};
class VerifyOopClosure: public OopClosure {
-public:
- void do_oop(oop* p) {
- guarantee((*p)->is_oop_or_null(), "invalid oop");
+ protected:
+ template <class T> inline void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop(p);
+ guarantee(obj->is_oop_or_null(), "invalid oop");
}
+ public:
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
static VerifyOopClosure verify_oop;
};
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/genOopClosures.inline.hpp
--- a/hotspot/src/share/vm/memory/genOopClosures.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genOopClosures.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -38,10 +38,10 @@
}
}
-inline void OopsInGenClosure::do_barrier(oop* p) {
+template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
- oop obj = *p;
- assert(obj != NULL, "expected non-null object");
+ assert(!oopDesc::is_null(*p), "expected non-null object");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < _gen_boundary) {
_rs->inline_write_ref_field_gc(p, obj);
@@ -49,18 +49,17 @@
}
// NOTE! Any changes made here should also be made
-// in FastScanClosure::do_oop();
-inline void ScanClosure::do_oop(oop* p) {
- oop obj = *p;
+// in FastScanClosure::do_oop_work()
+template <class T> inline void ScanClosure::do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
// Should we copy the obj?
- if (obj != NULL) {
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
- if (obj->is_forwarded()) {
- *p = obj->forwardee();
- } else {
- *p = _g->copy_to_survivor_space(obj, p);
- }
+ oop new_obj = obj->is_forwarded() ? obj->forwardee()
+ : _g->copy_to_survivor_space(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
if (_gc_barrier) {
// Now call parent closure
@@ -69,23 +68,21 @@
}
}
-inline void ScanClosure::do_oop_nv(oop* p) {
- ScanClosure::do_oop(p);
-}
+inline void ScanClosure::do_oop_nv(oop* p) { ScanClosure::do_oop_work(p); }
+inline void ScanClosure::do_oop_nv(narrowOop* p) { ScanClosure::do_oop_work(p); }
// NOTE! Any changes made here should also be made
-// in ScanClosure::do_oop();
-inline void FastScanClosure::do_oop(oop* p) {
- oop obj = *p;
+// in ScanClosure::do_oop_work()
+template <class T> inline void FastScanClosure::do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
// Should we copy the obj?
- if (obj != NULL) {
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
- if (obj->is_forwarded()) {
- *p = obj->forwardee();
- } else {
- *p = _g->copy_to_survivor_space(obj, p);
- }
+ oop new_obj = obj->is_forwarded() ? obj->forwardee()
+ : _g->copy_to_survivor_space(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (_gc_barrier) {
// Now call parent closure
do_barrier(p);
@@ -94,26 +91,22 @@
}
}
-inline void FastScanClosure::do_oop_nv(oop* p) {
- FastScanClosure::do_oop(p);
-}
+inline void FastScanClosure::do_oop_nv(oop* p) { FastScanClosure::do_oop_work(p); }
+inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }
// Note similarity to ScanClosure; the difference is that
// the barrier set is taken care of outside this closure.
-inline void ScanWeakRefClosure::do_oop(oop* p) {
- oop obj = *p;
- assert (obj != NULL, "null weak reference?");
+template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
+ assert(!oopDesc::is_null(*p), "null weak reference?");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// weak references are sometimes scanned twice; must check
// that to-space doesn't already contain this object
if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
- if (obj->is_forwarded()) {
- *p = obj->forwardee();
- } else {
- *p = _g->copy_to_survivor_space(obj, p);
- }
+ oop new_obj = obj->is_forwarded() ? obj->forwardee()
+ : _g->copy_to_survivor_space(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
}
-inline void ScanWeakRefClosure::do_oop_nv(oop* p) {
- ScanWeakRefClosure::do_oop(p);
-}
+inline void ScanWeakRefClosure::do_oop_nv(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
+inline void ScanWeakRefClosure::do_oop_nv(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
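The scan closures above now funnel every slot access through load_heap_oop / is_null / decode_heap_oop_not_null on the read side and encode_store_heap_oop_not_null on the write side instead of dereferencing oop* directly. A standalone sketch of the base-and-shift arithmetic such encode/decode helpers perform under compressed oops (the base, shift, and helper names here are illustrative choices, not the VM's actual values):

    #include <cassert>
    #include <cstdint>

    typedef uint64_t oop_addr;   // a full 64-bit object address (simplified)
    typedef uint32_t narrowOop;  // a compressed, heap-relative reference

    const oop_addr heap_base = 0x0000000800000000ULL;  // illustrative base
    const int      shift     = 3;                      // 8-byte object alignment

    // Widen a 32-bit offset back into a full address.
    inline oop_addr decode_not_null(narrowOop v) {
      return heap_base + ((oop_addr)v << shift);
    }
    // Store a full address as a shifted 32-bit offset from the heap base;
    // 4G slots times 8-byte alignment is what bounds such a heap at 32 GB.
    inline narrowOop encode_not_null(oop_addr o) {
      return (narrowOop)((o - heap_base) >> shift);
    }

    int main() {
      oop_addr obj = heap_base + 0x12345678ULL * 8;  // some aligned heap address
      narrowOop n  = encode_not_null(obj);
      assert(decode_not_null(n) == obj);             // round-trips exactly
      return (int)(n != 0 ? 0 : 1);
    }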
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/genRemSet.hpp
--- a/hotspot/src/share/vm/memory/genRemSet.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genRemSet.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -68,13 +68,13 @@
// This method is used to notify the remembered set that "new_val" has
// been written into "field" by the garbage collector.
- void write_ref_field_gc(oop* field, oop new_val);
+ void write_ref_field_gc(void* field, oop new_val);
protected:
- virtual void write_ref_field_gc_work(oop* field, oop new_val) = 0;
+ virtual void write_ref_field_gc_work(void* field, oop new_val) = 0;
public:
// A version of the above suitable for use by parallel collectors.
- virtual void write_ref_field_gc_par(oop* field, oop new_val) = 0;
+ virtual void write_ref_field_gc_par(void* field, oop new_val) = 0;
// Resize one of the regions covered by the remembered set.
virtual void resize_covered_region(MemRegion new_region) = 0;
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/genRemSet.inline.hpp
--- a/hotspot/src/share/vm/memory/genRemSet.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genRemSet.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -26,7 +26,7 @@
// performance-critical call when the rem set is the most common
// card-table kind.
-void GenRemSet::write_ref_field_gc(oop* field, oop new_val) {
+void GenRemSet::write_ref_field_gc(void* field, oop new_val) {
if (kind() == CardTableModRef) {
((CardTableRS*)this)->inline_write_ref_field_gc(field, new_val);
} else {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/generation.cpp
--- a/hotspot/src/share/vm/memory/generation.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/generation.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -171,7 +171,7 @@
}
// Ignores "ref" and calls allocate().
-oop Generation::promote(oop obj, size_t obj_size, oop* ref) {
+oop Generation::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
#ifndef PRODUCT
@@ -186,7 +186,7 @@
return oop(result);
} else {
GenCollectedHeap* gch = GenCollectedHeap::heap();
- return gch->handle_failed_promotion(this, obj, obj_size, ref);
+ return gch->handle_failed_promotion(this, obj, obj_size);
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/generation.hpp
--- a/hotspot/src/share/vm/memory/generation.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/generation.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -295,13 +295,7 @@
//
// The "obj_size" argument is just obj->size(), passed along so the caller can
// avoid repeating the virtual call to retrieve it.
- //
- // The "ref" argument, if non-NULL, is the address of some reference to "obj"
- // (that is "*ref == obj"); some generations may use this information to, for
- // example, influence placement decisions.
- //
- // The default implementation ignores "ref" and calls allocate().
- virtual oop promote(oop obj, size_t obj_size, oop* ref);
+ virtual oop promote(oop obj, size_t obj_size);
// Thread "thread_num" (0 <= i < ParalleGCThreads) wants to promote
// object "obj", whose original mark word was "m", and whose size is
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/iterator.hpp
--- a/hotspot/src/share/vm/memory/iterator.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/iterator.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -35,6 +35,8 @@
OopClosure() : _ref_processor(NULL) { }
virtual void do_oop(oop* o) = 0;
virtual void do_oop_v(oop* o) { do_oop(o); }
+ virtual void do_oop(narrowOop* o) = 0;
+ virtual void do_oop_v(narrowOop* o) { do_oop(o); }
// In support of post-processing of weak links of KlassKlass objects;
// see KlassKlass::oop_oop_iterate().
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/modRefBarrierSet.hpp
--- a/hotspot/src/share/vm/memory/modRefBarrierSet.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/modRefBarrierSet.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -37,19 +37,19 @@
bool has_write_ref_barrier() { return true; }
bool has_write_prim_barrier() { return false; }
- bool read_ref_needs_barrier(oop* field) { return false; }
+ bool read_ref_needs_barrier(void* field) { return false; }
bool read_prim_needs_barrier(HeapWord* field, size_t bytes) { return false; }
- virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0;
+ virtual bool write_ref_needs_barrier(void* field, oop new_val) = 0;
bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
juint val1, juint val2) { return false; }
void write_prim_field(oop obj, size_t offset, size_t bytes,
juint val1, juint val2) {}
- void read_ref_field(oop* field) {}
+ void read_ref_field(void* field) {}
void read_prim_field(HeapWord* field, size_t bytes) {}
protected:
- virtual void write_ref_field_work(oop* field, oop new_val) = 0;
+ virtual void write_ref_field_work(void* field, oop new_val) = 0;
public:
void write_prim_field(HeapWord* field, size_t bytes,
juint val1, juint val2) {}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/referenceProcessor.cpp
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -28,16 +28,32 @@
// List of discovered references.
class DiscoveredList {
public:
- DiscoveredList() : _head(NULL), _len(0) { }
- oop head() const { return _head; }
- oop* head_ptr() { return &_head; }
- void set_head(oop o) { _head = o; }
- bool empty() const { return _head == ReferenceProcessor::_sentinelRef; }
+ DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
+ oop head() const {
+ return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
+ _oop_head;
+ }
+ HeapWord* adr_head() {
+ return UseCompressedOops ? (HeapWord*)&_compressed_head :
+ (HeapWord*)&_oop_head;
+ }
+ void set_head(oop o) {
+ if (UseCompressedOops) {
+ // Must compress the head ptr.
+ _compressed_head = oopDesc::encode_heap_oop_not_null(o);
+ } else {
+ _oop_head = o;
+ }
+ }
+ bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
size_t length() { return _len; }
void set_length(size_t len) { _len = len; }
private:
+ // Set value depending on UseCompressedOops. This could be a template class
+ // but then we have to fix all the instantiations and declarations that use this class.
+ oop _oop_head;
+ narrowOop _compressed_head;
size_t _len;
- oop _head;
};
oop ReferenceProcessor::_sentinelRef = NULL;
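DiscoveredList above keeps its head in whichever representation the VM is running with: a narrowOop field when UseCompressedOops is set, a full oop otherwise, with head()/set_head()/adr_head() hiding the choice from callers. A standalone sketch of that dual-representation accessor (simplified types and an illustrative encoding; the flag is a plain global here):

    #include <cstdint>

    typedef uint64_t oop;        // full-width reference (simplified to an address)
    typedef uint32_t narrowOop;  // compressed reference (simplified)

    bool UseCompressedOops = true;            // chosen once at VM startup
    const oop heap_base = 0x800000000ULL;     // illustrative encoding base
    const int shift     = 3;

    inline narrowOop encode(oop o)       { return (narrowOop)((o - heap_base) >> shift); }
    inline oop       decode(narrowOop n) { return heap_base + ((oop)n << shift); }

    // Keeps the list head in whichever representation is active, as
    // DiscoveredList above does with _oop_head / _compressed_head.
    class DiscoveredListSketch {
     public:
      DiscoveredListSketch() : _oop_head(0), _compressed_head(0) {}
      oop head() const {
        return UseCompressedOops ? decode(_compressed_head) : _oop_head;
      }
      void set_head(oop o) {
        if (UseCompressedOops) { _compressed_head = encode(o); }
        else                   { _oop_head = o; }
      }
      // Callers receive a raw slot address and must cast it to the right width.
      void* adr_head() {
        return UseCompressedOops ? (void*)&_compressed_head : (void*)&_oop_head;
      }
     private:
      oop       _oop_head;
      narrowOop _compressed_head;
    };

    int main() {
      DiscoveredListSketch list;
      oop ref = heap_base + 64;               // an aligned pseudo-reference
      list.set_head(ref);
      return list.head() == ref ? 0 : 1;
    }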
@@ -49,11 +65,11 @@
}
void ReferenceProcessor::init_statics() {
- assert(_sentinelRef == NULL, "should be initialized precsiely once");
+ assert(_sentinelRef == NULL, "should be initialized precisely once");
EXCEPTION_MARK;
_sentinelRef = instanceKlass::cast(
- SystemDictionary::object_klass())->
- allocate_permanent_instance(THREAD);
+ SystemDictionary::reference_klass())->
+ allocate_permanent_instance(THREAD);
// Initialize the master soft ref clock.
java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
@@ -69,15 +85,13 @@
"Unrecongnized RefDiscoveryPolicy");
}
-
-ReferenceProcessor* ReferenceProcessor::create_ref_processor(
- MemRegion span,
- bool atomic_discovery,
- bool mt_discovery,
- BoolObjectClosure* is_alive_non_header,
- int parallel_gc_threads,
- bool mt_processing)
-{
+ReferenceProcessor*
+ReferenceProcessor::create_ref_processor(MemRegion span,
+ bool atomic_discovery,
+ bool mt_discovery,
+ BoolObjectClosure* is_alive_non_header,
+ int parallel_gc_threads,
+ bool mt_processing) {
int mt_degree = 1;
if (parallel_gc_threads > 1) {
mt_degree = parallel_gc_threads;
@@ -93,10 +107,11 @@
return rp;
}
-
ReferenceProcessor::ReferenceProcessor(MemRegion span,
- bool atomic_discovery, bool mt_discovery, int mt_degree,
- bool mt_processing) :
+ bool atomic_discovery,
+ bool mt_discovery,
+ int mt_degree,
+ bool mt_processing) :
_discovering_refs(false),
_enqueuing_is_done(false),
_is_alive_non_header(NULL),
@@ -114,10 +129,10 @@
_discoveredWeakRefs = &_discoveredSoftRefs[_num_q];
_discoveredFinalRefs = &_discoveredWeakRefs[_num_q];
_discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
- assert(_sentinelRef != NULL, "_sentinelRef is NULL");
+ assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
// Initialized all entries to _sentinelRef
for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
- _discoveredSoftRefs[i].set_head(_sentinelRef);
+ _discoveredSoftRefs[i].set_head(sentinel_ref());
_discoveredSoftRefs[i].set_length(0);
}
}
@@ -134,16 +149,19 @@
void ReferenceProcessor::weak_oops_do(OopClosure* f) {
for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
- f->do_oop(_discoveredSoftRefs[i].head_ptr());
+ if (UseCompressedOops) {
+ f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
+ } else {
+ f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
+ }
}
}
void ReferenceProcessor::oops_do(OopClosure* f) {
- f->do_oop(&_sentinelRef);
+ f->do_oop(adr_sentinel_ref());
}
-void ReferenceProcessor::update_soft_ref_master_clock()
-{
+void ReferenceProcessor::update_soft_ref_master_clock() {
// Update (advance) the soft ref master clock field. This must be done
// after processing the soft ref list.
jlong now = os::javaTimeMillis();
@@ -164,9 +182,7 @@
// past clock value.
}
-
-void
-ReferenceProcessor::process_discovered_references(
+void ReferenceProcessor::process_discovered_references(
ReferencePolicy* policy,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
@@ -223,15 +239,13 @@
}
}
-
#ifndef PRODUCT
// Calculate the number of jni handles.
-unsigned int ReferenceProcessor::count_jni_refs()
-{
+uint ReferenceProcessor::count_jni_refs() {
class AlwaysAliveClosure: public BoolObjectClosure {
public:
- bool do_object_b(oop obj) { return true; }
- void do_object(oop obj) { assert(false, "Don't call"); }
+ virtual bool do_object_b(oop obj) { return true; }
+ virtual void do_object(oop obj) { assert(false, "Don't call"); }
};
class CountHandleClosure: public OopClosure {
@@ -239,9 +253,8 @@
int _count;
public:
CountHandleClosure(): _count(0) {}
- void do_oop(oop* unused) {
- _count++;
- }
+ void do_oop(oop* unused) { _count++; }
+ void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
int count() { return _count; }
};
CountHandleClosure global_handle_count;
@@ -262,36 +275,48 @@
#endif
JNIHandles::weak_oops_do(is_alive, keep_alive);
// Finally remember to keep sentinel around
- keep_alive->do_oop(&_sentinelRef);
+ keep_alive->do_oop(adr_sentinel_ref());
complete_gc->do_void();
}
-bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
- NOT_PRODUCT(verify_ok_to_handle_reflists());
+
+template <class T>
+static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
+ AbstractRefProcTaskExecutor* task_executor) {
+
// Remember old value of pending references list
- oop* pending_list_addr = java_lang_ref_Reference::pending_list_addr();
- oop old_pending_list_value = *pending_list_addr;
+ T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
+ T old_pending_list_value = *pending_list_addr;
// Enqueue references that are not made active again, and
// clear the decks for the next collection (cycle).
- enqueue_discovered_reflists(pending_list_addr, task_executor);
+ ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
// Do the oop-check on pending_list_addr missed in
// enqueue_discovered_reflist. We should probably
// do a raw oop_check so that future such idempotent
// oop_stores relying on the oop-check side-effect
// may be elided automatically and safely without
// affecting correctness.
- oop_store(pending_list_addr, *(pending_list_addr));
+ oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
// Stop treating discovered references specially.
- disable_discovery();
+ ref->disable_discovery();
// Return true if new pending references were added
return old_pending_list_value != *pending_list_addr;
}
+bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
+ NOT_PRODUCT(verify_ok_to_handle_reflists());
+ if (UseCompressedOops) {
+ return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
+ } else {
+ return enqueue_discovered_ref_helper<oop>(this, task_executor);
+ }
+}
+
void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
- oop* pending_list_addr) {
+ HeapWord* pending_list_addr) {
// Given a list of refs linked through the "discovered" field
// (java.lang.ref.Reference.discovered) chain them through the
// "next" field (java.lang.ref.Reference.next) and prepend
@@ -305,19 +330,19 @@
// the next field and clearing it (except for the last
// non-sentinel object which is treated specially to avoid
// confusion with an active reference).
- while (obj != _sentinelRef) {
+ while (obj != sentinel_ref()) {
assert(obj->is_instanceRef(), "should be reference object");
oop next = java_lang_ref_Reference::discovered(obj);
if (TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
- (oopDesc*) obj, (oopDesc*) next);
+ gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
+ obj, next);
}
- assert(*java_lang_ref_Reference::next_addr(obj) == NULL,
- "The reference should not be enqueued");
- if (next == _sentinelRef) { // obj is last
+ assert(java_lang_ref_Reference::next(obj) == NULL,
+ "The reference should not be enqueued");
+ if (next == sentinel_ref()) { // obj is last
// Swap refs_list into pending_list_addr and
// set obj's next to what we read from pending_list_addr.
- oop old = (oop)Atomic::xchg_ptr(refs_list.head(), pending_list_addr);
+ oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
// Need oop_check on pending_list_addr above;
// see special oop-check code at the end of
// enqueue_discovered_reflists() further below.
@@ -341,15 +366,14 @@
public:
RefProcEnqueueTask(ReferenceProcessor& ref_processor,
DiscoveredList discovered_refs[],
- oop* pending_list_addr,
+ HeapWord* pending_list_addr,
oop sentinel_ref,
int n_queues)
: EnqueueTask(ref_processor, discovered_refs,
pending_list_addr, sentinel_ref, n_queues)
{ }
- virtual void work(unsigned int work_id)
- {
+ virtual void work(unsigned int work_id) {
assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
// Simplest first cut: static partitioning.
int index = work_id;
@@ -363,18 +387,18 @@
};
// Enqueue references that are not made active again
-void ReferenceProcessor::enqueue_discovered_reflists(oop* pending_list_addr,
+void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
AbstractRefProcTaskExecutor* task_executor) {
if (_processing_is_mt && task_executor != NULL) {
// Parallel code
RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
- pending_list_addr, _sentinelRef, _num_q);
+ pending_list_addr, sentinel_ref(), _num_q);
task_executor->execute(tsk);
} else {
// Serial code: call the parent class's implementation
for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
- _discoveredSoftRefs[i].set_head(_sentinelRef);
+ _discoveredSoftRefs[i].set_head(sentinel_ref());
_discoveredSoftRefs[i].set_length(0);
}
}
@@ -388,14 +412,13 @@
BoolObjectClosure* is_alive);
// End Of List.
- inline bool has_next() const
- { return _next != ReferenceProcessor::_sentinelRef; }
+ inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
// Get oop to the Reference object.
- inline oop obj() const { return _ref; }
+ inline oop obj() const { return _ref; }
// Get oop to the referent object.
- inline oop referent() const { return _referent; }
+ inline oop referent() const { return _referent; }
// Returns true if referent is alive.
inline bool is_referent_alive() const;
@@ -417,13 +440,26 @@
inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
// Make the referent alive.
- inline void make_referent_alive() { _keep_alive->do_oop(_referent_addr); }
+ inline void make_referent_alive() {
+ if (UseCompressedOops) {
+ _keep_alive->do_oop((narrowOop*)_referent_addr);
+ } else {
+ _keep_alive->do_oop((oop*)_referent_addr);
+ }
+ }
// Update the discovered field.
- inline void update_discovered() { _keep_alive->do_oop(_prev_next); }
+ inline void update_discovered() {
+ // First _prev_next ref actually points into DiscoveredList (gross).
+ if (UseCompressedOops) {
+ _keep_alive->do_oop((narrowOop*)_prev_next);
+ } else {
+ _keep_alive->do_oop((oop*)_prev_next);
+ }
+ }
// NULL out referent pointer.
- inline void clear_referent() { *_referent_addr = NULL; }
+ inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
// Statistics
NOT_PRODUCT(
@@ -436,11 +472,11 @@
private:
DiscoveredList& _refs_list;
- oop* _prev_next;
+ HeapWord* _prev_next;
oop _ref;
- oop* _discovered_addr;
+ HeapWord* _discovered_addr;
oop _next;
- oop* _referent_addr;
+ HeapWord* _referent_addr;
oop _referent;
OopClosure* _keep_alive;
BoolObjectClosure* _is_alive;
@@ -457,7 +493,7 @@
OopClosure* keep_alive,
BoolObjectClosure* is_alive)
: _refs_list(refs_list),
- _prev_next(refs_list.head_ptr()),
+ _prev_next(refs_list.adr_head()),
_ref(refs_list.head()),
#ifdef ASSERT
_first_seen(refs_list.head()),
@@ -471,19 +507,18 @@
_is_alive(is_alive)
{ }
-inline bool DiscoveredListIterator::is_referent_alive() const
-{
+inline bool DiscoveredListIterator::is_referent_alive() const {
return _is_alive->do_object_b(_referent);
}
-inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent))
-{
+inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
_discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
- assert(_discovered_addr && (*_discovered_addr)->is_oop_or_null(),
+ oop discovered = java_lang_ref_Reference::discovered(_ref);
+ assert(_discovered_addr && discovered->is_oop_or_null(),
"discovered field is bad");
- _next = *_discovered_addr;
+ _next = discovered;
_referent_addr = java_lang_ref_Reference::referent_addr(_ref);
- _referent = *_referent_addr;
+ _referent = java_lang_ref_Reference::referent(_ref);
assert(Universe::heap()->is_in_reserved_or_null(_referent),
"Wrong oop found in java.lang.Reference object");
assert(allow_null_referent ?
@@ -492,32 +527,32 @@
"bad referent");
}
-inline void DiscoveredListIterator::next()
-{
+inline void DiscoveredListIterator::next() {
_prev_next = _discovered_addr;
move_to_next();
}
-inline void DiscoveredListIterator::remove()
-{
+inline void DiscoveredListIterator::remove() {
assert(_ref->is_oop(), "Dropping a bad reference");
- // Clear the discovered_addr field so that the object does
- // not look like it has been discovered.
- *_discovered_addr = NULL;
- // Remove Reference object from list.
- *_prev_next = _next;
+ oop_store_raw(_discovered_addr, NULL);
+ // First _prev_next ref actually points into DiscoveredList (gross).
+ if (UseCompressedOops) {
+ // Remove Reference object from list.
+ oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
+ } else {
+ // Remove Reference object from list.
+ oopDesc::store_heap_oop((oop*)_prev_next, _next);
+ }
NOT_PRODUCT(_removed++);
move_to_next();
}
-inline void DiscoveredListIterator::move_to_next()
-{
+inline void DiscoveredListIterator::move_to_next() {
_ref = _next;
assert(_ref != _first_seen, "cyclic ref_list found");
NOT_PRODUCT(_processed++);
}
-
// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
@@ -531,13 +566,13 @@
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
-ReferenceProcessor::process_phase1(DiscoveredList& refs_list_addr,
+ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
ReferencePolicy* policy,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc) {
assert(policy != NULL, "Must have a non-NULL policy");
- DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
// Decide which softly reachable refs should be kept alive.
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
@@ -545,7 +580,7 @@
if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
- (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+ iter.obj(), iter.obj()->blueprint()->internal_name());
}
// Make the Reference object active again
iter.make_active();
@@ -570,20 +605,19 @@
// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
-ReferenceProcessor::pp2_work(DiscoveredList& refs_list_addr,
+ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
- OopClosure* keep_alive)
-{
+ OopClosure* keep_alive) {
assert(discovery_is_atomic(), "Error");
- DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
- DEBUG_ONLY(oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());)
- assert(*next_addr == NULL, "Should not discover inactive Reference");
+ DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
+ assert(next == NULL, "Should not discover inactive Reference");
if (iter.is_referent_alive()) {
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
- (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+ iter.obj(), iter.obj()->blueprint()->internal_name());
}
// The referent is reachable after all.
// Update the referent pointer as necessary: Note that this
@@ -605,25 +639,28 @@
}
void
-ReferenceProcessor::pp2_work_concurrent_discovery(
- DiscoveredList& refs_list_addr,
- BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- VoidClosure* complete_gc)
-{
+ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list,
+ BoolObjectClosure* is_alive,
+ OopClosure* keep_alive,
+ VoidClosure* complete_gc) {
assert(!discovery_is_atomic(), "Error");
- DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
- oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+ HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+ oop next = java_lang_ref_Reference::next(iter.obj());
if ((iter.referent() == NULL || iter.is_referent_alive() ||
- *next_addr != NULL)) {
- assert((*next_addr)->is_oop_or_null(), "bad next field");
+ next != NULL)) {
+ assert(next->is_oop_or_null(), "bad next field");
// Remove Reference object from list
iter.remove();
// Trace the cohorts
iter.make_referent_alive();
- keep_alive->do_oop(next_addr);
+ if (UseCompressedOops) {
+ keep_alive->do_oop((narrowOop*)next_addr);
+ } else {
+ keep_alive->do_oop((oop*)next_addr);
+ }
} else {
iter.next();
}
@@ -639,15 +676,15 @@
}
// Traverse the list and process the referents, by either
-// either clearing them or keeping them (and their reachable
+// clearing them or keeping them (and their reachable
// closure) alive.
void
-ReferenceProcessor::process_phase3(DiscoveredList& refs_list_addr,
+ReferenceProcessor::process_phase3(DiscoveredList& refs_list,
bool clear_referent,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc) {
- DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
while (iter.has_next()) {
iter.update_discovered();
iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
@@ -661,7 +698,7 @@
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
clear_referent ? "cleared " : "",
- (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+ iter.obj(), iter.obj()->blueprint()->internal_name());
}
assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
// If discovery is concurrent, we may have objects with null referents,
@@ -679,15 +716,15 @@
}
void
-ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& ref_list) {
- oop obj = ref_list.head();
- while (obj != _sentinelRef) {
- oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
- obj = *discovered_addr;
- *discovered_addr = NULL;
+ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
+ oop obj = refs_list.head();
+ while (obj != sentinel_ref()) {
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ java_lang_ref_Reference::set_discovered_raw(obj, NULL);
+ obj = discovered;
}
- ref_list.set_head(_sentinelRef);
- ref_list.set_length(0);
+ refs_list.set_head(sentinel_ref());
+ refs_list.set_length(0);
}
void
@@ -777,7 +814,7 @@
// find an element to split the list on
for (size_t j = 0; j < refs_to_move; ++j) {
move_tail = new_head;
- new_head = *java_lang_ref_Reference::discovered_addr(new_head);
+ new_head = java_lang_ref_Reference::discovered(new_head);
}
java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
ref_lists[to_idx].set_head(move_head);
@@ -875,17 +912,17 @@
size_t length = refs_list.length();
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
- oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
- assert((*next_addr)->is_oop_or_null(), "bad next field");
+ oop next = java_lang_ref_Reference::next(iter.obj());
+ assert(next->is_oop_or_null(), "bad next field");
// If referent has been cleared or Reference is not active,
// drop it.
- if (iter.referent() == NULL || *next_addr != NULL) {
+ if (iter.referent() == NULL || next != NULL) {
debug_only(
if (PrintGCDetails && TraceReferenceGC) {
gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
INTPTR_FORMAT " with next field: " INTPTR_FORMAT
" and referent: " INTPTR_FORMAT,
- (address)iter.obj(), (address)*next_addr, (address)iter.referent());
+ iter.obj(), next, iter.referent());
}
)
// Remove Reference object from list
@@ -950,18 +987,21 @@
return list;
}
-inline void ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& list,
- oop obj, oop* discovered_addr) {
+inline void
+ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
+ oop obj,
+ HeapWord* discovered_addr) {
assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
// First we must make sure this object is only enqueued once. CAS in a non null
// discovered_addr.
- oop retest = (oop)Atomic::cmpxchg_ptr(list.head(), discovered_addr, NULL);
+ oop retest = oopDesc::atomic_compare_exchange_oop(refs_list.head(), discovered_addr,
+ NULL);
if (retest == NULL) {
// This thread just won the right to enqueue the object.
// We have separate lists for enqueueing so no synchronization
// is necessary.
- list.set_head(obj);
- list.set_length(list.length() + 1);
+ refs_list.set_head(obj);
+ refs_list.set_length(refs_list.length() + 1);
} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...
@@ -972,7 +1012,6 @@
}
}
-
// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
// (or part of the heap being collected, indicated by our "span"
@@ -1006,8 +1045,8 @@
return false;
}
// We only enqueue active references.
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- if (*next_addr != NULL) {
+ oop next = java_lang_ref_Reference::next(obj);
+ if (next != NULL) {
return false;
}
@@ -1034,14 +1073,14 @@
}
}
- oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
- assert(discovered_addr != NULL && (*discovered_addr)->is_oop_or_null(),
- "bad discovered field");
- if (*discovered_addr != NULL) {
+ HeapWord* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ assert(discovered->is_oop_or_null(), "bad discovered field");
+ if (discovered != NULL) {
// The reference has already been discovered...
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
- (oopDesc*)obj, obj->blueprint()->internal_name());
+ obj, obj->blueprint()->internal_name());
}
if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
// assumes that an object is not processed twice;
@@ -1088,7 +1127,7 @@
if (_discovery_is_mt) {
add_to_discovered_list_mt(*list, obj, discovered_addr);
} else {
- *discovered_addr = list->head();
+ oop_store_raw(discovered_addr, list->head());
list->set_head(obj);
list->set_length(list->length() + 1);
}
@@ -1106,7 +1145,7 @@
oop referent = java_lang_ref_Reference::referent(obj);
if (PrintGCDetails) {
gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
- (oopDesc*) obj, obj->blueprint()->internal_name());
+ obj, obj->blueprint()->internal_name());
}
assert(referent->is_oop(), "Enqueued a bad referent");
}
@@ -1181,17 +1220,20 @@
// are not active (have a non-NULL next field). NOTE: For this to work
// correctly, refs discovery can not be happening concurrently with this
// step.
-void ReferenceProcessor::preclean_discovered_reflist(
- DiscoveredList& refs_list, BoolObjectClosure* is_alive,
- OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield) {
-
+void
+ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
+ BoolObjectClosure* is_alive,
+ OopClosure* keep_alive,
+ VoidClosure* complete_gc,
+ YieldClosure* yield) {
DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
size_t length = refs_list.length();
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
- oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+ oop obj = iter.obj();
+ oop next = java_lang_ref_Reference::next(obj);
if (iter.referent() == NULL || iter.is_referent_alive() ||
- *next_addr != NULL) {
+ next != NULL) {
// The referent has been cleared, or is alive, or the Reference is not
// active; we need to trace and mark its cohort.
if (TraceReferenceGC) {
@@ -1203,7 +1245,13 @@
--length;
// Keep alive its cohort.
iter.make_referent_alive();
- keep_alive->do_oop(next_addr);
+ if (UseCompressedOops) {
+ narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
+ keep_alive->do_oop(next_addr);
+ } else {
+ oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
+ keep_alive->do_oop(next_addr);
+ }
} else {
iter.next();
}
@@ -1241,7 +1289,7 @@
#endif
void ReferenceProcessor::verify() {
- guarantee(_sentinelRef != NULL && _sentinelRef->is_oop(), "Lost _sentinelRef");
+ guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}
#ifndef PRODUCT
@@ -1249,12 +1297,12 @@
guarantee(!_discovering_refs, "Discovering refs?");
for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
oop obj = _discoveredSoftRefs[i].head();
- while (obj != _sentinelRef) {
+ while (obj != sentinel_ref()) {
oop next = java_lang_ref_Reference::discovered(obj);
java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
obj = next;
}
- _discoveredSoftRefs[i].set_head(_sentinelRef);
+ _discoveredSoftRefs[i].set_head(sentinel_ref());
_discoveredSoftRefs[i].set_length(0);
}
}
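The hunk above shows the pattern used throughout the reference processor in this change: a Reference field address is carried around untyped and only cast to narrowOop* or oop* at the point where a closure is applied, with UseCompressedOops selecting the width. A minimal self-contained sketch of that dispatch (the types below are illustrative stand-ins, not HotSpot's declarations):

#include <cstdint>

typedef uint32_t narrowOop;   // stand-in for a 32-bit compressed reference
typedef void*    oop;         // stand-in for a full-width reference
typedef char     HeapWord;    // field addresses are passed around untyped

struct OopClosure {           // closures accept either pointer width
  virtual void do_oop(oop* p) = 0;
  virtual void do_oop(narrowOop* p) = 0;
};

// Apply a closure to a reference field whose address is only known as HeapWord*.
// The flag decides which overload, and therefore which load width, is used.
void apply_to_field(bool use_compressed_oops, HeapWord* field_addr, OopClosure* cl) {
  if (use_compressed_oops) {
    cl->do_oop(reinterpret_cast<narrowOop*>(field_addr));
  } else {
    cl->do_oop(reinterpret_cast<oop*>(field_addr));
  }
}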
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/referenceProcessor.hpp
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -45,8 +45,6 @@
class DiscoveredList;
class ReferenceProcessor : public CHeapObj {
- friend class DiscoveredList;
- friend class DiscoveredListIterator;
protected:
// End of list marker
static oop _sentinelRef;
@@ -70,16 +68,20 @@
BoolObjectClosure* _is_alive_non_header;
// The discovered ref lists themselves
- int _num_q; // the MT'ness degree of the queues below
- DiscoveredList* _discoveredSoftRefs; // pointer to array of oops
+
+ // The MT'ness degree of the queues below
+ int _num_q;
+ // Arrays of lists of oops, one per thread
+ DiscoveredList* _discoveredSoftRefs;
DiscoveredList* _discoveredWeakRefs;
DiscoveredList* _discoveredFinalRefs;
DiscoveredList* _discoveredPhantomRefs;
public:
- int num_q() { return _num_q; }
+ int num_q() { return _num_q; }
DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
- static oop* sentinel_ref() { return &_sentinelRef; }
+ static oop sentinel_ref() { return _sentinelRef; }
+ static oop* adr_sentinel_ref() { return &_sentinelRef; }
public:
// Process references with a certain reachability level.
@@ -98,45 +100,45 @@
// Work methods used by the method process_discovered_reflist
// Phase1: keep alive all those referents that are otherwise
// dead but which must be kept alive by policy (and their closure).
- void process_phase1(DiscoveredList& refs_list_addr,
+ void process_phase1(DiscoveredList& refs_list,
ReferencePolicy* policy,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc);
// Phase2: remove all those references whose referents are
// reachable.
- inline void process_phase2(DiscoveredList& refs_list_addr,
+ inline void process_phase2(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc) {
if (discovery_is_atomic()) {
// complete_gc is ignored in this case for this phase
- pp2_work(refs_list_addr, is_alive, keep_alive);
+ pp2_work(refs_list, is_alive, keep_alive);
} else {
assert(complete_gc != NULL, "Error");
- pp2_work_concurrent_discovery(refs_list_addr, is_alive,
+ pp2_work_concurrent_discovery(refs_list, is_alive,
keep_alive, complete_gc);
}
}
// Work methods in support of process_phase2
- void pp2_work(DiscoveredList& refs_list_addr,
+ void pp2_work(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive);
void pp2_work_concurrent_discovery(
- DiscoveredList& refs_list_addr,
+ DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc);
// Phase3: process the referents by either clearing them
// or keeping them alive (and their closure)
- void process_phase3(DiscoveredList& refs_list_addr,
+ void process_phase3(DiscoveredList& refs_list,
bool clear_referent,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc);
// Enqueue references with a certain reachability level
- void enqueue_discovered_reflist(DiscoveredList& refs_list, oop* pending_list_addr);
+ void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);
// "Preclean" all the discovered reference lists
// by removing references with strongly reachable referents.
@@ -169,6 +171,8 @@
// occupying the i / _num_q slot.
const char* list_name(int i);
+ void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
+
protected:
// "Preclean" the given discovered reference list
// by removing references with strongly reachable referents.
@@ -179,7 +183,6 @@
VoidClosure* complete_gc,
YieldClosure* yield);
- void enqueue_discovered_reflists(oop* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
int next_id() {
int id = _next_id;
if (++_next_id == _num_q) {
@@ -189,7 +192,7 @@
}
DiscoveredList* get_discovered_list(ReferenceType rt);
inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
- oop* discovered_addr);
+ HeapWord* discovered_addr);
void verify_ok_to_handle_reflists() PRODUCT_RETURN;
void abandon_partial_discovered_list(DiscoveredList& refs_list);
@@ -477,7 +480,7 @@
protected:
EnqueueTask(ReferenceProcessor& ref_processor,
DiscoveredList refs_lists[],
- oop* pending_list_addr,
+ HeapWord* pending_list_addr,
oop sentinel_ref,
int n_queues)
: _ref_processor(ref_processor),
@@ -493,7 +496,7 @@
protected:
ReferenceProcessor& _ref_processor;
DiscoveredList* _refs_lists;
- oop* _pending_list_addr;
+ HeapWord* _pending_list_addr;
oop _sentinel_ref;
int _n_queues;
};
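Switching discovered_addr and pending_list_addr to HeapWord* keeps the header free of a commitment to either pointer width; the store side then goes through a raw helper such as the oop_store_raw call in the .cpp hunk above. A sketch of that idea under the same stand-in types as before (the encoding shown is an assumption; the real one is defined on oopDesc in terms of Universe::heap_base()):

#include <cstdint>

typedef uint32_t narrowOop;
typedef void*    oop;
typedef char     HeapWord;

// Illustrative compression only: offset from an assumed base, 8-byte alignment.
static narrowOop encode(oop v, char* heap_base) {
  return (narrowOop)(((char*)v - heap_base) >> 3);
}

// Raw store through a width-agnostic field address, in the spirit of
// oop_store_raw(discovered_addr, list->head()) used above.
void store_raw(bool use_compressed_oops, HeapWord* field_addr, oop value, char* heap_base) {
  if (use_compressed_oops) {
    *reinterpret_cast<narrowOop*>(field_addr) = encode(value, heap_base);
  } else {
    *reinterpret_cast<oop*>(field_addr) = value;
  }
}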
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/restore.cpp
--- a/hotspot/src/share/vm/memory/restore.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/restore.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -50,6 +50,8 @@
*p = obj;
}
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
void do_ptr(void** p) {
assert(*p == NULL, "initializing previous initialized pointer.");
void* obj = nextOop();
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/serialize.cpp
--- a/hotspot/src/share/vm/memory/serialize.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/serialize.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -41,17 +41,18 @@
int tag = 0;
soc->do_tag(--tag);
+ assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
// Verify the sizes of various oops in the system.
soc->do_tag(sizeof(oopDesc));
soc->do_tag(sizeof(instanceOopDesc));
soc->do_tag(sizeof(methodOopDesc));
soc->do_tag(sizeof(constMethodOopDesc));
soc->do_tag(sizeof(methodDataOopDesc));
- soc->do_tag(sizeof(arrayOopDesc));
+ soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
soc->do_tag(sizeof(constantPoolOopDesc));
soc->do_tag(sizeof(constantPoolCacheOopDesc));
- soc->do_tag(sizeof(objArrayOopDesc));
- soc->do_tag(sizeof(typeArrayOopDesc));
+ soc->do_tag(objArrayOopDesc::base_offset_in_bytes(T_BYTE));
+ soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
soc->do_tag(sizeof(symbolOopDesc));
soc->do_tag(sizeof(klassOopDesc));
soc->do_tag(sizeof(markOopDesc));
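Since the array header size now depends on UseCompressedOops at runtime, the shared-archive tags above switch from sizeof(arrayOopDesc) and friends to base_offset_in_bytes(T_BYTE), and the new assert forbids combining the shared archive with compressed oops. Conceptually each do_tag value is written when the archive is dumped and re-checked when it is mapped back in; a stand-in sketch of that check (the real serializing closure is not shown in this section, so treat the shape as an assumption):

#include <cassert>
#include <cstddef>
#include <vector>

// Stand-in for dump/restore tag verification: values recorded at dump time
// must match exactly at restore time, or the archive layout has drifted.
struct TagStream {
  std::vector<int> tags;
  std::size_t pos = 0;
  bool writing = true;
  void do_tag(int v) {
    if (writing) tags.push_back(v);
    else         assert(tags[pos++] == v && "shared archive layout mismatch");
  }
};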
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/sharedHeap.cpp
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -74,9 +74,10 @@
class AssertIsPermClosure: public OopClosure {
public:
- void do_oop(oop* p) {
+ virtual void do_oop(oop* p) {
assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
}
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;
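AssertIsPermClosure above now overrides both do_oop overloads and stubs out the narrow one, because closures that only ever walk the permanent generation see full-width pointers. A stand-in sketch of that closure shape (types are illustrative, not HotSpot's):

#include <cassert>
#include <cstdint>

typedef uint32_t narrowOop;
typedef void*    oop;

struct OopClosure {
  virtual void do_oop(oop* p) = 0;
  virtual void do_oop(narrowOop* p) = 0;
};

// Perm-gen-only closures implement the narrow overload as an unreachable stub,
// mirroring the ShouldNotReachHere() bodies added in this file.
struct PermOnlyClosure : public OopClosure {
  virtual void do_oop(oop* p)       { /* real work on the full-width slot */ }
  virtual void do_oop(narrowOop* p) { assert(false && "unreachable"); }
};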
@@ -187,12 +188,13 @@
public:
SkipAdjustingSharedStrings(OopClosure* clo) : _clo(clo) {}
- void do_oop(oop* p) {
+ virtual void do_oop(oop* p) {
oop o = (*p);
if (!o->is_shared_readwrite()) {
_clo->do_oop(p);
}
}
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
// Unmarked shared Strings in the StringTable (which got there due to
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/space.cpp
--- a/hotspot/src/share/vm/memory/space.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/space.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -25,6 +25,9 @@
# include "incls/_precompiled.incl"
# include "incls/_space.cpp.incl"
+void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
+void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
+
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
HeapWord* top_obj) {
if (top_obj != NULL) {
@@ -150,10 +153,6 @@
return new DirtyCardToOopClosure(this, cl, precision, boundary);
}
-void FilteringClosure::do_oop(oop* p) {
- do_oop_nv(p);
-}
-
HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
HeapWord* top_obj) {
if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
@@ -337,7 +336,7 @@
assert(q->forwardee() == NULL, "should be forwarded to NULL");
}
- debug_only(MarkSweep::register_live_oop(q, size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size));
compact_top += size;
// we need to update the offset table so that the beginnings of objects can be
@@ -406,13 +405,13 @@
if (oop(q)->is_gc_marked()) {
// q is alive
- debug_only(MarkSweep::track_interior_pointers(oop(q)));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
- debug_only(MarkSweep::check_interior_pointers());
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
debug_only(prev_q = q);
- debug_only(MarkSweep::validate_live_oop(oop(q), size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
q += size;
} else {
@@ -884,10 +883,13 @@
class VerifyOldOopClosure : public OopClosure {
public:
- oop the_obj;
- bool allow_dirty;
+ oop _the_obj;
+ bool _allow_dirty;
void do_oop(oop* p) {
- the_obj->verify_old_oop(p, allow_dirty);
+ _the_obj->verify_old_oop(p, _allow_dirty);
+ }
+ void do_oop(narrowOop* p) {
+ _the_obj->verify_old_oop(p, _allow_dirty);
}
};
@@ -898,7 +900,7 @@
HeapWord* p = bottom();
HeapWord* prev_p = NULL;
VerifyOldOopClosure blk; // Does this do anything?
- blk.allow_dirty = allow_dirty;
+ blk._allow_dirty = allow_dirty;
int objs = 0;
int blocks = 0;
@@ -919,7 +921,7 @@
if (objs == OBJ_SAMPLE_INTERVAL) {
oop(p)->verify();
- blk.the_obj = oop(p);
+ blk._the_obj = oop(p);
oop(p)->oop_iterate(&blk);
objs = 0;
} else {
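The debug_only(MarkSweep::...) wrappers in this file become VALIDATE_MARK_SWEEP_ONLY(...), so the live-oop bookkeeping is only compiled in when mark-sweep validation is configured instead of in every debug build. The macro's definition is not part of this section; a plausible shape, stated as an assumption and mirroring how debug_only is gated:

#ifdef VALIDATE_MARK_SWEEP
  #define VALIDATE_MARK_SWEEP_ONLY(code) code
#else
  #define VALIDATE_MARK_SWEEP_ONLY(code)
#endif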
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/space.hpp
--- a/hotspot/src/share/vm/memory/space.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/space.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -52,21 +52,24 @@
class CardTableRS;
class DirtyCardToOopClosure;
-
// An oop closure that is circumscribed by a filtering memory region.
-class SpaceMemRegionOopsIterClosure: public virtual OopClosure {
- OopClosure* cl;
- MemRegion mr;
-public:
- void do_oop(oop* p) {
- if (mr.contains(p)) {
- cl->do_oop(p);
+class SpaceMemRegionOopsIterClosure: public OopClosure {
+ private:
+ OopClosure* _cl;
+ MemRegion _mr;
+ protected:
+ template <class T> void do_oop_work(T* p) {
+ if (_mr.contains(p)) {
+ _cl->do_oop(p);
}
}
- SpaceMemRegionOopsIterClosure(OopClosure* _cl, MemRegion _mr): cl(_cl), mr(_mr) {}
+ public:
+ SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
+ _cl(cl), _mr(mr) {}
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
-
// A Space describes a heap area. Class Space is an abstract
// base class.
//
@@ -279,7 +282,7 @@
CardTableModRefBS::PrecisionStyle _precision;
HeapWord* _boundary; // If non-NULL, process only non-NULL oops
// pointing below boundary.
- HeapWord* _min_done; // ObjHeadPreciseArray precision requires
+ HeapWord* _min_done; // ObjHeadPreciseArray precision requires
// a downwards traversal; this is the
// lowest location already done (or,
// alternatively, the lowest address that
@@ -508,7 +511,7 @@
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
/* size_t size = oop(q)->size(); changing this for cms for perm gen */\
- size_t size = block_size(q); \
+ size_t size = block_size(q); \
compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
q += size; \
end_of_live = q; \
@@ -572,147 +575,149 @@
cp->space->set_compaction_top(compact_top); \
}
-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
- /* adjust all the interior pointers to point at the new locations of objects \
- * Used by MarkSweep::mark_sweep_phase3() */ \
+#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
+ /* adjust all the interior pointers to point at the new locations of objects \
+ * Used by MarkSweep::mark_sweep_phase3() */ \
\
- HeapWord* q = bottom(); \
- HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
+ HeapWord* q = bottom(); \
+ HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
\
- assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
+ assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
\
- if (q < t && _first_dead > q && \
+ if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
/* we have a chunk of the space which hasn't moved and we've \
* reinitialized the mark word during the previous pass, so we can't \
- * use is_gc_marked for the traversal. */ \
+ * use is_gc_marked for the traversal. */ \
HeapWord* end = _first_dead; \
\
- while (q < end) { \
- /* I originally tried to conjoin "block_start(q) == q" to the \
- * assertion below, but that doesn't work, because you can't \
- * accurately traverse previous objects to get to the current one \
- * after their pointers (including pointers into permGen) have been \
- * updated, until the actual compaction is done. dld, 4/00 */ \
- assert(block_is_obj(q), \
- "should be at block boundaries, and should be looking at objs"); \
+ while (q < end) { \
+ /* I originally tried to conjoin "block_start(q) == q" to the \
+ * assertion below, but that doesn't work, because you can't \
+ * accurately traverse previous objects to get to the current one \
+ * after their pointers (including pointers into permGen) have been \
+ * updated, until the actual compaction is done. dld, 4/00 */ \
+ assert(block_is_obj(q), \
+ "should be at block boundaries, and should be looking at objs"); \
\
- debug_only(MarkSweep::track_interior_pointers(oop(q))); \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
\
- /* point all the oops to the new location */ \
- size_t size = oop(q)->adjust_pointers(); \
- size = adjust_obj_size(size); \
+ /* point all the oops to the new location */ \
+ size_t size = oop(q)->adjust_pointers(); \
+ size = adjust_obj_size(size); \
\
- debug_only(MarkSweep::check_interior_pointers()); \
- \
- debug_only(MarkSweep::validate_live_oop(oop(q), size)); \
- \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
+ \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
+ \
q += size; \
- } \
+ } \
\
- if (_first_dead == t) { \
- q = t; \
- } else { \
- /* $$$ This is funky. Using this to read the previously written \
- * LiveRange. See also use below. */ \
+ if (_first_dead == t) { \
+ q = t; \
+ } else { \
+ /* $$$ This is funky. Using this to read the previously written \
+ * LiveRange. See also use below. */ \
q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
- } \
- } \
+ } \
+ } \
\
const intx interval = PrefetchScanIntervalInBytes; \
\
- debug_only(HeapWord* prev_q = NULL); \
- while (q < t) { \
- /* prefetch beyond q */ \
+ debug_only(HeapWord* prev_q = NULL); \
+ while (q < t) { \
+ /* prefetch beyond q */ \
Prefetch::write(q, interval); \
- if (oop(q)->is_gc_marked()) { \
- /* q is alive */ \
- debug_only(MarkSweep::track_interior_pointers(oop(q))); \
- /* point all the oops to the new location */ \
- size_t size = oop(q)->adjust_pointers(); \
- size = adjust_obj_size(size); \
- debug_only(MarkSweep::check_interior_pointers()); \
- debug_only(MarkSweep::validate_live_oop(oop(q), size)); \
- debug_only(prev_q = q); \
+ if (oop(q)->is_gc_marked()) { \
+ /* q is alive */ \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
+ /* point all the oops to the new location */ \
+ size_t size = oop(q)->adjust_pointers(); \
+ size = adjust_obj_size(size); \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
+ debug_only(prev_q = q); \
q += size; \
- } else { \
- /* q is not a live object, so its mark should point at the next \
- * live object */ \
- debug_only(prev_q = q); \
- q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
- assert(q > prev_q, "we should be moving forward through memory"); \
- } \
- } \
+ } else { \
+ /* q is not a live object, so its mark should point at the next \
+ * live object */ \
+ debug_only(prev_q = q); \
+ q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
+ assert(q > prev_q, "we should be moving forward through memory"); \
+ } \
+ } \
\
- assert(q == t, "just checking"); \
+ assert(q == t, "just checking"); \
}
-#define SCAN_AND_COMPACT(obj_size) { \
+#define SCAN_AND_COMPACT(obj_size) { \
/* Copy all live objects to their new location \
- * Used by MarkSweep::mark_sweep_phase4() */ \
+ * Used by MarkSweep::mark_sweep_phase4() */ \
\
- HeapWord* q = bottom(); \
- HeapWord* const t = _end_of_live; \
- debug_only(HeapWord* prev_q = NULL); \
+ HeapWord* q = bottom(); \
+ HeapWord* const t = _end_of_live; \
+ debug_only(HeapWord* prev_q = NULL); \
\
- if (q < t && _first_dead > q && \
+ if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
- debug_only( \
- /* we have a chunk of the space which hasn't moved and we've reinitialized the \
- * mark word during the previous pass, so we can't use is_gc_marked for the \
- * traversal. */ \
- HeapWord* const end = _first_dead; \
- \
- while (q < end) { \
+ debug_only( \
+ /* we have a chunk of the space which hasn't moved and we've reinitialized \
+ * the mark word during the previous pass, so we can't use is_gc_marked for \
+ * the traversal. */ \
+ HeapWord* const end = _first_dead; \
+ \
+ while (q < end) { \
size_t size = obj_size(q); \
- assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); \
- debug_only(MarkSweep::live_oop_moved_to(q, size, q)); \
- debug_only(prev_q = q); \
+ assert(!oop(q)->is_gc_marked(), \
+ "should be unmarked (special dense prefix handling)"); \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \
+ debug_only(prev_q = q); \
q += size; \
- } \
- ) /* debug_only */ \
- \
- if (_first_dead == t) { \
- q = t; \
- } else { \
- /* $$$ Funky */ \
- q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
- } \
- } \
+ } \
+ ) /* debug_only */ \
+ \
+ if (_first_dead == t) { \
+ q = t; \
+ } else { \
+ /* $$$ Funky */ \
+ q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
+ } \
+ } \
\
- const intx scan_interval = PrefetchScanIntervalInBytes; \
- const intx copy_interval = PrefetchCopyIntervalInBytes; \
- while (q < t) { \
- if (!oop(q)->is_gc_marked()) { \
- /* mark is pointer to next marked oop */ \
- debug_only(prev_q = q); \
- q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
- assert(q > prev_q, "we should be moving forward through memory"); \
- } else { \
- /* prefetch beyond q */ \
+ const intx scan_interval = PrefetchScanIntervalInBytes; \
+ const intx copy_interval = PrefetchCopyIntervalInBytes; \
+ while (q < t) { \
+ if (!oop(q)->is_gc_marked()) { \
+ /* mark is pointer to next marked oop */ \
+ debug_only(prev_q = q); \
+ q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
+ assert(q > prev_q, "we should be moving forward through memory"); \
+ } else { \
+ /* prefetch beyond q */ \
Prefetch::read(q, scan_interval); \
\
/* size and destination */ \
size_t size = obj_size(q); \
HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
\
- /* prefetch beyond compaction_top */ \
+ /* prefetch beyond compaction_top */ \
Prefetch::write(compaction_top, copy_interval); \
\
- /* copy object and reinit its mark */ \
- debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top)); \
- assert(q != compaction_top, "everything in this pass should be moving"); \
- Copy::aligned_conjoint_words(q, compaction_top, size); \
- oop(compaction_top)->init_mark(); \
- assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
+ /* copy object and reinit its mark */ \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \
+ compaction_top)); \
+ assert(q != compaction_top, "everything in this pass should be moving"); \
+ Copy::aligned_conjoint_words(q, compaction_top, size); \
+ oop(compaction_top)->init_mark(); \
+ assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
\
- debug_only(prev_q = q); \
+ debug_only(prev_q = q); \
q += size; \
- } \
- } \
+ } \
+ } \
\
/* Reset space after compaction is complete */ \
- reset_after_compaction(); \
+ reset_after_compaction(); \
/* We do this clear, below, since it has overloaded meanings for some */ \
/* space subtypes. For example, OffsetTableContigSpace's that were */ \
/* compacted into will have had their offset table thresholds updated */ \
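SpaceMemRegionOopsIterClosure above illustrates the closure idiom this change relies on: one protected template worker holds the logic and the two virtual overloads forward to it, so the compressed and wide cases share a single implementation. A stripped-down sketch of the idiom (stand-in types):

#include <cstdint>

typedef uint32_t narrowOop;
typedef void*    oop;

class ExampleClosure {
 protected:
  // Common logic, instantiated for both pointer widths.
  template <class T> void do_oop_work(T* p) {
    // ... filter on the slot address p, then forward or mark ...
  }
 public:
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};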
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/universe.cpp
--- a/hotspot/src/share/vm/memory/universe.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/universe.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -99,6 +99,7 @@
size_t Universe::_heap_used_at_last_gc;
CollectedHeap* Universe::_collectedHeap = NULL;
+address Universe::_heap_base = NULL;
void Universe::basic_type_classes_do(void f(klassOop)) {
@@ -464,7 +465,7 @@
class FixupMirrorClosure: public ObjectClosure {
public:
- void do_object(oop obj) {
+ virtual void do_object(oop obj) {
if (obj->is_klass()) {
EXCEPTION_MARK;
KlassHandle k(THREAD, klassOop(obj));
@@ -667,7 +668,7 @@
"LogHeapWordSize is incorrect.");
guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
- "oop size is not not a multiple of HeapWord size");
+ "oop size is not not a multiple of HeapWord size");
TraceTime timer("Genesis", TraceStartupTime);
GC_locker::lock(); // do not allow gc during bootstrapping
JavaClasses::compute_hard_coded_offsets();
@@ -759,6 +760,15 @@
if (status != JNI_OK) {
return status;
}
+ if (UseCompressedOops) {
+ // Subtract a page because something can get allocated at heap base.
+ // This also makes implicit null checking work, because the
+ // memory+1 page below heap_base needs to cause a signal.
+ // See needs_explicit_null_check.
+ // Only set the heap base for compressed oops because it indicates
+ // compressed oops for pstack code.
+ Universe::_heap_base = Universe::heap()->base() - os::vm_page_size();
+ }
// We will never reach the CATCH below since Exceptions::_throw will cause
// the VM to exit if an exception is thrown during initialization
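With _heap_base set one page below the real heap start, a compressed reference can use 0 for NULL while every non-NULL decode still lands at or above the protected page, which is what makes the implicit null checks mentioned in the comment work. A sketch of the shift-and-offset arithmetic, assuming 8-byte minimum object alignment (the actual encode/decode lives on oopDesc, not in this file):

#include <cstdint>

// heap_base corresponds to Universe::heap()->base() - os::vm_page_size() above.
static inline uint32_t encode_oop(const void* p, const char* heap_base) {
  if (p == 0) return 0;                                    // NULL encodes to 0
  return (uint32_t)(((const char*)p - heap_base) >> 3);    // scaled offset from base
}

static inline void* decode_oop(uint32_t v, char* heap_base) {
  if (v == 0) return 0;
  return heap_base + ((uintptr_t)v << 3);                  // base + scaled offset
}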
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/memory/universe.hpp
--- a/hotspot/src/share/vm/memory/universe.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/universe.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -180,10 +180,13 @@
// The particular choice of collected heap.
static CollectedHeap* _collectedHeap;
+ // Base address for oop-within-java-object materialization.
+ // NULL if using wide oops. Doubles as heap oop null value.
+ static address _heap_base;
// array of dummy objects used with +FullGCAlot
debug_only(static objArrayOop _fullgc_alot_dummy_array;)
- // index of next entry to clear
+ // index of next entry to clear
debug_only(static int _fullgc_alot_dummy_next;)
// Compiler/dispatch support
@@ -323,6 +326,10 @@
// The particular choice of collected heap.
static CollectedHeap* heap() { return _collectedHeap; }
+ // For UseCompressedOops
+ static address heap_base() { return _heap_base; }
+ static address* heap_base_addr() { return &_heap_base; }
+
// Historic gc information
static size_t get_heap_capacity_at_last_gc() { return _heap_capacity_at_last_gc; }
static size_t get_heap_free_at_last_gc() { return _heap_capacity_at_last_gc - _heap_used_at_last_gc; }
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/arrayOop.hpp
--- a/hotspot/src/share/vm/oops/arrayOop.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/arrayOop.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -22,34 +22,79 @@
*
*/
-// arrayOopDesc is the abstract baseclass for all arrays.
+// arrayOopDesc is the abstract baseclass for all arrays. It doesn't
+// declare pure virtual to enforce this because that would allocate a vtbl
+// in each instance, which we don't want.
+
+// The layout of array Oops is:
+//
+// markOop
+// klassOop // 32 bits if compressed but declared 64 in LP64.
+// length // shares klass memory or allocated after declared fields.
+
class arrayOopDesc : public oopDesc {
friend class VMStructs;
- private:
- int _length; // number of elements in the array
+
+ // Interpreter/Compiler offsets
+
+ // Header size computation.
+ // The header is considered the oop part of this type plus the length.
+ // Returns the aligned header_size_in_bytes. This is not equivalent to
+ // sizeof(arrayOopDesc) which should not appear in the code, except here.
+ static int header_size_in_bytes() {
+ size_t hs = UseCompressedOops ?
+ sizeof(arrayOopDesc) :
+ align_size_up(sizeof(arrayOopDesc) + sizeof(int), HeapWordSize);
+#ifdef ASSERT
+ // make sure it isn't called before UseCompressedOops is initialized.
+ static size_t arrayoopdesc_hs = 0;
+ if (arrayoopdesc_hs == 0) arrayoopdesc_hs = hs;
+ assert(arrayoopdesc_hs == hs, "header size can't change");
+#endif // ASSERT
+ return (int)hs;
+ }
public:
- // Interpreter/Compiler offsets
- static int length_offset_in_bytes() { return offset_of(arrayOopDesc, _length); }
- static int base_offset_in_bytes(BasicType type) { return header_size(type) * HeapWordSize; }
+ // The _length field is not declared in C++. It is allocated after the
+ // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
+ // it occupies the second half of the _klass field in oopDesc.
+ static int length_offset_in_bytes() {
+ return UseCompressedOops ? klass_gap_offset_in_bytes() :
+ sizeof(arrayOopDesc);
+ }
+
+ // Returns the offset of the first element.
+ static int base_offset_in_bytes(BasicType type) {
+ return header_size(type) * HeapWordSize;
+ }
// Returns the address of the first element.
- void* base(BasicType type) const { return (void*) (((intptr_t) this) + base_offset_in_bytes(type)); }
+ void* base(BasicType type) const {
+ return (void*) (((intptr_t) this) + base_offset_in_bytes(type));
+ }
// Tells whether index is within bounds.
bool is_within_bounds(int index) const { return 0 <= index && index < length(); }
- // Accessores for instance variable
- int length() const { return _length; }
- void set_length(int length) { _length = length; }
+ // Accessors for instance variable which is not a C++ declared nonstatic
+ // field.
+ int length() const {
+ return *(int*)(((intptr_t)this) + length_offset_in_bytes());
+ }
+ void set_length(int length) {
+ *(int*)(((intptr_t)this) + length_offset_in_bytes()) = length;
+ }
- // Header size computation.
- // Should only be called with constants as argument (will not constant fold otherwise)
+ // Should only be called with constants as argument
+ // (will not constant fold otherwise)
+ // Returns the header size in words aligned to the requirements of the
+ // array object type.
static int header_size(BasicType type) {
- return Universe::element_type_should_be_aligned(type)
- ? align_object_size(sizeof(arrayOopDesc)/HeapWordSize)
- : sizeof(arrayOopDesc)/HeapWordSize;
+ size_t typesize_in_bytes = header_size_in_bytes();
+ return (int)(Universe::element_type_should_be_aligned(type)
+ ? align_object_size(typesize_in_bytes/HeapWordSize)
+ : typesize_in_bytes/HeapWordSize);
}
// This method returns the maximum length that can passed into
@@ -62,7 +107,7 @@
// We use max_jint, since object_size is internally represented by an 'int'
// This gives us an upper bound of max_jint words for the size of the oop.
int32_t max_words = (max_jint - header_size(type) - 2);
- int elembytes = (type == T_OBJECT) ? T_OBJECT_aelem_bytes : type2aelembytes(type);
+ int elembytes = type2aelembytes(type);
jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
return (len > max_jint) ? max_jint : (int32_t)len;
}
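For a concrete feel of header_size_in_bytes() above: on a typical LP64 build the mark and klass words are 8 bytes each, so sizeof(arrayOopDesc) is 16. The figures below are illustrative assumptions, not normative values:

// Assuming LP64 and HeapWordSize == 8:
//   UseCompressedOops:  header_size_in_bytes() == 16
//     (the 4-byte length reuses the upper half of the klass slot, so
//      length_offset_in_bytes() == klass_gap_offset_in_bytes() == 12)
//   wide oops:          header_size_in_bytes() == align_size_up(16 + 4, 8) == 24
//     (length_offset_in_bytes() == sizeof(arrayOopDesc) == 16)
// base_offset_in_bytes(T_BYTE) is header_size(T_BYTE) * HeapWordSize,
// i.e. byte-array elements start at offset 16 or 24 in the two modes.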
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/constantPoolKlass.cpp
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -29,8 +29,9 @@
int size = constantPoolOopDesc::object_size(length);
KlassHandle klass (THREAD, as_klassOop());
constantPoolOop c =
- (constantPoolOop)CollectedHeap::permanent_array_allocate(klass, size, length, CHECK_NULL);
+ (constantPoolOop)CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
+ c->set_length(length);
c->set_tags(NULL);
c->set_cache(NULL);
c->set_pool_holder(NULL);
@@ -54,14 +55,14 @@
klassOop constantPoolKlass::create_klass(TRAPS) {
constantPoolKlass o;
- KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
- arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
- arrayKlassHandle super (THREAD, k->super());
- complete_create_array_klass(k, super, CHECK_NULL);
+ KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+ KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+ // Make sure size calculation is right
+ assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+ java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror
return k();
}
-
int constantPoolKlass::oop_size(oop obj) const {
assert(obj->is_constantPool(), "must be constantPool");
return constantPoolOop(obj)->object_size();
@@ -275,7 +276,7 @@
EXCEPTION_MARK;
oop anObj;
assert(obj->is_constantPool(), "must be constantPool");
- arrayKlass::oop_print_on(obj, st);
+ Klass::oop_print_on(obj, st);
constantPoolOop cp = constantPoolOop(obj);
// Temp. remove cache so we can do lookups with original indicies.
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/constantPoolKlass.hpp
--- a/hotspot/src/share/vm/oops/constantPoolKlass.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -24,7 +24,8 @@
// A constantPoolKlass is the klass of a constantPoolOop
-class constantPoolKlass : public arrayKlass {
+class constantPoolKlass : public Klass {
+ juint _alloc_size; // allocation profiling support
public:
// Dispatched klass operations
bool oop_is_constantPool() const { return true; }
@@ -44,7 +45,7 @@
// Sizing
static int header_size() { return oopDesc::header_size() + sizeof(constantPoolKlass)/HeapWordSize; }
- int object_size() const { return arrayKlass::object_size(header_size()); }
+ int object_size() const { return align_object_size(header_size()); }
// Garbage collection
void oop_follow_contents(oop obj);
@@ -57,6 +58,11 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
+ // Allocation profiling support
+ // no idea why this is pure virtual and not in Klass ???
+ juint alloc_size() const { return _alloc_size; }
+ void set_alloc_size(juint n) { _alloc_size = n; }
+
#ifndef PRODUCT
public:
// Printing
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/constantPoolOop.hpp
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -34,13 +34,14 @@
class SymbolHashMap;
-class constantPoolOopDesc : public arrayOopDesc {
+class constantPoolOopDesc : public oopDesc {
friend class VMStructs;
friend class BytecodeInterpreter; // Directly extracts an oop in the pool for fast instanceof/checkcast
private:
typeArrayOop _tags; // the tag array describing the constant pool's contents
constantPoolCacheOop _cache; // the cache holding interpreter runtime information
klassOop _pool_holder; // the corresponding class
+ int _length; // number of elements in the array
// only set to non-zero if constant pool is merged by RedefineClasses
int _orig_length;
@@ -330,6 +331,14 @@
bool klass_name_at_matches(instanceKlassHandle k, int which);
// Sizing
+ int length() const { return _length; }
+ void set_length(int length) { _length = length; }
+
+ // Tells whether index is within bounds.
+ bool is_within_bounds(int index) const {
+ return 0 <= index && index < length();
+ }
+
static int header_size() { return sizeof(constantPoolOopDesc)/HeapWordSize; }
static int object_size(int length) { return align_object_size(header_size() + length); }
int object_size() { return object_size(length()); }
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/cpCacheKlass.cpp
--- a/hotspot/src/share/vm/oops/cpCacheKlass.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -37,18 +37,19 @@
int size = constantPoolCacheOopDesc::object_size(length);
KlassHandle klass (THREAD, as_klassOop());
constantPoolCacheOop cache = (constantPoolCacheOop)
- CollectedHeap::permanent_array_allocate(klass, size, length, CHECK_NULL);
+ CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
+ cache->set_length(length);
cache->set_constant_pool(NULL);
return cache;
}
-
klassOop constantPoolCacheKlass::create_klass(TRAPS) {
constantPoolCacheKlass o;
- KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
- arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
- KlassHandle super (THREAD, k->super());
- complete_create_array_klass(k, super, CHECK_NULL);
+ KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+ KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+ // Make sure size calculation is right
+ assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+ java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror
return k();
}
@@ -183,7 +184,7 @@
assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
constantPoolCacheOop cache = (constantPoolCacheOop)obj;
// super print
- arrayKlass::oop_print_on(obj, st);
+ Klass::oop_print_on(obj, st);
// print constant pool cache entries
for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->print(st, i);
}
@@ -194,7 +195,7 @@
guarantee(obj->is_constantPoolCache(), "obj must be constant pool cache");
constantPoolCacheOop cache = (constantPoolCacheOop)obj;
// super verify
- arrayKlass::oop_verify_on(obj, st);
+ Klass::oop_verify_on(obj, st);
// print constant pool cache entries
for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->verify(st);
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/cpCacheKlass.hpp
--- a/hotspot/src/share/vm/oops/cpCacheKlass.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -22,7 +22,8 @@
*
*/
-class constantPoolCacheKlass: public arrayKlass {
+class constantPoolCacheKlass: public Klass {
+ juint _alloc_size; // allocation profiling support
public:
// Dispatched klass operations
bool oop_is_constantPoolCache() const { return true; }
@@ -41,8 +42,8 @@
}
// Sizing
- static int header_size() { return oopDesc::header_size() + sizeof(constantPoolCacheKlass)/HeapWordSize; }
- int object_size() const { return arrayKlass::object_size(header_size()); }
+ static int header_size() { return oopDesc::header_size() + sizeof(constantPoolCacheKlass)/HeapWordSize; }
+ int object_size() const { return align_object_size(header_size()); }
// Garbage collection
void oop_follow_contents(oop obj);
@@ -55,6 +56,10 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
+ // Allocation profiling support
+ juint alloc_size() const { return _alloc_size; }
+ void set_alloc_size(juint n) { _alloc_size = n; }
+
#ifndef PRODUCT
public:
// Printing
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/cpCacheOop.cpp
--- a/hotspot/src/share/vm/oops/cpCacheOop.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/cpCacheOop.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -218,6 +218,7 @@
public:
LocalOopClosure(void f(oop*)) { _f = f; }
virtual void do_oop(oop* o) { _f(o); }
+ virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); }
};
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/cpCacheOop.hpp
--- a/hotspot/src/share/vm/oops/cpCacheOop.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/cpCacheOop.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -286,12 +286,17 @@
// is created and initialized before a class is actively used (i.e., initialized), the indivi-
// dual cache entries are filled at resolution (i.e., "link") time (see also: rewriter.*).
-class constantPoolCacheOopDesc: public arrayOopDesc {
+class constantPoolCacheOopDesc: public oopDesc {
friend class VMStructs;
private:
+ int _length;
constantPoolOop _constant_pool; // the corresponding constant pool
// Sizing
+ debug_only(friend class ClassVerifier;)
+ int length() const { return _length; }
+ void set_length(int length) { _length = length; }
+
static int header_size() { return sizeof(constantPoolCacheOopDesc) / HeapWordSize; }
static int object_size(int length) { return align_object_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); }
int object_size() { return object_size(length()); }
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/instanceKlass.cpp
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -1255,218 +1255,298 @@
#endif //PRODUCT
+#ifdef ASSERT
+template <class T> void assert_is_in(T *p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(Universe::heap()->is_in(o), "should be in heap");
+ }
+}
+template <class T> void assert_is_in_closed_subset(T *p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
+ }
+}
+template <class T> void assert_is_in_reserved(T *p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
+ }
+}
+template <class T> void assert_nothing(T *p) {}
+
+#else
+template <class T> void assert_is_in(T *p) {}
+template <class T> void assert_is_in_closed_subset(T *p) {}
+template <class T> void assert_is_in_reserved(T *p) {}
+template <class T> void assert_nothing(T *p) {}
+#endif // ASSERT
+
+//
+// Macros that iterate over areas of oops which are specialized on type of
+// oop pointer either narrow or wide, depending on UseCompressedOops
+//
+// Parameters are:
+// T - type of oop to point to (either oop or narrowOop)
+// start_p - starting pointer for region to iterate over
+// count - number of oops or narrowOops to iterate over
+// do_oop - action to perform on each oop (it's arbitrary C code which
+// makes it more efficient to put in a macro rather than making
+// it a template function)
+// assert_fn - assert function, which is a template function because performance
+// doesn't matter when asserts are enabled.
+#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
+ T, start_p, count, do_oop, \
+ assert_fn) \
+{ \
+ T* p = (T*)(start_p); \
+ T* const end = p + (count); \
+ while (p < end) { \
+ (assert_fn)(p); \
+ do_oop; \
+ ++p; \
+ } \
+}
+
+#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
+ T, start_p, count, do_oop, \
+ assert_fn) \
+{ \
+ T* const start = (T*)(start_p); \
+ T* p = start + (count); \
+ while (start < p) { \
+ --p; \
+ (assert_fn)(p); \
+ do_oop; \
+ } \
+}
+
+#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
+ T, start_p, count, low, high, \
+ do_oop, assert_fn) \
+{ \
+ T* const l = (T*)(low); \
+ T* const h = (T*)(high); \
+ assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
+ mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
+ "bounded region must be properly aligned"); \
+ T* p = (T*)(start_p); \
+ T* end = p + (count); \
+ if (p < l) p = l; \
+ if (end > h) end = h; \
+ while (p < end) { \
+ (assert_fn)(p); \
+ do_oop; \
+ ++p; \
+ } \
+}
+
+
+// The following macros call specialized macros, passing either oop or
+// narrowOop as the specialization type. These test the UseCompressedOops
+// flag.
+#define InstanceKlass_OOP_ITERATE(start_p, count, \
+ do_oop, assert_fn) \
+{ \
+ if (UseCompressedOops) { \
+ InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+ start_p, count, \
+ do_oop, assert_fn) \
+ } else { \
+ InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
+ start_p, count, \
+ do_oop, assert_fn) \
+ } \
+}
+
+#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
+ do_oop, assert_fn) \
+{ \
+ if (UseCompressedOops) { \
+ InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+ start_p, count, \
+ low, high, \
+ do_oop, assert_fn) \
+ } else { \
+ InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
+ start_p, count, \
+ low, high, \
+ do_oop, assert_fn) \
+ } \
+}
+
+#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
+{ \
+ /* Compute oopmap block range. The common case \
+ is nonstatic_oop_map_size == 1. */ \
+ OopMapBlock* map = start_of_nonstatic_oop_maps(); \
+ OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
+ if (UseCompressedOops) { \
+ while (map < end_map) { \
+ InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+ obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
+ do_oop, assert_fn) \
+ ++map; \
+ } \
+ } else { \
+ while (map < end_map) { \
+ InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
+ obj->obj_field_addr<oop>(map->offset()), map->length(), \
+ do_oop, assert_fn) \
+ ++map; \
+ } \
+ } \
+}
+
+#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
+{ \
+ OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \
+ OopMapBlock* map = start_map + nonstatic_oop_map_size(); \
+ if (UseCompressedOops) { \
+ while (start_map < map) { \
+ --map; \
+ InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \
+ obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
+ do_oop, assert_fn) \
+ } \
+ } else { \
+ while (start_map < map) { \
+ --map; \
+ InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \
+ obj->obj_field_addr<oop>(map->offset()), map->length(), \
+ do_oop, assert_fn) \
+ } \
+ } \
+}
+
+#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \
+ assert_fn) \
+{ \
+ /* Compute oopmap block range. The common case is \
+ nonstatic_oop_map_size == 1, so we accept the \
+ usually non-existent extra overhead of examining \
+ all the maps. */ \
+ OopMapBlock* map = start_of_nonstatic_oop_maps(); \
+ OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
+ if (UseCompressedOops) { \
+ while (map < end_map) { \
+ InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+ obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
+ low, high, \
+ do_oop, assert_fn) \
+ ++map; \
+ } \
+ } else { \
+ while (map < end_map) { \
+ InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
+ obj->obj_field_addr<oop>(map->offset()), map->length(), \
+ low, high, \
+ do_oop, assert_fn) \
+ ++map; \
+ } \
+ } \
+}
+
void instanceKlass::follow_static_fields() {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in_closed_subset(*start),
- "should be in heap");
- MarkSweep::mark_and_push(start);
- }
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ MarkSweep::mark_and_push(p), \
+ assert_is_in_closed_subset)
}
#ifndef SERIALGC
void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- PSParallelCompact::mark_and_push(cm, start);
- }
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ PSParallelCompact::mark_and_push(cm, p), \
+ assert_is_in)
}
#endif // SERIALGC
-
void instanceKlass::adjust_static_fields() {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- while (start < end) {
- MarkSweep::adjust_pointer(start);
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ MarkSweep::adjust_pointer(p), \
+ assert_nothing)
}
#ifndef SERIALGC
void instanceKlass::update_static_fields() {
- oop* const start = start_of_static_fields();
- oop* const beg_oop = start;
- oop* const end_oop = start + static_oop_field_size();
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_nothing)
}
-void
-instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
- oop* const start = start_of_static_fields();
- oop* const beg_oop = MAX2((oop*)beg_addr, start);
- oop* const end_oop = MIN2((oop*)end_addr, start + static_oop_field_size());
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
+void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
+ InstanceKlass_BOUNDED_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ beg_addr, end_addr, \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_nothing )
}
#endif // SERIALGC
void instanceKlass::oop_follow_contents(oop obj) {
- assert (obj!=NULL, "can't follow the content of NULL object");
+ assert(obj != NULL, "can't follow the content of NULL object");
obj->follow_header();
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* end_map = map + nonstatic_oop_map_size();
- while (map < end_map) {
- oop* start = obj->obj_field_addr(map->offset());
- oop* end = start + map->length();
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in_closed_subset(*start),
- "should be in heap");
- MarkSweep::mark_and_push(start);
- }
- start++;
- }
- map++;
- }
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ MarkSweep::mark_and_push(p), \
+ assert_is_in_closed_subset)
}
#ifndef SERIALGC
void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
oop obj) {
- assert (obj!=NULL, "can't follow the content of NULL object");
+ assert(obj != NULL, "can't follow the content of NULL object");
obj->follow_header(cm);
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* end_map = map + nonstatic_oop_map_size();
- while (map < end_map) {
- oop* start = obj->obj_field_addr(map->offset());
- oop* end = start + map->length();
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- PSParallelCompact::mark_and_push(cm, start);
- }
- start++;
- }
- map++;
- }
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ PSParallelCompact::mark_and_push(cm, p), \
+ assert_is_in)
}
#endif // SERIALGC
-#define invoke_closure_on(start, closure, nv_suffix) { \
- oop obj = *(start); \
- if (obj != NULL) { \
- assert(Universe::heap()->is_in_closed_subset(obj), "should be in heap"); \
- (closure)->do_oop##nv_suffix(start); \
- } \
-}
-
// closure's do_header() method dicates whether the given closure should be
// applied to the klass ptr in the object header.
-#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
- \
-int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, \
- OopClosureType* closure) { \
- SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
- /* header */ \
- if (closure->do_header()) { \
- obj->oop_iterate_header(closure); \
- } \
- /* instance variables */ \
- OopMapBlock* map = start_of_nonstatic_oop_maps(); \
- OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
- const intx field_offset = PrefetchFieldsAhead; \
- if (field_offset > 0) { \
- while (map < end_map) { \
- oop* start = obj->obj_field_addr(map->offset()); \
- oop* const end = start + map->length(); \
- while (start < end) { \
- prefetch_beyond(start, (oop*)end, field_offset, \
- closure->prefetch_style()); \
- SpecializationStats:: \
- record_do_oop_call##nv_suffix(SpecializationStats::ik); \
- invoke_closure_on(start, closure, nv_suffix); \
- start++; \
- } \
- map++; \
- } \
- } else { \
- while (map < end_map) { \
- oop* start = obj->obj_field_addr(map->offset()); \
- oop* const end = start + map->length(); \
- while (start < end) { \
- SpecializationStats:: \
- record_do_oop_call##nv_suffix(SpecializationStats::ik); \
- invoke_closure_on(start, closure, nv_suffix); \
- start++; \
- } \
- map++; \
- } \
- } \
- return size_helper(); \
+#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ \
+int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, \
+ OopClosureType* closure) {\
+ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
+ /* header */ \
+ if (closure->do_header()) { \
+ obj->oop_iterate_header(closure); \
+ } \
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ SpecializationStats:: \
+ record_do_oop_call##nv_suffix(SpecializationStats::ik); \
+ (closure)->do_oop##nv_suffix(p), \
+ assert_is_in_closed_subset) \
+ return size_helper(); \
}
-#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
- \
-int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
- OopClosureType* closure, \
- MemRegion mr) { \
- SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
- /* header */ \
- if (closure->do_header()) { \
- obj->oop_iterate_header(closure, mr); \
- } \
- /* instance variables */ \
- OopMapBlock* map = start_of_nonstatic_oop_maps(); \
- OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
- HeapWord* bot = mr.start(); \
- HeapWord* top = mr.end(); \
- oop* start = obj->obj_field_addr(map->offset()); \
- HeapWord* end = MIN2((HeapWord*)(start + map->length()), top); \
- /* Find the first map entry that extends onto mr. */ \
- while (map < end_map && end <= bot) { \
- map++; \
- start = obj->obj_field_addr(map->offset()); \
- end = MIN2((HeapWord*)(start + map->length()), top); \
- } \
- if (map != end_map) { \
- /* The current map's end is past the start of "mr". Skip up to the first \
- entry on "mr". */ \
- while ((HeapWord*)start < bot) { \
- start++; \
- } \
- const intx field_offset = PrefetchFieldsAhead; \
- for (;;) { \
- if (field_offset > 0) { \
- while ((HeapWord*)start < end) { \
- prefetch_beyond(start, (oop*)end, field_offset, \
- closure->prefetch_style()); \
- invoke_closure_on(start, closure, nv_suffix); \
- start++; \
- } \
- } else { \
- while ((HeapWord*)start < end) { \
- invoke_closure_on(start, closure, nv_suffix); \
- start++; \
- } \
- } \
- /* Go to the next map. */ \
- map++; \
- if (map == end_map) { \
- break; \
- } \
- /* Otherwise, */ \
- start = obj->obj_field_addr(map->offset()); \
- if ((HeapWord*)start >= top) { \
- break; \
- } \
- end = MIN2((HeapWord*)(start + map->length()), top); \
- } \
- } \
- return size_helper(); \
+#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
+ \
+int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
+ OopClosureType* closure, \
+ MemRegion mr) { \
+ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
+ if (closure->do_header()) { \
+ obj->oop_iterate_header(closure, mr); \
+ } \
+ InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
+ obj, mr.start(), mr.end(), \
+ (closure)->do_oop##nv_suffix(p), \
+ assert_is_in_closed_subset) \
+ return size_helper(); \
}
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
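For reference, the specialized macro defined above expands into the same shape as the hand-rolled loops it replaces; roughly, InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, start, count, MarkSweep::mark_and_push(p), assert_is_in) becomes the block below (illustrative expansion; note that the do_oop argument refers to the loop variable p introduced by the macro):

{
  narrowOop* p         = (narrowOop*)(start);
  narrowOop* const end = p + (count);
  while (p < end) {
    (assert_is_in)(p);               // template assert, empty in product builds
    MarkSweep::mark_and_push(p);     // the pasted-in do_oop action
    ++p;
  }
}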
@@ -1474,56 +1554,28 @@
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
-
void instanceKlass::iterate_static_fields(OopClosure* closure) {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- while (start < end) {
- assert(Universe::heap()->is_in_reserved_or_null(*start), "should be in heap");
- closure->do_oop(start);
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ closure->do_oop(p), \
+ assert_is_in_reserved)
}
void instanceKlass::iterate_static_fields(OopClosure* closure,
MemRegion mr) {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- // I gather that the the static fields of reference types come first,
- // hence the name of "oop_field_size", and that is what makes this safe.
- assert((intptr_t)mr.start() ==
- align_size_up((intptr_t)mr.start(), sizeof(oop)) &&
- (intptr_t)mr.end() == align_size_up((intptr_t)mr.end(), sizeof(oop)),
- "Memregion must be oop-aligned.");
- if ((HeapWord*)start < mr.start()) start = (oop*)mr.start();
- if ((HeapWord*)end > mr.end()) end = (oop*)mr.end();
- while (start < end) {
- invoke_closure_on(start, closure,_v);
- start++;
- }
+ InstanceKlass_BOUNDED_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ mr.start(), mr.end(), \
+ (closure)->do_oop_v(p), \
+ assert_is_in_closed_subset)
}
-
int instanceKlass::oop_adjust_pointers(oop obj) {
int size = size_helper();
-
- // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* const end_map = map + nonstatic_oop_map_size();
- // Iterate over oopmap blocks
- while (map < end_map) {
- // Compute oop range for this block
- oop* start = obj->obj_field_addr(map->offset());
- oop* end = start + map->length();
- // Iterate over oops
- while (start < end) {
- assert(Universe::heap()->is_in_or_null(*start), "should be in heap");
- MarkSweep::adjust_pointer(start);
- start++;
- }
- map++;
- }
-
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ MarkSweep::adjust_pointer(p), \
+ assert_is_in)
obj->adjust_header();
return size;
}
@@ -1531,132 +1583,66 @@
#ifndef SERIALGC
void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
assert(!pm->depth_first(), "invariant");
- // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
- OopMapBlock* start_map = start_of_nonstatic_oop_maps();
- OopMapBlock* map = start_map + nonstatic_oop_map_size();
-
- // Iterate over oopmap blocks
- while (start_map < map) {
- --map;
- // Compute oop range for this block
- oop* start = obj->obj_field_addr(map->offset());
- oop* curr = start + map->length();
- // Iterate over oops
- while (start < curr) {
- --curr;
- if (PSScavenge::should_scavenge(*curr)) {
- assert(Universe::heap()->is_in(*curr), "should be in heap");
- pm->claim_or_forward_breadth(curr);
- }
- }
- }
+ InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
+ obj, \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_breadth(p); \
+ }, \
+ assert_nothing )
}
void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
assert(pm->depth_first(), "invariant");
- // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
- OopMapBlock* start_map = start_of_nonstatic_oop_maps();
- OopMapBlock* map = start_map + nonstatic_oop_map_size();
-
- // Iterate over oopmap blocks
- while (start_map < map) {
- --map;
- // Compute oop range for this block
- oop* start = obj->obj_field_addr(map->offset());
- oop* curr = start + map->length();
- // Iterate over oops
- while (start < curr) {
- --curr;
- if (PSScavenge::should_scavenge(*curr)) {
- assert(Universe::heap()->is_in(*curr), "should be in heap");
- pm->claim_or_forward_depth(curr);
- }
- }
- }
+ InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
+ obj, \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_depth(p); \
+ }, \
+ assert_nothing )
}
int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
- // Compute oopmap block range. The common case is nonstatic_oop_map_size==1.
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* const end_map = map + nonstatic_oop_map_size();
- // Iterate over oopmap blocks
- while (map < end_map) {
- // Compute oop range for this oopmap block.
- oop* const map_start = obj->obj_field_addr(map->offset());
- oop* const beg_oop = map_start;
- oop* const end_oop = map_start + map->length();
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
- ++map;
- }
-
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_nothing)
return size_helper();
}
int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
HeapWord* beg_addr, HeapWord* end_addr) {
- // Compute oopmap block range. The common case is nonstatic_oop_map_size==1.
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* const end_map = map + nonstatic_oop_map_size();
- // Iterate over oopmap blocks
- while (map < end_map) {
- // Compute oop range for this oopmap block.
- oop* const map_start = obj->obj_field_addr(map->offset());
- oop* const beg_oop = MAX2((oop*)beg_addr, map_start);
- oop* const end_oop = MIN2((oop*)end_addr, map_start + map->length());
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
- ++map;
- }
-
+ InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
+ obj, beg_addr, end_addr, \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_nothing)
return size_helper();
}
void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
assert(!pm->depth_first(), "invariant");
- // Compute oop range
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- // Iterate over oops
- while (start < end) {
- if (PSScavenge::should_scavenge(*start)) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- pm->claim_or_forward_breadth(start);
- }
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_breadth(p); \
+ }, \
+ assert_nothing )
}
void instanceKlass::push_static_fields(PSPromotionManager* pm) {
assert(pm->depth_first(), "invariant");
- // Compute oop range
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- // Iterate over oops
- while (start < end) {
- if (PSScavenge::should_scavenge(*start)) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- pm->claim_or_forward_depth(start);
- }
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_depth(p); \
+ }, \
+ assert_nothing )
}
void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
- // Compute oop range
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- // Iterate over oops
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- // *start = (oop) cm->summary_data()->calc_new_pointer(*start);
- PSParallelCompact::adjust_pointer(start);
- }
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_is_in)
}
#endif // SERIALGC
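
The hunks above replace the hand-written oop-map walks with the InstanceKlass_OOP_MAP_ITERATE macro family, whose point is to expand one loop body twice, once for compressed slots and once for full-width slots. A minimal, self-contained sketch of that expand-twice pattern follows; the flag, macro, and slot types are illustrative stand-ins, not HotSpot's.

    #include <cstdint>
    #include <cstdio>

    static bool use_compressed_oops = true;   // stands in for UseCompressedOops

    // Paste the same body over either slot width, the way the real macros
    // paste their do_oop argument.
    #define OOP_MAP_ITERATE(slots, count, T, do_oop) \
      {                                              \
        T* p = (T*)(slots);                          \
        T* const end = p + (count);                  \
        while (p < end) { do_oop; ++p; }             \
      }

    int main() {
      uint32_t narrow[3] = { 10, 20, 30 };   // compressed 32-bit reference slots
      uint64_t wide[3]   = { 10, 20, 30 };   // full-width reference slots
      if (use_compressed_oops) {
        OOP_MAP_ITERATE(narrow, 3, uint32_t, std::printf("%u\n", *p))
      } else {
        OOP_MAP_ITERATE(wide, 3, uint64_t, std::printf("%llu\n", (unsigned long long)*p))
      }
      return 0;
    }
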
@@ -1687,18 +1673,15 @@
Klass::follow_weak_klass_links(is_alive, keep_alive);
}
-
void instanceKlass::remove_unshareable_info() {
Klass::remove_unshareable_info();
init_implementor();
}
-
static void clear_all_breakpoints(methodOop m) {
m->clear_all_breakpoints();
}
-
void instanceKlass::release_C_heap_structures() {
// Deallocate oop map cache
if (_oop_map_cache != NULL) {
@@ -2047,29 +2030,30 @@
obj->print_address_on(st);
}
-#endif
+#endif // ndef PRODUCT
const char* instanceKlass::internal_name() const {
return external_name();
}
-
-
// Verification
class VerifyFieldClosure: public OopClosure {
- public:
- void do_oop(oop* p) {
+ protected:
+ template <class T> void do_oop_work(T* p) {
guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
- if (!(*p)->is_oop_or_null()) {
- tty->print_cr("Failed: %p -> %p",p,(address)*p);
+ oop obj = oopDesc::load_decode_heap_oop(p);
+ if (!obj->is_oop_or_null()) {
+ tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
Universe::print();
guarantee(false, "boom");
}
}
+ public:
+ virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
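
This VerifyFieldClosure rewrite is the pattern used for closures throughout the patch: the per-slot logic lives in a single do_oop_work<T> template and the two virtual do_oop overloads merely forward to it. Below is a standalone sketch of that shape under invented slot types; it is not the VM's OopClosure hierarchy.

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t full_slot;     // stand-in for oop*
    typedef uint32_t narrow_slot;   // stand-in for narrowOop*

    struct SlotClosure {
      virtual ~SlotClosure() {}
      virtual void do_oop(full_slot* p)   = 0;
      virtual void do_oop(narrow_slot* p) = 0;
    };

    // One template worker, two thin virtual overloads, so the body is written once.
    struct PrintClosure : public SlotClosure {
     protected:
      template <class T> void do_oop_work(T* p) {
        std::printf("slot at %p holds %llu\n", (void*)p, (unsigned long long)*p);
      }
     public:
      virtual void do_oop(full_slot* p)   { do_oop_work(p); }
      virtual void do_oop(narrow_slot* p) { do_oop_work(p); }
    };

    int main() {
      full_slot   f = 7;
      narrow_slot n = 7;
      PrintClosure pc;
      pc.do_oop(&f);
      pc.do_oop(&n);
      return 0;
    }
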
-
void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
Klass::oop_verify_on(obj, st);
VerifyFieldClosure blk;
@@ -2110,26 +2094,28 @@
}
}
-#endif
+#endif // ndef PRODUCT
+
+// JNIid class for jfieldIDs only
+// Note to reviewers:
+// These JNI functions are just moved over to column 1 and not changed
+// in the compressed oops workspace.
+JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
+ _holder = holder;
+ _offset = offset;
+ _next = next;
+ debug_only(_is_static_field_id = false;)
+}
-/* JNIid class for jfieldIDs only */
- JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
- _holder = holder;
- _offset = offset;
- _next = next;
- debug_only(_is_static_field_id = false;)
- }
-
-
- JNIid* JNIid::find(int offset) {
- JNIid* current = this;
- while (current != NULL) {
- if (current->offset() == offset) return current;
- current = current->next();
- }
- return NULL;
- }
+JNIid* JNIid::find(int offset) {
+ JNIid* current = this;
+ while (current != NULL) {
+ if (current->offset() == offset) return current;
+ current = current->next();
+ }
+ return NULL;
+}
void JNIid::oops_do(OopClosure* f) {
for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
@@ -2138,40 +2124,40 @@
}
void JNIid::deallocate(JNIid* current) {
- while (current != NULL) {
- JNIid* next = current->next();
- delete current;
- current = next;
- }
- }
+ while (current != NULL) {
+ JNIid* next = current->next();
+ delete current;
+ current = next;
+ }
+}
- void JNIid::verify(klassOop holder) {
- int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
- int end_field_offset;
- end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
+void JNIid::verify(klassOop holder) {
+ int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
+ int end_field_offset;
+ end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
- JNIid* current = this;
- while (current != NULL) {
- guarantee(current->holder() == holder, "Invalid klass in JNIid");
- #ifdef ASSERT
- int o = current->offset();
- if (current->is_static_field_id()) {
- guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
- }
- #endif
- current = current->next();
- }
- }
+ JNIid* current = this;
+ while (current != NULL) {
+ guarantee(current->holder() == holder, "Invalid klass in JNIid");
+#ifdef ASSERT
+ int o = current->offset();
+ if (current->is_static_field_id()) {
+ guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
+ }
+#endif
+ current = current->next();
+ }
+}
#ifdef ASSERT
- void instanceKlass::set_init_state(ClassState state) {
- bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
- : (_init_state < state);
- assert(good_state || state == allocated, "illegal state transition");
- _init_state = state;
- }
+void instanceKlass::set_init_state(ClassState state) {
+ bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
+ : (_init_state < state);
+ assert(good_state || state == allocated, "illegal state transition");
+ _init_state = state;
+}
#endif
@@ -2180,9 +2166,9 @@
// Add an information node that contains weak references to the
// interesting parts of the previous version of the_class.
void instanceKlass::add_previous_version(instanceKlassHandle ikh,
- BitMap * emcp_methods, int emcp_method_count) {
+ BitMap* emcp_methods, int emcp_method_count) {
assert(Thread::current()->is_VM_thread(),
- "only VMThread can add previous versions");
+ "only VMThread can add previous versions");
if (_previous_versions == NULL) {
// This is the first previous version so make some space.
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/instanceKlass.hpp
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -180,12 +180,16 @@
// End of the oop block.
//
- int _nonstatic_field_size; // number of non-static fields in this klass (including inherited fields)
- int _static_field_size; // number of static fields (oop and non-oop) in this klass
+ // number of words used by non-static fields in this klass (including
+ // inherited fields but after header_size()). If fields are compressed into
+ // header, this can be zero, so it is not the same as the number of non-static fields.
+ int _nonstatic_field_size;
+ int _static_field_size; // number of words used by static fields (oop and non-oop) in this klass
int _static_oop_field_size;// number of static oop fields in this klass
int _nonstatic_oop_map_size;// number of nonstatic oop-map blocks allocated at end of this klass
bool _is_marked_dependent; // used for marking during flushing and deoptimization
bool _rewritten; // methods rewritten.
+ bool _has_nonstatic_fields; // for sizing with UseCompressedOops
u2 _minor_version; // minor version number of class file
u2 _major_version; // major version number of class file
ClassState _init_state; // state of class
@@ -221,6 +225,9 @@
friend class SystemDictionary;
public:
+ bool has_nonstatic_fields() const { return _has_nonstatic_fields; }
+ void set_has_nonstatic_fields(bool b) { _has_nonstatic_fields = b; }
+
// field sizes
int nonstatic_field_size() const { return _nonstatic_field_size; }
void set_nonstatic_field_size(int size) { _nonstatic_field_size = size; }
@@ -340,8 +347,7 @@
// find a non-static or static field given its offset within the class.
bool contains_field_offset(int offset) {
- return ((offset/wordSize) >= instanceOopDesc::header_size() &&
- (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size());
+ return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size());
}
bool find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
@@ -570,12 +576,21 @@
intptr_t* start_of_itable() const { return start_of_vtable() + align_object_offset(vtable_length()); }
int itable_offset_in_words() const { return start_of_itable() - (intptr_t*)as_klassOop(); }
- oop* start_of_static_fields() const { return (oop*)(start_of_itable() + align_object_offset(itable_length())); }
+ // Static field offsets are offsets into the heap; traversal code must choose
+ // oop* or narrowOop* based on UseCompressedOops.
+ HeapWord* start_of_static_fields() const {
+ return (HeapWord*)(start_of_itable() + align_object_offset(itable_length()));
+ }
+
intptr_t* end_of_itable() const { return start_of_itable() + itable_length(); }
- oop* end_of_static_fields() const { return start_of_static_fields() + static_field_size(); }
- int offset_of_static_fields() const { return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop(); }
- OopMapBlock* start_of_nonstatic_oop_maps() const { return (OopMapBlock*) (start_of_static_fields() + static_field_size()); }
+ int offset_of_static_fields() const {
+ return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop();
+ }
+
+ OopMapBlock* start_of_nonstatic_oop_maps() const {
+ return (OopMapBlock*) (start_of_static_fields() + static_field_size());
+ }
// Allocation profiling support
juint alloc_size() const { return _alloc_count * size_helper(); }
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/instanceKlassKlass.cpp
--- a/hotspot/src/share/vm/oops/instanceKlassKlass.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlassKlass.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -286,17 +286,17 @@
ik->copy_static_fields(pm);
oop* loader_addr = ik->adr_class_loader();
- if (PSScavenge::should_scavenge(*loader_addr)) {
+ if (PSScavenge::should_scavenge(loader_addr)) {
pm->claim_or_forward_breadth(loader_addr);
}
oop* pd_addr = ik->adr_protection_domain();
- if (PSScavenge::should_scavenge(*pd_addr)) {
+ if (PSScavenge::should_scavenge(pd_addr)) {
pm->claim_or_forward_breadth(pd_addr);
}
oop* sg_addr = ik->adr_signers();
- if (PSScavenge::should_scavenge(*sg_addr)) {
+ if (PSScavenge::should_scavenge(sg_addr)) {
pm->claim_or_forward_breadth(sg_addr);
}
@@ -309,17 +309,17 @@
ik->push_static_fields(pm);
oop* loader_addr = ik->adr_class_loader();
- if (PSScavenge::should_scavenge(*loader_addr)) {
+ if (PSScavenge::should_scavenge(loader_addr)) {
pm->claim_or_forward_depth(loader_addr);
}
oop* pd_addr = ik->adr_protection_domain();
- if (PSScavenge::should_scavenge(*pd_addr)) {
+ if (PSScavenge::should_scavenge(pd_addr)) {
pm->claim_or_forward_depth(pd_addr);
}
oop* sg_addr = ik->adr_signers();
- if (PSScavenge::should_scavenge(*sg_addr)) {
+ if (PSScavenge::should_scavenge(sg_addr)) {
pm->claim_or_forward_depth(sg_addr);
}
@@ -602,16 +602,18 @@
// Verification
-
class VerifyFieldClosure: public OopClosure {
- public:
- void do_oop(oop* p) {
+ protected:
+ template <class T> void do_oop_work(T* p) {
guarantee(Universe::heap()->is_in(p), "should be in heap");
- guarantee((*p)->is_oop_or_null(), "should be in heap");
+ oop obj = oopDesc::load_decode_heap_oop(p);
+ guarantee(obj->is_oop_or_null(), "should be in heap");
}
+ public:
+ virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
-
void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) {
klassKlass::oop_verify_on(obj, st);
if (!obj->partially_loaded()) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/instanceOop.hpp
--- a/hotspot/src/share/vm/oops/instanceOop.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceOop.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -27,5 +27,26 @@
class instanceOopDesc : public oopDesc {
public:
+ // aligned header size.
static int header_size() { return sizeof(instanceOopDesc)/HeapWordSize; }
+
+ // If compressed, the offset of the fields of the instance may not be aligned.
+ static int base_offset_in_bytes() {
+ return UseCompressedOops ?
+ klass_gap_offset_in_bytes() :
+ sizeof(instanceOopDesc);
+ }
+
+ static bool contains_field_offset(int offset, int nonstatic_field_size) {
+ int base_in_bytes = base_offset_in_bytes();
+ if (UseCompressedOops) {
+ return (offset >= base_in_bytes &&
+ // field can be embedded in header, or is after header.
+ (offset < (int)sizeof(instanceOopDesc) ||
+ (offset-(int)sizeof(instanceOopDesc))/wordSize < nonstatic_field_size));
+ } else {
+ return (offset >= base_in_bytes &&
+ (offset-base_in_bytes)/wordSize < nonstatic_field_size);
+ }
+ }
};
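
A worked instance of the arithmetic above: assuming a 64-bit VM whose header is an 8-byte mark word plus a klass reference, compressing the klass reference leaves a 4-byte gap at offset 12 into which the first field can be packed, while the full header size stays 16 bytes. The toy model below reproduces only that bookkeeping; the constants are assumptions, not values read from the VM.

    #include <cstdio>

    static const int header_full   = 16;  // header size with a full-width klass reference
    static const int klass_gap_off = 12;  // first usable byte once the klass reference is compressed
    static const int word_size     = 8;

    static bool contains_field_offset(bool compressed, int offset, int nonstatic_field_size) {
      int base = compressed ? klass_gap_off : header_full;
      if (compressed) {
        return offset >= base &&
               (offset < header_full ||                              // packed into the klass gap
                (offset - header_full) / word_size < nonstatic_field_size);
      }
      return offset >= base && (offset - base) / word_size < nonstatic_field_size;
    }

    int main() {
      // An int at offset 12 sits in the klass gap when oops are compressed,
      // but is outside the instance when they are not.
      std::printf("%d\n", contains_field_offset(true, 12, 1));   // prints 1
      std::printf("%d\n", contains_field_offset(false, 12, 1));  // prints 0
      return 0;
    }
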
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/instanceRefKlass.cpp
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -25,23 +25,77 @@
# include "incls/_precompiled.incl"
# include "incls/_instanceRefKlass.cpp.incl"
-void instanceRefKlass::oop_follow_contents(oop obj) {
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- oop referent = *referent_addr;
+template <class T>
+static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ oop referent = oopDesc::load_decode_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
}
)
if (referent != NULL) {
if (!referent->is_gc_marked() &&
MarkSweep::ref_processor()->
- discover_reference(obj, reference_type())) {
+ discover_reference(obj, ref->reference_type())) {
// reference already enqueued, referent will be traversed later
- instanceKlass::oop_follow_contents(obj);
+ ref->instanceKlass::oop_follow_contents(obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj);
+ }
+ )
+ return;
+ } else {
+ // treat referent as normal oop
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj);
+ }
+ )
+ MarkSweep::mark_and_push(referent_addr);
+ }
+ }
+ // treat next as normal oop. next is a link in the pending list.
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr);
+ }
+ )
+ MarkSweep::mark_and_push(next_addr);
+ ref->instanceKlass::oop_follow_contents(obj);
+}
+
+void instanceRefKlass::oop_follow_contents(oop obj) {
+ if (UseCompressedOops) {
+ specialized_oop_follow_contents<narrowOop>(this, obj);
+ } else {
+ specialized_oop_follow_contents<oop>(this, obj);
+ }
+}
+
+#ifndef SERIALGC
+template <class T>
+static void specialized_oop_follow_contents(instanceRefKlass* ref,
+ ParCompactionManager* cm,
+ oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ oop referent = oopDesc::load_decode_heap_oop(referent_addr);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
+ }
+ )
+ if (referent != NULL) {
+ if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
+ PSParallelCompact::ref_processor()->
+ discover_reference(obj, ref->reference_type())) {
+ // reference already enqueued, referent will be traversed later
+ ref->instanceKlass::oop_follow_contents(cm, obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj);
}
)
return;
@@ -49,98 +103,106 @@
// treat referent as normal oop
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (address)obj);
- }
- )
- MarkSweep::mark_and_push(referent_addr);
- }
- }
- // treat next as normal oop. next is a link in the pending list.
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- debug_only(
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr);
- }
- )
- MarkSweep::mark_and_push(next_addr);
- instanceKlass::oop_follow_contents(obj);
-}
-
-#ifndef SERIALGC
-void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm,
- oop obj) {
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- oop referent = *referent_addr;
- debug_only(
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (address)obj);
- }
- )
- if (referent != NULL) {
- if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
- PSParallelCompact::ref_processor()->
- discover_reference(obj, reference_type())) {
- // reference already enqueued, referent will be traversed later
- instanceKlass::oop_follow_contents(cm, obj);
- debug_only(
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (address)obj);
- }
- )
- return;
- } else {
- // treat referent as normal oop
- debug_only(
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj);
}
)
PSParallelCompact::mark_and_push(cm, referent_addr);
}
}
// treat next as normal oop. next is a link in the pending list.
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr);
}
)
PSParallelCompact::mark_and_push(cm, next_addr);
- instanceKlass::oop_follow_contents(cm, obj);
+ ref->instanceKlass::oop_follow_contents(cm, obj);
+}
+
+void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm,
+ oop obj) {
+ if (UseCompressedOops) {
+ specialized_oop_follow_contents<narrowOop>(this, cm, obj);
+ } else {
+ specialized_oop_follow_contents<oop>(this, cm, obj);
+ }
}
#endif // SERIALGC
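
The specialized_oop_follow_contents<T> pairs above differ only in how a slot's contents become an object address: a narrowOop slot holds a scaled offset that is decoded against a heap base, while an oop slot holds the address itself. The sketch below models that decode step with an invented base and shift; it is not HotSpot's load_decode_heap_oop.

    #include <cstdint>
    #include <cstdio>

    static const uint64_t heap_base = 0x100000000ULL;   // made up for the example
    static const int      shift     = 3;                // 8-byte object alignment

    static uint64_t decode(uint32_t v) { return v == 0 ? 0 : heap_base + ((uint64_t)v << shift); }
    static uint64_t decode(uint64_t v) { return v; }

    // The templated GC helpers only need the slot type T; overload resolution
    // on *slot picks the right decode.
    template <class T>
    static uint64_t load_ref(T* slot) { return decode(*slot); }

    int main() {
      uint32_t narrow_slot = 0x10;                  // encodes heap_base + 0x80
      uint64_t full_slot   = heap_base + 0x80;
      std::printf("%#llx\n", (unsigned long long)load_ref(&narrow_slot));
      std::printf("%#llx\n", (unsigned long long)load_ref(&full_slot));
      return 0;
    }
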
+#ifdef ASSERT
+template <class T> void trace_reference_gc(const char *s, oop obj,
+ T* referent_addr,
+ T* next_addr,
+ T* discovered_addr) {
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr("%s obj " INTPTR_FORMAT, s, (address)obj);
+ gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / "
+ INTPTR_FORMAT, referent_addr,
+ referent_addr ?
+ (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL);
+ gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / "
+ INTPTR_FORMAT, next_addr,
+ next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL);
+ gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / "
+ INTPTR_FORMAT, discovered_addr,
+ discovered_addr ?
+ (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL);
+ }
+}
+#endif
+
+template <class T> void specialized_oop_adjust_pointers(instanceRefKlass *ref, oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ MarkSweep::adjust_pointer(referent_addr);
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ MarkSweep::adjust_pointer(next_addr);
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ MarkSweep::adjust_pointer(discovered_addr);
+ debug_only(trace_reference_gc("instanceRefKlass::oop_adjust_pointers", obj,
+ referent_addr, next_addr, discovered_addr);)
+}
int instanceRefKlass::oop_adjust_pointers(oop obj) {
int size = size_helper();
instanceKlass::oop_adjust_pointers(obj);
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- MarkSweep::adjust_pointer(referent_addr);
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- MarkSweep::adjust_pointer(next_addr);
- oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
- MarkSweep::adjust_pointer(discovered_addr);
-
-#ifdef ASSERT
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_adjust_pointers obj "
- INTPTR_FORMAT, (address)obj);
- gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, referent_addr,
- referent_addr ? (address)*referent_addr : NULL);
- gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, next_addr,
- next_addr ? (address)*next_addr : NULL);
- gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, discovered_addr,
- discovered_addr ? (address)*discovered_addr : NULL);
+ if (UseCompressedOops) {
+ specialized_oop_adjust_pointers<narrowOop>(this, obj);
+ } else {
+ specialized_oop_adjust_pointers<oop>(this, obj);
}
-#endif
-
return size;
}
+#define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains) \
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \
+ oop referent = oopDesc::load_decode_heap_oop(referent_addr); \
+ if (referent != NULL && contains(referent_addr)) { \
+ ReferenceProcessor* rp = closure->_ref_processor; \
+ if (!referent->is_gc_marked() && (rp != NULL) && \
+ rp->discover_reference(obj, reference_type())) { \
+ return size; \
+ } else { \
+ /* treat referent as normal oop */ \
+ SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
+ closure->do_oop##nv_suffix(referent_addr); \
+ } \
+ } \
+ /* treat next as normal oop */ \
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); \
+ if (contains(next_addr)) { \
+ SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
+ closure->do_oop##nv_suffix(next_addr); \
+ } \
+ return size; \
+
+
+template <class T> bool contains(T *t) { return true; }
+
+// Macro to define instanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
+// all closures. Macros calling macros above for each oop size.
+
#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
int instanceRefKlass:: \
@@ -150,25 +212,11 @@
\
int size = instanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
\
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); \
- oop referent = *referent_addr; \
- if (referent != NULL) { \
- ReferenceProcessor* rp = closure->_ref_processor; \
- if (!referent->is_gc_marked() && (rp != NULL) && \
- rp->discover_reference(obj, reference_type())) { \
- return size; \
- } else { \
- /* treat referent as normal oop */ \
- SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
- closure->do_oop##nv_suffix(referent_addr); \
- } \
+ if (UseCompressedOops) { \
+ InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains); \
+ } else { \
+ InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains); \
} \
- \
- /* treat next as normal oop */ \
- oop* next_addr = java_lang_ref_Reference::next_addr(obj); \
- SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
- closure->do_oop##nv_suffix(next_addr); \
- return size; \
}
#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
@@ -180,28 +228,11 @@
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
\
int size = instanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \
- \
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); \
- oop referent = *referent_addr; \
- if (referent != NULL && mr.contains(referent_addr)) { \
- ReferenceProcessor* rp = closure->_ref_processor; \
- if (!referent->is_gc_marked() && (rp != NULL) && \
- rp->discover_reference(obj, reference_type())) { \
- return size; \
- } else { \
- /* treat referent as normal oop */ \
- SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
- closure->do_oop##nv_suffix(referent_addr); \
- } \
+ if (UseCompressedOops) { \
+ InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr.contains); \
+ } else { \
+ InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr.contains); \
} \
- \
- /* treat next as normal oop */ \
- oop* next_addr = java_lang_ref_Reference::next_addr(obj); \
- if (mr.contains(next_addr)) { \
- SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
- closure->do_oop##nv_suffix(next_addr); \
- } \
- return size; \
}
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
@@ -209,16 +240,17 @@
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
-
#ifndef SERIALGC
-void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
+template <class T>
+void specialized_oop_copy_contents(instanceRefKlass *ref,
+ PSPromotionManager* pm, oop obj) {
assert(!pm->depth_first(), "invariant");
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- if (PSScavenge::should_scavenge(*referent_addr)) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ if (PSScavenge::should_scavenge(referent_addr)) {
ReferenceProcessor* rp = PSScavenge::reference_processor();
- if (rp->discover_reference(obj, reference_type())) {
+ if (rp->discover_reference(obj, ref->reference_type())) {
// reference already enqueued, referent and next will be traversed later
- instanceKlass::oop_copy_contents(pm, obj);
+ ref->instanceKlass::oop_copy_contents(pm, obj);
return;
} else {
// treat referent as normal oop
@@ -226,21 +258,31 @@
}
}
// treat next as normal oop
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- if (PSScavenge::should_scavenge(*next_addr)) {
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ if (PSScavenge::should_scavenge(next_addr)) {
pm->claim_or_forward_breadth(next_addr);
}
- instanceKlass::oop_copy_contents(pm, obj);
+ ref->instanceKlass::oop_copy_contents(pm, obj);
}
-void instanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
+void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
+ if (UseCompressedOops) {
+ specialized_oop_copy_contents<narrowOop>(this, pm, obj);
+ } else {
+ specialized_oop_copy_contents<oop>(this, pm, obj);
+ }
+}
+
+template <class T>
+void specialized_oop_push_contents(instanceRefKlass *ref,
+ PSPromotionManager* pm, oop obj) {
assert(pm->depth_first(), "invariant");
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- if (PSScavenge::should_scavenge(*referent_addr)) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ if (PSScavenge::should_scavenge(referent_addr)) {
ReferenceProcessor* rp = PSScavenge::reference_processor();
- if (rp->discover_reference(obj, reference_type())) {
+ if (rp->discover_reference(obj, ref->reference_type())) {
// reference already enqueued, referent and next will be traversed later
- instanceKlass::oop_push_contents(pm, obj);
+ ref->instanceKlass::oop_push_contents(pm, obj);
return;
} else {
// treat referent as normal oop
@@ -248,71 +290,68 @@
}
}
// treat next as normal oop
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- if (PSScavenge::should_scavenge(*next_addr)) {
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ if (PSScavenge::should_scavenge(next_addr)) {
pm->claim_or_forward_depth(next_addr);
}
- instanceKlass::oop_push_contents(pm, obj);
+ ref->instanceKlass::oop_push_contents(pm, obj);
+}
+
+void instanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
+ if (UseCompressedOops) {
+ specialized_oop_push_contents<narrowOop>(this, pm, obj);
+ } else {
+ specialized_oop_push_contents<oop>(this, pm, obj);
+ }
+}
+
+template <class T>
+void specialized_oop_update_pointers(instanceRefKlass *ref,
+ ParCompactionManager* cm, oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ PSParallelCompact::adjust_pointer(referent_addr);
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ PSParallelCompact::adjust_pointer(next_addr);
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ PSParallelCompact::adjust_pointer(discovered_addr);
+ debug_only(trace_reference_gc("instanceRefKlass::oop_update_ptrs", obj,
+ referent_addr, next_addr, discovered_addr);)
}
int instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
instanceKlass::oop_update_pointers(cm, obj);
+ if (UseCompressedOops) {
+ specialized_oop_update_pointers<narrowOop>(this, cm, obj);
+ } else {
+ specialized_oop_update_pointers<oop>(this, cm, obj);
+ }
+ return size_helper();
+}
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- PSParallelCompact::adjust_pointer(referent_addr);
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- PSParallelCompact::adjust_pointer(next_addr);
- oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
- PSParallelCompact::adjust_pointer(discovered_addr);
-#ifdef ASSERT
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj "
- INTPTR_FORMAT, (oopDesc*) obj);
- gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, referent_addr,
- referent_addr ? (oopDesc*) *referent_addr : NULL);
- gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, next_addr,
- next_addr ? (oopDesc*) *next_addr : NULL);
- gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, discovered_addr,
- discovered_addr ? (oopDesc*) *discovered_addr : NULL);
- }
-#endif
-
- return size_helper();
+template <class T> void
+specialized_oop_update_pointers(ParCompactionManager* cm, oop obj,
+ HeapWord* beg_addr, HeapWord* end_addr) {
+ T* p;
+ T* referent_addr = p = (T*)java_lang_ref_Reference::referent_addr(obj);
+ PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
+ T* next_addr = p = (T*)java_lang_ref_Reference::next_addr(obj);
+ PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
+ T* discovered_addr = p = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
+ debug_only(trace_reference_gc("instanceRefKlass::oop_update_ptrs", obj,
+ referent_addr, next_addr, discovered_addr);)
}
int
instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
HeapWord* beg_addr, HeapWord* end_addr) {
instanceKlass::oop_update_pointers(cm, obj, beg_addr, end_addr);
-
- oop* p;
- oop* referent_addr = p = java_lang_ref_Reference::referent_addr(obj);
- PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
- oop* next_addr = p = java_lang_ref_Reference::next_addr(obj);
- PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
- oop* discovered_addr = p = java_lang_ref_Reference::discovered_addr(obj);
- PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
-
-#ifdef ASSERT
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj "
- INTPTR_FORMAT, (oopDesc*) obj);
- gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, referent_addr,
- referent_addr ? (oopDesc*) *referent_addr : NULL);
- gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, next_addr,
- next_addr ? (oopDesc*) *next_addr : NULL);
- gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, discovered_addr,
- discovered_addr ? (oopDesc*) *discovered_addr : NULL);
+ if (UseCompressedOops) {
+ specialized_oop_update_pointers<narrowOop>(cm, obj, beg_addr, end_addr);
+ } else {
+ specialized_oop_update_pointers<oop>(cm, obj, beg_addr, end_addr);
}
-#endif
-
return size_helper();
}
#endif // SERIALGC
@@ -338,7 +377,7 @@
// offset 2 (words) and has 4 map entries.
debug_only(int offset = java_lang_ref_Reference::referent_offset);
debug_only(int length = ((java_lang_ref_Reference::discovered_offset -
- java_lang_ref_Reference::referent_offset)/wordSize) + 1);
+ java_lang_ref_Reference::referent_offset)/heapOopSize) + 1);
if (UseSharedSpaces) {
assert(map->offset() == java_lang_ref_Reference::queue_offset &&
@@ -368,22 +407,35 @@
if (referent != NULL) {
guarantee(referent->is_oop(), "referent field heap failed");
- if (gch != NULL && !gch->is_in_youngest(obj))
+ if (gch != NULL && !gch->is_in_youngest(obj)) {
// We do a specific remembered set check here since the referent
// field is not part of the oop mask and therefore skipped by the
// regular verify code.
- obj->verify_old_oop(java_lang_ref_Reference::referent_addr(obj), true);
+ if (UseCompressedOops) {
+ narrowOop* referent_addr = (narrowOop*)java_lang_ref_Reference::referent_addr(obj);
+ obj->verify_old_oop(referent_addr, true);
+ } else {
+ oop* referent_addr = (oop*)java_lang_ref_Reference::referent_addr(obj);
+ obj->verify_old_oop(referent_addr, true);
+ }
+ }
}
// Verify next field
oop next = java_lang_ref_Reference::next(obj);
if (next != NULL) {
- guarantee(next->is_oop(), "next field verify failed");
+ guarantee(next->is_oop(), "next field verify failed");
guarantee(next->is_instanceRef(), "next field verify failed");
if (gch != NULL && !gch->is_in_youngest(obj)) {
// We do a specific remembered set check here since the next field is
// not part of the oop mask and therefore skipped by the regular
// verify code.
- obj->verify_old_oop(java_lang_ref_Reference::next_addr(obj), true);
+ if (UseCompressedOops) {
+ narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
+ obj->verify_old_oop(next_addr, true);
+ } else {
+ oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
+ obj->verify_old_oop(next_addr, true);
+ }
}
}
}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/klass.cpp
--- a/hotspot/src/share/vm/oops/klass.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/klass.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -542,11 +542,10 @@
void Klass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
/* $$$ I think this functionality should be handled by verification of
-
RememberedSet::verify_old_oop(obj, p, allow_dirty, false);
-
the card table. */
}
+void Klass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) { }
#ifndef PRODUCT
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/klass.hpp
--- a/hotspot/src/share/vm/oops/klass.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/klass.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -757,6 +757,7 @@
virtual const char* internal_name() const = 0;
virtual void oop_verify_on(oop obj, outputStream* st);
virtual void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
+ virtual void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
// tells whether obj is partially constructed (gc during class loading)
virtual bool oop_partially_loaded(oop obj) const { return false; }
virtual void oop_set_partially_loaded(oop obj) {};
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/klassVtable.cpp
--- a/hotspot/src/share/vm/oops/klassVtable.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/klassVtable.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -1118,8 +1118,8 @@
itableOffsetEntry* ioe = (itableOffsetEntry*)klass->start_of_itable();
itableMethodEntry* ime = (itableMethodEntry*)(ioe + nof_interfaces);
intptr_t* end = klass->end_of_itable();
- assert((oop*)(ime + nof_methods) <= klass->start_of_static_fields(), "wrong offset calculation (1)");
- assert((oop*)(end) == (oop*)(ime + nof_methods), "wrong offset calculation (2)");
+ assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_static_fields(), "wrong offset calculation (1)");
+ assert((oop*)(end) == (oop*)(ime + nof_methods), "wrong offset calculation (2)");
// Visit all interfaces and initialize itable offset table
SetupItableClosure sic((address)klass->as_klassOop(), ioe, ime);
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/markOop.hpp
--- a/hotspot/src/share/vm/oops/markOop.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/markOop.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -89,7 +89,7 @@
enum { age_bits = 4,
lock_bits = 2,
biased_lock_bits = 1,
- max_hash_bits = BitsPerOop - age_bits - lock_bits - biased_lock_bits,
+ max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits,
epoch_bits = 2
};
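
The enum change above works because the mark word keeps its full machine-word width even when object references are compressed, so the identity-hash field is sized from BitsPerWord rather than from the now smaller oop. A quick worked instance of the arithmetic for a 64-bit word:

    #include <cstdio>

    int main() {
      const int bits_per_word = 64, age_bits = 4, lock_bits = 2, biased_lock_bits = 1;
      const int max_hash_bits = bits_per_word - age_bits - lock_bits - biased_lock_bits; // 57
      const int hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits;                     // capped at 31
      std::printf("max_hash_bits=%d hash_bits=%d\n", max_hash_bits, hash_bits);
      return 0;
    }
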
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/methodDataKlass.cpp
--- a/hotspot/src/share/vm/oops/methodDataKlass.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/methodDataKlass.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -95,6 +95,7 @@
}
#endif // SERIALGC
+
int methodDataKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
assert (obj->is_methodData(), "object must be method data");
methodDataOop m = methodDataOop(obj);
@@ -113,7 +114,6 @@
return size;
}
-
int methodDataKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
assert (obj->is_methodData(), "object must be method data");
methodDataOop m = methodDataOop(obj);
@@ -158,14 +158,14 @@
assert (obj->is_methodData(), "object must be method data");
methodDataOop m = methodDataOop(obj);
// This should never point into the young gen.
- assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity");
+ assert(!PSScavenge::should_scavenge(m->adr_method()), "Sanity");
}
void methodDataKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
assert (obj->is_methodData(), "object must be method data");
methodDataOop m = methodDataOop(obj);
// This should never point into the young gen.
- assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity");
+ assert(!PSScavenge::should_scavenge(m->adr_method()), "Sanity");
}
int methodDataKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/methodOop.cpp
--- a/hotspot/src/share/vm/oops/methodOop.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/methodOop.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -430,11 +430,11 @@
bool methodOopDesc::is_accessor() const {
if (code_size() != 5) return false;
if (size_of_parameters() != 1) return false;
- if (Bytecodes::java_code_at(code_base()+0) != Bytecodes::_aload_0 ) return false;
- if (Bytecodes::java_code_at(code_base()+1) != Bytecodes::_getfield) return false;
- Bytecodes::Code ret_bc = Bytecodes::java_code_at(code_base()+4);
- if (Bytecodes::java_code_at(code_base()+4) != Bytecodes::_areturn &&
- Bytecodes::java_code_at(code_base()+4) != Bytecodes::_ireturn ) return false;
+ methodOop m = (methodOop)this; // pass to code_at() to avoid method_from_bcp
+ if (Bytecodes::java_code_at(code_base()+0, m) != Bytecodes::_aload_0 ) return false;
+ if (Bytecodes::java_code_at(code_base()+1, m) != Bytecodes::_getfield) return false;
+ if (Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_areturn &&
+ Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_ireturn ) return false;
return true;
}
@@ -955,7 +955,7 @@
// This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
static void reorder_based_on_method_index(objArrayOop methods,
objArrayOop annotations,
- oop* temp_array) {
+ GrowableArray<oop>* temp_array) {
if (annotations == NULL) {
return;
}
@@ -963,12 +963,15 @@
int length = methods->length();
int i;
// Copy to temp array
- memcpy(temp_array, annotations->obj_at_addr(0), length * sizeof(oop));
+ temp_array->clear();
+ for (i = 0; i < length; i++) {
+ temp_array->append(annotations->obj_at(i));
+ }
// Copy back using old method indices
for (i = 0; i < length; i++) {
methodOop m = (methodOop) methods->obj_at(i);
- annotations->obj_at_put(i, temp_array[m->method_idnum()]);
+ annotations->obj_at_put(i, temp_array->at(m->method_idnum()));
}
}
@@ -997,7 +1000,7 @@
// Use a simple bubble sort for small number of methods since
// qsort requires a functional pointer call for each comparison.
- if (length < 8) {
+ if (UseCompressedOops || length < 8) {
bool sorted = true;
for (int i=length-1; i>0; i--) {
 for (int j=0; j<i; j++) {
- qsort(methods->obj_at_addr(0), length, oopSize, compare);
+ qsort(methods->base(), length, heapOopSize, compare);
}
// Sort annotations if necessary
@@ -1022,8 +1028,9 @@
assert(methods_parameter_annotations == NULL || methods_parameter_annotations->length() == methods->length(), "");
assert(methods_default_annotations == NULL || methods_default_annotations->length() == methods->length(), "");
if (do_annotations) {
+ ResourceMark rm;
// Allocate temporary storage
- oop* temp_array = NEW_RESOURCE_ARRAY(oop, length);
+ GrowableArray<oop>* temp_array = new GrowableArray<oop>(length);
reorder_based_on_method_index(methods, methods_annotations, temp_array);
reorder_based_on_method_index(methods, methods_parameter_annotations, temp_array);
reorder_based_on_method_index(methods, methods_default_annotations, temp_array);
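
The reorder helper above stops treating the annotation array as flat oop-sized memory and instead copies element by element into a GrowableArray<oop>; with compressed oops the array's slots are 4 bytes, so a memcpy of length * sizeof(oop) bytes would read the wrong width. A rough standalone illustration of the element-wise copy, using invented types rather than objArrayOop:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static bool use_compressed_oops = true;   // stands in for UseCompressedOops

    // Invented stand-in for an object array with either slot width.
    struct ToyObjArray {
      std::vector<uint32_t> narrow;   // compressed slots
      std::vector<uint64_t> wide;     // full-width slots
      int length() const {
        return use_compressed_oops ? (int)narrow.size() : (int)wide.size();
      }
      uint64_t obj_at(int i) const {
        return use_compressed_oops ? (uint64_t)narrow[i] : wide[i];
      }
    };

    int main() {
      ToyObjArray annotations;
      annotations.narrow.assign(4, 7);
      std::vector<uint64_t> temp;     // plays the role of GrowableArray<oop>
      for (int i = 0; i < annotations.length(); i++) {
        temp.push_back(annotations.obj_at(i));   // decodes per element, width-agnostic
      }
      std::printf("copied %d elements\n", (int)temp.size());
      return 0;
    }
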
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/objArrayKlass.cpp
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -80,6 +80,56 @@
return h_array();
}
+// Either oop or narrowOop depending on UseCompressedOops.
+template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
+ arrayOop d, T* dst, int length, TRAPS) {
+
+ const size_t word_len = objArrayOopDesc::array_size(length);
+
+ // For performance reasons, we assume we are using a card marking write
+ // barrier. The assert will fail if this is not the case.
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+
+ if (s == d) {
+ // since source and destination are equal we do not need conversion checks.
+ assert(length > 0, "sanity check");
+ Copy::conjoint_oops_atomic(src, dst, length);
+ } else {
+ // We have to make sure all elements conform to the destination array
+ klassOop bound = objArrayKlass::cast(d->klass())->element_klass();
+ klassOop stype = objArrayKlass::cast(s->klass())->element_klass();
+ if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
+ // elements are guaranteed to be subtypes, so no check necessary
+ Copy::conjoint_oops_atomic(src, dst, length);
+ } else {
+ // slow case: need individual subtype checks
+ // note: don't use obj_at_put below because it includes a redundant store check
+ T* from = src;
+ T* end = from + length;
+ for (T* p = dst; from < end; from++, p++) {
+ // XXX this is going to be slow.
+ T element = *from;
+ if (oopDesc::is_null(element) ||
+ Klass::cast(oopDesc::decode_heap_oop_not_null(element)->klass())->is_subtype_of(bound)) {
+ *p = *from;
+ } else {
+ // We must do a barrier to cover the partial copy.
+ const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
+ // pointer delta is scaled to number of elements (length field in
+ // objArrayOop) which we assume is 32 bit.
+ assert(pd == (size_t)(int)pd, "length field overflow");
+ const size_t done_word_len = objArrayOopDesc::array_size((int)pd);
+ bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
+ THROW(vmSymbols::java_lang_ArrayStoreException());
+ return;
+ }
+ }
+ }
+ }
+ bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
+}
+
void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
int dst_pos, int length, TRAPS) {
assert(s->is_objArray(), "must be obj array");
@@ -105,48 +155,15 @@
if (length==0) {
return;
}
-
- oop* const src = objArrayOop(s)->obj_at_addr(src_pos);
- oop* const dst = objArrayOop(d)->obj_at_addr(dst_pos);
- const size_t word_len = length * HeapWordsPerOop;
-
- // For performance reasons, we assume we are using a card marking write
- // barrier. The assert will fail if this is not the case.
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-
- if (s == d) {
- // since source and destination are equal we do not need conversion checks.
- assert(length > 0, "sanity check");
- Copy::conjoint_oops_atomic(src, dst, length);
+ if (UseCompressedOops) {
+ narrowOop* const src = objArrayOop(s)->obj_at_addr<narrowOop>(src_pos);
+ narrowOop* const dst = objArrayOop(d)->obj_at_addr<narrowOop>(dst_pos);
+ do_copy(s, src, d, dst, length, CHECK);
} else {
- // We have to make sure all elements conform to the destination array
- klassOop bound = objArrayKlass::cast(d->klass())->element_klass();
- klassOop stype = objArrayKlass::cast(s->klass())->element_klass();
- if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
- // elements are guaranteed to be subtypes, so no check necessary
- Copy::conjoint_oops_atomic(src, dst, length);
- } else {
- // slow case: need individual subtype checks
- // note: don't use obj_at_put below because it includes a redundant store check
- oop* from = src;
- oop* end = from + length;
- for (oop* p = dst; from < end; from++, p++) {
- oop element = *from;
- if (element == NULL || Klass::cast(element->klass())->is_subtype_of(bound)) {
- *p = element;
- } else {
- // We must do a barrier to cover the partial copy.
- const size_t done_word_len = pointer_delta(p, dst, oopSize) *
- HeapWordsPerOop;
- bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
- THROW(vmSymbols::java_lang_ArrayStoreException());
- return;
- }
- }
- }
+ oop* const src = objArrayOop(s)->obj_at_addr<oop>(src_pos);
+ oop* const dst = objArrayOop(d)->obj_at_addr<oop>(dst_pos);
+ do_copy<oop> (s, src, d, dst, length, CHECK);
}
- bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
}
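
copy_array now resolves UseCompressedOops once, up front, and hands correctly typed pointers to do_copy<T>, so the conformance checks and write barriers are written a single time for both layouts. A compact sketch of that dispatch shape under stand-in types; memmove stands in for the atomic copy, and nothing here is the VM's API.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static bool use_compressed_oops = true;   // stands in for UseCompressedOops

    template <class T>
    static void do_copy(const T* src, T* dst, int length) {
      std::memmove(dst, src, length * sizeof(T));  // stands in for conjoint_oops_atomic
    }

    // The untyped entry point chooses the slot width once, then one template
    // body does the work.
    static void copy_array(const void* src, void* dst, int length) {
      if (use_compressed_oops) {
        do_copy(static_cast<const uint32_t*>(src), static_cast<uint32_t*>(dst), length);
      } else {
        do_copy(static_cast<const uint64_t*>(src), static_cast<uint64_t*>(dst), length);
      }
    }

    int main() {
      uint32_t a[3] = { 1, 2, 3 };
      uint32_t b[3] = { 0, 0, 0 };
      copy_array(a, b, 3);
      std::printf("%u %u %u\n", b[0], b[1], b[2]);
      return 0;
    }
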
@@ -242,49 +259,75 @@
return element_klass()->klass_part()->is_subtype_of(oak->element_klass());
}
-
void objArrayKlass::initialize(TRAPS) {
Klass::cast(bottom_klass())->initialize(THREAD); // dispatches to either instanceKlass or typeArrayKlass
}
+#define ObjArrayKlass_SPECIALIZED_OOP_ITERATE(T, a, p, do_oop) \
+{ \
+ T* p = (T*)(a)->base(); \
+ T* const end = p + (a)->length(); \
+ while (p < end) { \
+ do_oop; \
+ p++; \
+ } \
+}
+
+#define ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(T, a, p, low, high, do_oop) \
+{ \
+ T* const l = (T*)(low); \
+ T* const h = (T*)(high); \
+ T* p = (T*)(a)->base(); \
+ T* end = p + (a)->length(); \
+ if (p < l) p = l; \
+ if (end > h) end = h; \
+ while (p < end) { \
+ do_oop; \
+ ++p; \
+ } \
+}
+
+#define ObjArrayKlass_OOP_ITERATE(a, p, do_oop) \
+ if (UseCompressedOops) { \
+ ObjArrayKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+ a, p, do_oop) \
+ } else { \
+ ObjArrayKlass_SPECIALIZED_OOP_ITERATE(oop, \
+ a, p, do_oop) \
+ }
+
+#define ObjArrayKlass_BOUNDED_OOP_ITERATE(a, p, low, high, do_oop) \
+ if (UseCompressedOops) { \
+ ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+ a, p, low, high, do_oop) \
+ } else { \
+ ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
+ a, p, low, high, do_oop) \
+ }
void objArrayKlass::oop_follow_contents(oop obj) {
assert (obj->is_array(), "obj must be array");
- arrayOop a = arrayOop(obj);
+ objArrayOop a = objArrayOop(obj);
a->follow_header();
- oop* base = (oop*)a->base(T_OBJECT);
- oop* const end = base + a->length();
- while (base < end) {
- if (*base != NULL)
- // we call mark_and_follow here to avoid excessive marking stack usage
- MarkSweep::mark_and_follow(base);
- base++;
- }
+ ObjArrayKlass_OOP_ITERATE( \
+ a, p, \
+ /* we call mark_and_follow here to avoid excessive marking stack usage */ \
+ MarkSweep::mark_and_follow(p))
}
#ifndef SERIALGC
void objArrayKlass::oop_follow_contents(ParCompactionManager* cm,
oop obj) {
assert (obj->is_array(), "obj must be array");
- arrayOop a = arrayOop(obj);
+ objArrayOop a = objArrayOop(obj);
a->follow_header(cm);
- oop* base = (oop*)a->base(T_OBJECT);
- oop* const end = base + a->length();
- while (base < end) {
- if (*base != NULL)
- // we call mark_and_follow here to avoid excessive marking stack usage
- PSParallelCompact::mark_and_follow(cm, base);
- base++;
- }
+ ObjArrayKlass_OOP_ITERATE( \
+ a, p, \
+ /* we call mark_and_follow here to avoid excessive marking stack usage */ \
+ PSParallelCompact::mark_and_follow(cm, p))
}
#endif // SERIALGC
-#define invoke_closure_on(base, closure, nv_suffix) { \
- if (*(base) != NULL) { \
- (closure)->do_oop##nv_suffix(base); \
- } \
-}
-
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
int objArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \
@@ -298,21 +341,7 @@
if (closure->do_header()) { \
a->oop_iterate_header(closure); \
} \
- oop* base = a->base(); \
- oop* const end = base + a->length(); \
- const intx field_offset = PrefetchFieldsAhead; \
- if (field_offset > 0) { \
- while (base < end) { \
- prefetch_beyond(base, end, field_offset, closure->prefetch_style()); \
- invoke_closure_on(base, closure, nv_suffix); \
- base++; \
- } \
- } else { \
- while (base < end) { \
- invoke_closure_on(base, closure, nv_suffix); \
- base++; \
- } \
- } \
+ ObjArrayKlass_OOP_ITERATE(a, p, (closure)->do_oop##nv_suffix(p)) \
return size; \
}
@@ -330,28 +359,43 @@
if (closure->do_header()) { \
a->oop_iterate_header(closure, mr); \
} \
- oop* bottom = (oop*)mr.start(); \
- oop* top = (oop*)mr.end(); \
- oop* base = a->base(); \
- oop* end = base + a->length(); \
- if (base < bottom) { \
- base = bottom; \
- } \
- if (end > top) { \
- end = top; \
- } \
- const intx field_offset = PrefetchFieldsAhead; \
- if (field_offset > 0) { \
- while (base < end) { \
- prefetch_beyond(base, end, field_offset, closure->prefetch_style()); \
- invoke_closure_on(base, closure, nv_suffix); \
- base++; \
+ ObjArrayKlass_BOUNDED_OOP_ITERATE( \
+ a, p, mr.start(), mr.end(), (closure)->do_oop##nv_suffix(p)) \
+ return size; \
+}
+
+// Like oop_oop_iterate but only iterates over a specified range and only used
+// for objArrayOops.
+#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \
+ \
+int objArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, \
+ OopClosureType* closure, \
+ int start, int end) { \
+ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \
+ assert(obj->is_array(), "obj must be array"); \
+ objArrayOop a = objArrayOop(obj); \
+ /* Get size before changing pointers. */ \
+ /* Don't call size() or oop_size() since that is a virtual call */ \
+ int size = a->object_size(); \
+ if (UseCompressedOops) { \
+ HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<narrowOop>(start);\
+ /* this might be weird if end needs to be aligned on HeapWord boundary */ \
+ HeapWord* high = (HeapWord*)((narrowOop*)a->base() + end); \
+ MemRegion mr(low, high); \
+ if (closure->do_header()) { \
+ a->oop_iterate_header(closure, mr); \
} \
+ ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+ a, p, low, high, (closure)->do_oop##nv_suffix(p)) \
} else { \
- while (base < end) { \
- invoke_closure_on(base, closure, nv_suffix); \
- base++; \
+ HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<oop>(start); \
+ HeapWord* high = (HeapWord*)((oop*)a->base() + end); \
+ MemRegion mr(low, high); \
+ if (closure->do_header()) { \
+ a->oop_iterate_header(closure, mr); \
} \
+ ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
+ a, p, low, high, (closure)->do_oop##nv_suffix(p)) \
} \
return size; \
}
@@ -360,6 +404,8 @@
ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
+ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
+ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
int objArrayKlass::oop_adjust_pointers(oop obj) {
assert(obj->is_objArray(), "obj must be obj array");
@@ -368,12 +414,7 @@
// Don't call size() or oop_size() since that is a virtual call.
int size = a->object_size();
a->adjust_header();
- oop* base = a->base();
- oop* const end = base + a->length();
- while (base < end) {
- MarkSweep::adjust_pointer(base);
- base++;
- }
+ ObjArrayKlass_OOP_ITERATE(a, p, MarkSweep::adjust_pointer(p))
return size;
}
@@ -381,51 +422,27 @@
void objArrayKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
assert(!pm->depth_first(), "invariant");
assert(obj->is_objArray(), "obj must be obj array");
- // Compute oop range
- oop* curr = objArrayOop(obj)->base();
- oop* end = curr + objArrayOop(obj)->length();
- // assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size");
- assert(align_object_size(pointer_delta(end, obj, sizeof(oop*)))
- == oop_size(obj), "checking size");
-
- // Iterate over oops
- while (curr < end) {
- if (PSScavenge::should_scavenge(*curr)) {
- pm->claim_or_forward_breadth(curr);
- }
- ++curr;
- }
+ ObjArrayKlass_OOP_ITERATE( \
+ objArrayOop(obj), p, \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_breadth(p); \
+ })
}
void objArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
assert(pm->depth_first(), "invariant");
assert(obj->is_objArray(), "obj must be obj array");
- // Compute oop range
- oop* curr = objArrayOop(obj)->base();
- oop* end = curr + objArrayOop(obj)->length();
- // assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size");
- assert(align_object_size(pointer_delta(end, obj, sizeof(oop*)))
- == oop_size(obj), "checking size");
-
- // Iterate over oops
- while (curr < end) {
- if (PSScavenge::should_scavenge(*curr)) {
- pm->claim_or_forward_depth(curr);
- }
- ++curr;
- }
+ ObjArrayKlass_OOP_ITERATE( \
+ objArrayOop(obj), p, \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_depth(p); \
+ })
}
int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
assert (obj->is_objArray(), "obj must be obj array");
objArrayOop a = objArrayOop(obj);
-
- oop* const base = a->base();
- oop* const beg_oop = base;
- oop* const end_oop = base + a->length();
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
+ ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p))
return a->object_size();
}
@@ -433,13 +450,9 @@
HeapWord* beg_addr, HeapWord* end_addr) {
assert (obj->is_objArray(), "obj must be obj array");
objArrayOop a = objArrayOop(obj);
-
- oop* const base = a->base();
- oop* const beg_oop = MAX2((oop*)beg_addr, base);
- oop* const end_oop = MIN2((oop*)end_addr, base + a->length());
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
+ ObjArrayKlass_BOUNDED_OOP_ITERATE( \
+ a, p, beg_addr, end_addr, \
+ PSParallelCompact::adjust_pointer(p))
return a->object_size();
}
#endif // SERIALGC
@@ -509,3 +522,4 @@
RememberedSet::verify_old_oop(obj, p, allow_dirty, true);
*/
}
+void objArrayKlass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) {}
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/objArrayKlass.hpp
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -63,6 +63,11 @@
// Compute class loader
oop class_loader() const { return Klass::cast(bottom_klass())->class_loader(); }
+ private:
+ // Either oop or narrowOop depending on UseCompressedOops.
+ // must be called from within objArrayKlass.cpp
+ template <class T> void do_copy(arrayOop s, T* src, arrayOop d,
+ T* dst, int length, TRAPS);
protected:
// Returns the objArrayKlass for n'th dimension.
virtual klassOop array_klass_impl(bool or_null, int n, TRAPS);
@@ -101,7 +106,9 @@
#define ObjArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \
- MemRegion mr);
+ MemRegion mr); \
+ int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* blk, \
+ int start, int end);
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
@@ -124,5 +131,6 @@
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
+ void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
};
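
The new private do_copy template lets the array-copy path pick the element width once and then run a single routine for both oop and narrowOop arrays. A hedged sketch of that shape follows; the names are invented and the real code additionally performs type and store checks that are omitted here.

    #include <cstdint>
    #include <cstring>

    typedef uint32_t narrowOop;
    typedef void*    oop;
    static bool UseCompressedOops = true;  // assumed runtime switch

    // One private template body; only sizeof(T) differs between the widths.
    template <class T>
    static void do_copy_sketch(T* src, T* dst, int length) {
      std::memmove(dst, src, length * sizeof(T));
    }

    // The public entry point chooses the element type, then funnels into the template.
    static void copy_array_sketch(void* src, void* dst, int length) {
      if (UseCompressedOops) {
        do_copy_sketch((narrowOop*)src, (narrowOop*)dst, length);
      } else {
        do_copy_sketch((oop*)src, (oop*)dst, length);
      }
    }

    int main() {
      narrowOop from[4] = {1, 2, 3, 4};
      narrowOop to[4]   = {0, 0, 0, 0};
      copy_array_sketch(from, to, 4);
      return to[3] == 4 ? 0 : 1;
    }
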
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/objArrayOop.cpp
--- a/hotspot/src/share/vm/oops/objArrayOop.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/objArrayOop.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -25,4 +25,12 @@
# include "incls/_precompiled.incl"
# include "incls/_objArrayOop.cpp.incl"
-// <>
+#define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ \
+int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) { \
+ SpecializationStats::record_call(); \
+ return ((objArrayKlass*)blueprint())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
+}
+
+ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DEFN)
+ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayOop_OOP_ITERATE_DEFN)
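
ObjArrayOop_OOP_ITERATE_DEFN is applied by the ALL_OOP_OOP_ITERATE_CLOSURES_* list macros, so one oop_iterate_range definition is stamped out per closure type. A small sketch of that expansion pattern, using invented closure and macro names rather than the HotSpot ones:

    #include <cstdio>

    struct PrintClosure { void do_slot(int i) { printf("slot %d\n", i); } };
    struct CountClosure { int n; CountClosure() : n(0) {} void do_slot(int) { ++n; } };

    // A list macro applies the definition macro once per closure type.
    #define ALL_CLOSURES_SKETCH(f) f(PrintClosure) f(CountClosure)

    #define ITERATE_RANGE_DEFN_SKETCH(ClosureType)                 \
      void iterate_range(ClosureType* blk, int start, int end) {   \
        for (int i = start; i < end; i++) blk->do_slot(i);         \
      }

    ALL_CLOSURES_SKETCH(ITERATE_RANGE_DEFN_SKETCH)

    int main() {
      PrintClosure p; iterate_range(&p, 2, 5);   // prints slots 2..4
      CountClosure c; iterate_range(&c, 0, 10);  // counts 10 visits
      return c.n == 10 ? 0 : 1;
    }
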
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/objArrayOop.hpp
--- a/hotspot/src/share/vm/oops/objArrayOop.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/objArrayOop.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -26,20 +26,67 @@
// Evaluating "String arg[10]" will create an objArrayOop.
class objArrayOopDesc : public arrayOopDesc {
+ friend class objArrayKlass;
+ friend class Runtime1;
+ friend class psPromotionManager;
+
+ template <class T> T* obj_at_addr(int index) const {
+ assert(is_within_bounds(index), "index out of bounds");
+ return &((T*)base())[index];
+ }
+
public:
+ // base is the address following the header.
+ HeapWord* base() const { return (HeapWord*) arrayOopDesc::base(T_OBJECT); }
+
// Accessing
- oop obj_at(int index) const { return *obj_at_addr(index); }
- void obj_at_put(int index, oop value) { oop_store(obj_at_addr(index), value); }
- oop* base() const { return (oop*) arrayOopDesc::base(T_OBJECT); }
+ oop obj_at(int index) const {
+ // With UseCompressedOops decode the narrow oop in the objArray to an
+ // uncompressed oop. Otherwise this is simply a "*" operator.
+ if (UseCompressedOops) {
+ return load_decode_heap_oop(obj_at_addr<narrowOop>(index));
+ } else {
+ return load_decode_heap_oop(obj_at_addr<oop>(index));
+ }
+ }
+ void obj_at_put(int index, oop value) {
+ if (UseCompressedOops) {
+ oop_store(obj_at_addr<narrowOop>(index), value);
+ } else {
+ oop_store(obj_at_addr<oop>(index), value);
+ }
+ }
// Sizing
- static int header_size() { return arrayOopDesc::header_size(T_OBJECT); }
- static int object_size(int length) { return align_object_size(header_size() + length); }
- int object_size() { return object_size(length()); }
+ static int header_size() { return arrayOopDesc::header_size(T_OBJECT); }
+ int object_size() { return object_size(length()); }
+ int array_size() { return array_size(length()); }
+
+ static int object_size(int length) {
+ // This returns the object size in HeapWords.
+ return align_object_size(header_size() + array_size(length));
+ }
- // Returns the address of the index'th element
- oop* obj_at_addr(int index) const {
- assert(is_within_bounds(index), "index out of bounds");
- return &base()[index];
+ // Returns the size of the objArrayOop data area in HeapWords, excluding the header.
+ static int array_size(int length) {
+ // Without UseCompressedOops, this is simply:
+ // oop->length() * HeapWordsPerOop;
+ // With narrowOops, HeapWordsPerOop is 1/2 and truncates to 0 as an integer,
+ // so the length is converted to whole heap words instead; the narrow oop
+ // elements are aligned up to wordSize.
+ const int HeapWordsPerOop = heapOopSize/HeapWordSize;
+ if (HeapWordsPerOop > 0) {
+ return length * HeapWordsPerOop;
+ } else {
+ const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+ int word_len = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+ return word_len;
+ }
}
+
+ // Special iterators for index ranges; they return the size of the object.
+#define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
+ int oop_iterate_range(OopClosureType* blk, int start, int end);
+
+ ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DECL)
+ ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayOop_OOP_ITERATE_DECL)
};
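
The array_size arithmetic above is the key sizing change: with full-width oops each element is one heap word, while compressed oops pack two elements per word and round odd lengths up. A self-contained sketch of the same arithmetic, assuming an LP64 heap word of 8 bytes and invented helper names:

    #include <cassert>

    static const int HeapWordSize = 8;  // assumed LP64 heap word

    static int align_size_up_sketch(int size, int alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    // Size in heap words of 'length' reference elements, excluding the header.
    static int array_size_sketch(int length, int heapOopSize) {
      const int HeapWordsPerOop = heapOopSize / HeapWordSize;   // 1, or 0 when compressed
      if (HeapWordsPerOop > 0) {
        return length * HeapWordsPerOop;                        // full-width oops
      }
      const int OopsPerHeapWord = HeapWordSize / heapOopSize;   // 2 when heapOopSize == 4
      return align_size_up_sketch(length, OopsPerHeapWord) / OopsPerHeapWord;
    }

    int main() {
      assert(array_size_sketch(10, 8) == 10);  // uncompressed: one word per element
      assert(array_size_sketch(10, 4) == 5);   // compressed: two elements per word
      assert(array_size_sketch(11, 4) == 6);   // odd length rounds up to a whole word
      return 0;
    }
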
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/oop.cpp
--- a/hotspot/src/share/vm/oops/oop.cpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/oop.cpp Sun Apr 13 17:43:42 2008 -0400
@@ -105,10 +105,14 @@
}
+// XXX verify_old_oop doesn't do anything (should we remove?)
void oopDesc::verify_old_oop(oop* p, bool allow_dirty) {
blueprint()->oop_verify_old_oop(this, p, allow_dirty);
}
+void oopDesc::verify_old_oop(narrowOop* p, bool allow_dirty) {
+ blueprint()->oop_verify_old_oop(this, p, allow_dirty);
+}
bool oopDesc::partially_loaded() {
return blueprint()->oop_partially_loaded(this);
@@ -130,3 +134,6 @@
}
VerifyOopClosure VerifyOopClosure::verify_oop;
+
+void VerifyOopClosure::do_oop(oop* p) { VerifyOopClosure::do_oop_work(p); }
+void VerifyOopClosure::do_oop(narrowOop* p) { VerifyOopClosure::do_oop_work(p); }
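
The new do_oop(narrowOop*) overload follows the pattern used throughout this change: a single template do_oop_work body plus one thin overload per pointer width. A minimal sketch of that pattern with invented types (this is not VerifyOopClosure itself):

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowOop;
    typedef void*    oop;

    struct SketchClosure {
      // One template body handles both slot widths...
      template <class T> void do_oop_work(T* p) {
        printf("visiting a %u-byte slot at %p\n", (unsigned)sizeof(T), (void*)p);
      }
      // ...and two plain overloads give GC code a concrete entry point per width.
      void do_oop(oop* p)       { do_oop_work(p); }
      void do_oop(narrowOop* p) { do_oop_work(p); }
    };

    int main() {
      oop wide = 0;          // a full-width slot
      narrowOop narrow = 0;  // a compressed slot
      SketchClosure c;
      c.do_oop(&wide);
      c.do_oop(&narrow);
      return 0;
    }
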
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/oop.hpp
--- a/hotspot/src/share/vm/oops/oop.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/oop.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -30,12 +30,12 @@
// no virtual functions allowed
// store into oop with store check
-void oop_store(oop* p, oop v);
-void oop_store(volatile oop* p, oop v);
+template <class T> void oop_store(T* p, oop v);
+template <class T> void oop_store(volatile T* p, oop v);
// store into oop without store check
-void oop_store_without_check(oop* p, oop v);
-void oop_store_without_check(volatile oop* p, oop v);
+template <class T> void oop_store_without_check(T* p, oop v);
+template <class T> void oop_store_without_check(volatile T* p, oop v);
extern bool always_do_update_barrier;
@@ -55,7 +55,10 @@
friend class VMStructs;
private:
volatile markOop _mark;
- klassOop _klass;
+ union _metadata {
+ wideKlassOop _klass;
+ narrowOop _compressed_klass;
+ } _metadata;
// Fast access to barrier set. Must be initialized.
static BarrierSet* _bs;
@@ -73,16 +76,16 @@
// objects during a GC) -- requires a valid klass pointer
void init_mark();
- klassOop klass() const { return _klass; }
- oop* klass_addr() const { return (oop*) &_klass; }
+ klassOop klass() const;
+ oop* klass_addr();
+ narrowOop* compressed_klass_addr();
void set_klass(klassOop k);
// For when the klass pointer is being used as a linked list "next" field.
void set_klass_to_list_ptr(oop k);
- // size of object header
- static int header_size() { return sizeof(oopDesc)/HeapWordSize; }
- static int header_size_in_bytes() { return sizeof(oopDesc); }
+ // size of object header, aligned to platform wordSize
+ static int header_size() { return sizeof(oopDesc)/HeapWordSize; }
Klass* blueprint() const;
@@ -119,7 +122,6 @@
private:
// field addresses in oop
- // byte/char/bool/short fields are always stored as full words
void* field_base(int offset) const;
jbyte* byte_field_addr(int offset) const;
@@ -130,13 +132,66 @@
jlong* long_field_addr(int offset) const;
jfloat* float_field_addr(int offset) const;
jdouble* double_field_addr(int offset) const;
+ address* address_field_addr(int offset) const;
public:
- // need this as public for garbage collection
- oop* obj_field_addr(int offset) const;
+ // Need this as public for garbage collection.
+ template <class T> T* obj_field_addr(int offset) const;
+
+ static bool is_null(oop obj);
+ static bool is_null(narrowOop obj);
+
+ // Decode an oop pointer from a narrowOop if compressed.
+ // These are overloaded for oop and narrowOop as are the other functions
+ // below so that they can be called in template functions.
+ static oop decode_heap_oop_not_null(oop v);
+ static oop decode_heap_oop_not_null(narrowOop v);
+ static oop decode_heap_oop(oop v);
+ static oop decode_heap_oop(narrowOop v);
+
+ // Encode an oop pointer to a narrow oop. The plain versions accept a
+ // null oop pointer; the not_null versions do not, which eliminates the
+ // null-checking branches in hot paths.
+ static narrowOop encode_heap_oop_not_null(oop v);
+ static narrowOop encode_heap_oop(oop v);
+
+ // Load an oop out of the Java heap
+ static narrowOop load_heap_oop(narrowOop* p);
+ static oop load_heap_oop(oop* p);
+ // Load an oop out of the Java heap and decode it to an uncompressed oop.
+ static oop load_decode_heap_oop_not_null(narrowOop* p);
+ static oop load_decode_heap_oop_not_null(oop* p);
+ static oop load_decode_heap_oop(narrowOop* p);
+ static oop load_decode_heap_oop(oop* p);
+
+ // Store an oop into the heap.
+ static void store_heap_oop(narrowOop* p, narrowOop v);
+ static void store_heap_oop(oop* p, oop v);
+
+ // Encode oop if UseCompressedOops and store into the heap.
+ static void encode_store_heap_oop_not_null(narrowOop* p, oop v);
+ static void encode_store_heap_oop_not_null(oop* p, oop v);
+ static void encode_store_heap_oop(narrowOop* p, oop v);
+ static void encode_store_heap_oop(oop* p, oop v);
+
+ static void release_store_heap_oop(volatile narrowOop* p, narrowOop v);
+ static void release_store_heap_oop(volatile oop* p, oop v);
+
+ static void release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v);
+ static void release_encode_store_heap_oop_not_null(volatile oop* p, oop v);
+ static void release_encode_store_heap_oop(volatile narrowOop* p, oop v);
+ static void release_encode_store_heap_oop(volatile oop* p, oop v);
+
+ static oop atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest);
+ static oop atomic_compare_exchange_oop(oop exchange_value,
+ volatile HeapWord *dest,
+ oop compare_value);
+
+ // Access to fields in an instanceOop through these methods.
oop obj_field(int offset) const;
void obj_field_put(int offset, oop value);
+ void obj_field_raw_put(int offset, oop value);
jbyte byte_field(int offset) const;
void byte_field_put(int offset, jbyte contents);
@@ -162,6 +217,9 @@
jdouble double_field(int offset) const;
void double_field_put(int offset, jdouble contents);
+ address address_field(int offset) const;
+ void address_field_put(int offset, address contents);
+
oop obj_field_acquire(int offset) const;
void release_obj_field_put(int offset, oop value);
@@ -207,6 +265,7 @@
void verify_on(outputStream* st);
void verify();
void verify_old_oop(oop* p, bool allow_dirty);
+ void verify_old_oop(narrowOop* p, bool allow_dirty);
// tells whether this oop is partially constructed (gc during class loading)
bool partially_loaded();
@@ -228,8 +287,8 @@
bool is_gc_marked() const;
// Apply "MarkSweep::mark_and_push" to (the address of) every non-NULL
// reference field in "this".
- void follow_contents();
- void follow_header();
+ void follow_contents(void);
+ void follow_header(void);
#ifndef SERIALGC
// Parallel Scavenge
@@ -317,6 +376,7 @@
void set_displaced_mark(markOop m);
// for code generation
- static int klass_offset_in_bytes() { return offset_of(oopDesc, _klass); }
static int mark_offset_in_bytes() { return offset_of(oopDesc, _mark); }
+ static int klass_offset_in_bytes() { return offset_of(oopDesc, _metadata._klass); }
+ static int klass_gap_offset_in_bytes();
};
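
The _metadata union is what makes the header layout switchable: with compressed oops the klass field occupies only the first 32 bits of the old pointer slot, and klass_gap_offset_in_bytes names the 32-bit gap that follows. A simplified layout sketch with stand-in types, assuming an LP64 platform; the struct and helpers here are illustrative, not oopDesc:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    typedef uint32_t  narrowOop;
    typedef uintptr_t markWord;   // stand-in for markOop

    struct HeaderSketch {
      markWord _mark;
      union metadata {
        void*     _klass;             // full-width klass pointer
        narrowOop _compressed_klass;  // 32-bit klass reference
      } _metadata;
    };

    static size_t klass_offset_in_bytes()     { return offsetof(HeaderSketch, _metadata); }
    static size_t klass_gap_offset_in_bytes() { return klass_offset_in_bytes() + sizeof(narrowOop); }

    int main() {
      // On an assumed LP64 platform: mark at offset 0, klass at 8, gap at 12.
      assert(klass_offset_in_bytes() == sizeof(markWord));
      assert(klass_gap_offset_in_bytes() == klass_offset_in_bytes() + 4);
      return 0;
    }
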
diff -r f4edb0d9f109 -r 21d113ecbf6a hotspot/src/share/vm/oops/oop.inline.hpp
--- a/hotspot/src/share/vm/oops/oop.inline.hpp Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp Sun Apr 13 17:43:42 2008 -0400
@@ -25,7 +25,6 @@
// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references
-
inline void oopDesc::release_set_mark(markOop m) {
OrderAccess::release_store_ptr(&_mark, m);
}
@@ -34,17 +33,54 @@
return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}
+inline klassOop oopDesc::klass() const {
+ if (UseCompressedOops) {
+ return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
+ // The klass can be NULL with CMS, but compressed oops are not supported with CMS yet.
+ } else {
+ return _metadata._klass;
+ }
+}
+
+inline int oopDesc::klass_gap_offset_in_bytes() {
+ assert(UseCompressedOops, "only applicable to compressed headers");
+ return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
+}
+
+inline oop* oopDesc::klass_addr() {
+ // Only used internally and by CMS; it will not work with
+ // UseCompressedOops.
+ assert(!UseCompressedOops, "only supported with uncompressed oops");
+ return (oop*) &_metadata._klass;
+}
+
+inline narrowOop* oopDesc::compressed_klass_addr() {
+ assert(UseCompressedOops, "only called by compressed oops");
+ return (narrowOop*) &_metadata._compressed_klass;
+}
+
inline void oopDesc::set_klass(klassOop k) {
// since klasses are promoted no store check is needed
assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
- oop_store_without_check((oop*) &_klass, (oop) k);
+ if (UseCompressedOops) {
+ // Zero the gap when the klass is set, by zeroing the pointer-sized
+ // part of the union.
+ _metadata._klass = NULL;
+ oop_store_without_check(compressed_klass_addr(), (oop)k);
+ } else {
+ oop_store_without_check(klass_addr(), (oop) k);
+ }
}
inline void oopDesc::set_klass_to_list_ptr(oop k) {
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.
- _klass = (klassOop)k;
+ if (UseCompressedOops) {
+ _metadata._compressed_klass = encode_heap_oop_not_null(k);
+ } else {
+ _metadata._klass = (klassOop)k;
+ }
}
inline void oopDesc::init_mark() { set_mark(markOopDesc::prototype_for_object(this)); }
@@ -70,7 +106,7 @@
inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
-inline oop* oopDesc::obj_field_addr(int offset) const { return (oop*) field_base(offset); }
+template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jbyte* oopDesc::byte_field_addr(int offset) const { return (jbyte*) field_base(offset); }
inline jchar* oopDesc::char_field_addr(int offset) const { return (jchar*) field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset) const { return (jboolean*)field_base(offset); }
@@ -79,9 +115,156 @@
inline jlong* oopDesc::long_field_addr(int offset) const { return (jlong*) field_base(offset); }
inline jfloat* oopDesc::float_field_addr(int offset) const { return (jfloat*) field_base(offset); }
inline jdouble* oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
+inline address* oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
-inline oop oopDesc::obj_field(int offset) const { return *obj_field_addr(offset); }
-inline void oopDesc::obj_field_put(int offset, oop value) { oop_store(obj_field_addr(offset), value); }
+
+// Functions for getting and setting oops within instance objects.
+// If the oops are compressed, the type passed to these overloaded functions
+// is narrowOop. All functions are overloaded so they can be called by
+// template functions without conditionals (the compiler instantiates via
+// the right type and inlines the appropriate code).
+
+inline bool oopDesc::is_null(oop obj) { return obj == NULL; }
+inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
+
+// Algorithm for encoding and decoding oops: a 64-bit pointer is turned into a
+// 32-bit offset from the heap base, scaled by the minimum object alignment.
+// Skipping the null check saves instructions in inner GC loops, so the
+// not_null variants are kept separate.
+
+inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
+ assert(!is_null(v), "oop value can never be zero");
+ address heap_base = Universe::heap_base();
+ uint64_t result = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1) >> LogMinObjAlignmentInBytes);
+ assert((result & 0xffffffff00000000L) == 0, "narrow oop overflow");
+ return (narrowOop)result;
+}
+
+inline narrowOop oopDesc::encode_heap_oop(oop v) {
+ return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
+}
+
+inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
+ assert(!is_null(v), "narrow oop value can never be zero");
+ address heap_base = Universe::heap_base();
+ return (oop)(void*)((uintptr_t)heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes));
+}
+
+inline oop oopDesc::decode_heap_oop(narrowOop v) {
+ return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
+}
+
+inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
+inline oop oopDesc::decode_heap_oop(oop v) { return v; }
+
+// Load an oop out of the Java heap as is without decoding.
+// Called by GC to check for null before decoding.
+inline oop oopDesc::load_heap_oop(oop* p) { return *p; }
+inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
+
+// Load and decode an oop out of the Java heap into a wide oop.
+inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
+inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
+ return decode_heap_oop_not_null(*p);
+}
+
+// Load and decode an oop out of the heap accepting null
+inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
+inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
+ return decode_heap_oop(*p);
+}
+
+// Store already encoded heap oop into the heap.
+inline void oopDesc::store_heap_oop(oop* p, oop v) { *p = v; }
+inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
+
+// Encode and store a heap oop.
+inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
+ *p = encode_heap_oop_not_null(v);
+}
+inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
+
+// Encode and store a heap oop allowing for null.
+inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
+ *p = encode_heap_oop(v);
+}
+inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }
+
+// Store heap oop as is for volatile fields.
+inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
+ OrderAccess::release_store_ptr(p, v);
+}
+inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
+ narrowOop v) {
+ OrderAccess::release_store(p, v);
+}
+
+inline void oopDesc::release_encode_store_heap_oop_not_null(
+ volatile narrowOop* p, oop v) {
+ // The narrow heap oop is not pointer-sized, so use release_store.
+ OrderAccess::release_store(p, encode_heap_oop_not_null(v));
+}
+
+inline void oopDesc::release_encode_store_heap_oop_not_null(
+ volatile oop* p, oop v) {
+ OrderAccess::release_store_ptr(p, v);
+}
+
+inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
+ oop v) {
+ OrderAccess::release_store_ptr(p, v);
+}
+inline void oopDesc::release_encode_store_heap_oop(
+ volatile narrowOop* p, oop v) {
+ OrderAccess::release_store(p, encode_heap_oop(v));
+}
+
+
+// These functions are only used to exchange oop fields in instances,
+// not headers.
+inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
+ if (UseCompressedOops) {
+ // encode exchange value from oop to narrowOop
+ narrowOop val = encode_heap_oop(exchange_value);
+ narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
+ // decode old from narrowOop to oop
+ return decode_heap_oop(old);
+ } else {
+ return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
+ }
+}
+
+inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
+ volatile HeapWord *dest,
+ oop compare_value) {
+ if (UseCompressedOops) {
+ // encode exchange and compare values from oop to narrowOop
+ narrowOop val = encode_heap_oop(exchange_value);
+ narrowOop cmp = encode_heap_oop(compare_value);
+
+ narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
+ // decode old from narrowOop to oop
+ return decode_heap_oop(old);
+ } else {
+ return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
+ }
+}
+
+// To get or put an oop field in an instance, first check whether the field
+// is compressed and decode or encode it accordingly.
+inline oop oopDesc::obj_field(int offset) const {
+ return UseCompressedOops ?
+ load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
+ load_decode_heap_oop(obj_field_addr<oop>(offset));
+}
+inline void oopDesc::obj_field_put(int offset, oop value) {
+ UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
+ oop_store(obj_field_addr<oop>(offset), value);
+}
+inline void oopDesc::obj_field_raw_put(int offset, oop value) {
+ UseCompressedOops ?
+ encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
+ encode_store_heap_oop(obj_field_addr<oop>(offset), value);
+}
inline jbyte oopDesc::byte_field(int offset) const { return (jbyte) *byte_field_addr(offset); }
inline void oopDesc::byte_field_put(int offset, jbyte contents) { *byte_field_addr(offset) = (jint) contents; }
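
The encode/decode inlines above implement the compressed-oop arithmetic: subtract the heap base, shift right by the object-alignment log to get a 32-bit scaled offset, and reverse the steps to decode. A self-contained sketch of the same arithmetic with assumed values (8-byte alignment, a fabricated heap base) and invented function names:

    #include <cassert>
    #include <cstdint>

    typedef uint32_t narrowOop;
    static const int LogMinObjAlignmentInBytes = 3;  // assumed 8-byte object alignment

    static uint64_t heap_base = 0;  // set once when the "heap" is reserved

    static narrowOop encode_sketch(uint64_t p) {
      if (p == 0) return 0;                          // null encodes to 0
      uint64_t result = (p - heap_base) >> LogMinObjAlignmentInBytes;
      assert(((result & 0xffffffff00000000ULL) == 0) && "narrow oop overflow");
      return (narrowOop)result;
    }

    static uint64_t decode_sketch(narrowOop v) {
      if (v == 0) return 0;                          // 0 decodes to null
      return heap_base + ((uint64_t)v << LogMinObjAlignmentInBytes);
    }

    int main() {
      heap_base = 0x100000000ULL;           // pretend heap base at 4 GB
      uint64_t obj = heap_base + 0x7fff8;   // an 8-byte-aligned address in the heap
      narrowOop n = encode_sketch(obj);
      assert(decode_sketch(n) == obj);      // round-trips exactly
      assert(sizeof(n) == 4);               // stored as a 32-bit scaled offset
      return 0;
    }
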
@@ -107,8 +290,21 @@
inline jdouble oopDesc::double_field(int offset) const { return *double_field_addr(offset); }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }
-inline oop oopDesc::obj_field_acquire(int offset) const { return (oop)OrderAccess::load_ptr_acquire(obj_field_addr(offset)); }
-inline void oopDesc::release_obj_field_put(int offset, oop value) { oop_store((volatile oop*)obj_field_addr(offset), value); }
+inline address oopDesc::address_field(int offset) const { return *address_field_addr(offset); }
+inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
+
+inline oop oopDesc::obj_field_acquire(int offset) const {
+ return UseCompressedOops ?
+ decode_heap_oop((narrowOop)
+ OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
+ : decode_heap_oop((oop)
+ OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
+}
+inline void oopDesc::release_obj_field_put(int offset, oop value) {
+ UseCompressedOops ?
+ oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
+ oop_store((volatile oop*) obj_field_addr<oop>(offset), value);
+}
inline jbyte oopDesc::byte_field_acquire(int offset) const { return OrderAccess::load_acquire(byte_field_addr(offset)); }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents) { OrderAccess::release_store(byte_field_addr(offset), contents); }
@@ -134,7 +330,6 @@
inline jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }
-
inline int oopDesc::size_given_klass(Klass* klass) {
int lh = klass->layout_helper();
int s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
@@ -200,7 +395,7 @@
// technique) we will need to suitably modify the assertion.
assert((s == klass->oop_size(this)) ||
(((UseParNewGC || UseParallelGC) &&
- Universe::heap()->is_gc_active()) &&
+ Universe::heap()->is_gc_active()) &&
(is_typeArray() ||
(is_objArray() && is_forwarded()))),
"wrong array object size");
@@ -224,52 +419,58 @@
return blueprint()->oop_is_parsable(this);
}
-
-inline void update_barrier_set(oop *p, oop v) {
+inline void update_barrier_set(void* p, oop v) {
assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
oopDesc::bs()->write_ref_field(p, v);
}
-
-inline void oop_store(oop* p, oop v) {
+template <class T> inline void oop_store(T* p, oop v) {
if (always_do_update_barrier) {
- oop_store((volatile oop*)p, v);
+ oop_store((volatile T*)p, v);
} else {
- *p = v;
+ oopDesc::encode_store_heap_oop(p, v);
update_barrier_set(p, v);
}
}
-inline void oop_store(volatile oop* p, oop v) {
+template <class T> inline void oop_store(volatile T* p, oop v) {
// Used by release_obj_field_put, so use release_store_ptr.
- OrderAccess::release_store_ptr(p, v);
- update_barrier_set((oop *)p, v);
+ oopDesc::release_encode_store_heap_oop(p, v);
+ update_barrier_set((void*)p, v);
}
-inline void oop_store_without_check(oop* p, oop v) {
+template