6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
author coleenp
Sun, 13 Apr 2008 17:43:42 -0400
changeset 360 21d113ecbf6a
parent 357 f4edb0d9f109
child 361 a06f95253b6b
child 362 00cf4bffd828
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapSet.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapValue.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapVisitor.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Address.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAMD64.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIA64.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIntelX86.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC32Bit.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC64Bit.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxAddress.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebugger.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebuggerLocal.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dummy/DummyAddress.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteAddress.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Address.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Debugger.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32DebuggerLocal.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgAddress.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebugger.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Array.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheKlass.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolKlass.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/DefaultOopVisitor.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/NarrowOopField.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjArray.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogram.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogramElement.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopPrinter.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopVisitor.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/AddressVisitor.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Field.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/types/NarrowOopField.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Type.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicField.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicFieldWrapper.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicNarrowOopField.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicOopField.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicType.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/FindInHeapPanel.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ReversePtrsAnalysis.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java
hotspot/make/Makefile
hotspot/make/solaris/makefiles/sparcWorks.make
hotspot/src/cpu/sparc/vm/assembler_sparc.cpp
hotspot/src/cpu/sparc/vm/assembler_sparc.hpp
hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp
hotspot/src/cpu/sparc/vm/copy_sparc.hpp
hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp
hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
hotspot/src/cpu/sparc/vm/sparc.ad
hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp
hotspot/src/cpu/x86/vm/assembler_x86_64.cpp
hotspot/src/cpu/x86/vm/assembler_x86_64.hpp
hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp
hotspot/src/cpu/x86/vm/register_definitions_x86.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp
hotspot/src/cpu/x86/vm/x86_32.ad
hotspot/src/cpu/x86/vm/x86_64.ad
hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp
hotspot/src/os/solaris/dtrace/jhelper.d
hotspot/src/os/solaris/dtrace/libjvm_db.c
hotspot/src/os/windows/vm/os_windows.cpp
hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s
hotspot/src/share/vm/adlc/archDesc.cpp
hotspot/src/share/vm/adlc/forms.cpp
hotspot/src/share/vm/adlc/forms.hpp
hotspot/src/share/vm/adlc/formssel.cpp
hotspot/src/share/vm/adlc/output_c.cpp
hotspot/src/share/vm/adlc/output_h.cpp
hotspot/src/share/vm/asm/codeBuffer.cpp
hotspot/src/share/vm/c1/c1_Runtime1.cpp
hotspot/src/share/vm/ci/ciInstanceKlass.cpp
hotspot/src/share/vm/ci/ciInstanceKlass.hpp
hotspot/src/share/vm/ci/ciObjectFactory.cpp
hotspot/src/share/vm/classfile/classFileParser.cpp
hotspot/src/share/vm/classfile/javaClasses.cpp
hotspot/src/share/vm/classfile/javaClasses.hpp
hotspot/src/share/vm/compiler/oopMap.cpp
hotspot/src/share/vm/compiler/oopMap.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew
hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp
hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp
hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
hotspot/src/share/vm/gc_interface/collectedHeap.cpp
hotspot/src/share/vm/gc_interface/collectedHeap.hpp
hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp
hotspot/src/share/vm/includeDB_core
hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
hotspot/src/share/vm/memory/barrierSet.hpp
hotspot/src/share/vm/memory/barrierSet.inline.hpp
hotspot/src/share/vm/memory/cardTableModRefBS.cpp
hotspot/src/share/vm/memory/cardTableModRefBS.hpp
hotspot/src/share/vm/memory/cardTableRS.cpp
hotspot/src/share/vm/memory/cardTableRS.hpp
hotspot/src/share/vm/memory/compactingPermGenGen.cpp
hotspot/src/share/vm/memory/defNewGeneration.cpp
hotspot/src/share/vm/memory/defNewGeneration.hpp
hotspot/src/share/vm/memory/defNewGeneration.inline.hpp
hotspot/src/share/vm/memory/dump.cpp
hotspot/src/share/vm/memory/genCollectedHeap.cpp
hotspot/src/share/vm/memory/genCollectedHeap.hpp
hotspot/src/share/vm/memory/genMarkSweep.cpp
hotspot/src/share/vm/memory/genOopClosures.hpp
hotspot/src/share/vm/memory/genOopClosures.inline.hpp
hotspot/src/share/vm/memory/genRemSet.hpp
hotspot/src/share/vm/memory/genRemSet.inline.hpp
hotspot/src/share/vm/memory/generation.cpp
hotspot/src/share/vm/memory/generation.hpp
hotspot/src/share/vm/memory/iterator.hpp
hotspot/src/share/vm/memory/modRefBarrierSet.hpp
hotspot/src/share/vm/memory/referenceProcessor.cpp
hotspot/src/share/vm/memory/referenceProcessor.hpp
hotspot/src/share/vm/memory/restore.cpp
hotspot/src/share/vm/memory/serialize.cpp
hotspot/src/share/vm/memory/sharedHeap.cpp
hotspot/src/share/vm/memory/space.cpp
hotspot/src/share/vm/memory/space.hpp
hotspot/src/share/vm/memory/universe.cpp
hotspot/src/share/vm/memory/universe.hpp
hotspot/src/share/vm/oops/arrayOop.hpp
hotspot/src/share/vm/oops/constantPoolKlass.cpp
hotspot/src/share/vm/oops/constantPoolKlass.hpp
hotspot/src/share/vm/oops/constantPoolOop.hpp
hotspot/src/share/vm/oops/cpCacheKlass.cpp
hotspot/src/share/vm/oops/cpCacheKlass.hpp
hotspot/src/share/vm/oops/cpCacheOop.cpp
hotspot/src/share/vm/oops/cpCacheOop.hpp
hotspot/src/share/vm/oops/instanceKlass.cpp
hotspot/src/share/vm/oops/instanceKlass.hpp
hotspot/src/share/vm/oops/instanceKlassKlass.cpp
hotspot/src/share/vm/oops/instanceOop.hpp
hotspot/src/share/vm/oops/instanceRefKlass.cpp
hotspot/src/share/vm/oops/klass.cpp
hotspot/src/share/vm/oops/klass.hpp
hotspot/src/share/vm/oops/klassVtable.cpp
hotspot/src/share/vm/oops/markOop.hpp
hotspot/src/share/vm/oops/methodDataKlass.cpp
hotspot/src/share/vm/oops/methodOop.cpp
hotspot/src/share/vm/oops/objArrayKlass.cpp
hotspot/src/share/vm/oops/objArrayKlass.hpp
hotspot/src/share/vm/oops/objArrayOop.cpp
hotspot/src/share/vm/oops/objArrayOop.hpp
hotspot/src/share/vm/oops/oop.cpp
hotspot/src/share/vm/oops/oop.hpp
hotspot/src/share/vm/oops/oop.inline.hpp
hotspot/src/share/vm/oops/oop.pcgc.inline.hpp
hotspot/src/share/vm/oops/oopsHierarchy.hpp
hotspot/src/share/vm/opto/buildOopMap.cpp
hotspot/src/share/vm/opto/callnode.hpp
hotspot/src/share/vm/opto/cfgnode.cpp
hotspot/src/share/vm/opto/chaitin.cpp
hotspot/src/share/vm/opto/classes.hpp
hotspot/src/share/vm/opto/compile.cpp
hotspot/src/share/vm/opto/connode.cpp
hotspot/src/share/vm/opto/connode.hpp
hotspot/src/share/vm/opto/escape.cpp
hotspot/src/share/vm/opto/graphKit.cpp
hotspot/src/share/vm/opto/idealKit.cpp
hotspot/src/share/vm/opto/lcm.cpp
hotspot/src/share/vm/opto/library_call.cpp
hotspot/src/share/vm/opto/loopTransform.cpp
hotspot/src/share/vm/opto/machnode.cpp
hotspot/src/share/vm/opto/macro.cpp
hotspot/src/share/vm/opto/macro.hpp
hotspot/src/share/vm/opto/matcher.cpp
hotspot/src/share/vm/opto/memnode.cpp
hotspot/src/share/vm/opto/memnode.hpp
hotspot/src/share/vm/opto/node.cpp
hotspot/src/share/vm/opto/node.hpp
hotspot/src/share/vm/opto/opcodes.cpp
hotspot/src/share/vm/opto/opcodes.hpp
hotspot/src/share/vm/opto/parse2.cpp
hotspot/src/share/vm/opto/parse3.cpp
hotspot/src/share/vm/opto/phaseX.cpp
hotspot/src/share/vm/opto/phaseX.hpp
hotspot/src/share/vm/opto/subnode.cpp
hotspot/src/share/vm/opto/subnode.hpp
hotspot/src/share/vm/opto/superword.cpp
hotspot/src/share/vm/opto/type.cpp
hotspot/src/share/vm/opto/type.hpp
hotspot/src/share/vm/prims/jni.cpp
hotspot/src/share/vm/prims/jvmtiTagMap.cpp
hotspot/src/share/vm/prims/unsafe.cpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/atomic.cpp
hotspot/src/share/vm/runtime/atomic.hpp
hotspot/src/share/vm/runtime/frame.cpp
hotspot/src/share/vm/runtime/frame.hpp
hotspot/src/share/vm/runtime/globals.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/globals_extension.hpp
hotspot/src/share/vm/runtime/hpi.cpp
hotspot/src/share/vm/runtime/init.cpp
hotspot/src/share/vm/runtime/jniHandles.cpp
hotspot/src/share/vm/runtime/vmStructs.cpp
hotspot/src/share/vm/services/heapDumper.cpp
hotspot/src/share/vm/utilities/copy.hpp
hotspot/src/share/vm/utilities/debug.cpp
hotspot/src/share/vm/utilities/globalDefinitions.cpp
hotspot/src/share/vm/utilities/globalDefinitions.hpp
hotspot/src/share/vm/utilities/taskqueue.hpp
hotspot/src/share/vm/utilities/vmError.cpp
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Sun Apr 13 17:43:42 2008 -0400
@@ -885,7 +885,12 @@
                                         out.println("found at " + addr);
                                     }
                                 }
-
+                                public void visitCompOopAddress(Address addr) {
+                                    Address val = addr.getCompOopAddressAt(0);
+                                    if (AddressOps.equal(val, value)) {
+                                        out.println("found at " + addr);
+                                    }
+                                }
                                 public void epilogue() {
                                 }
                             };
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Sun Apr 13 17:43:42 2008 -0400
@@ -1011,8 +1011,21 @@
                         Assert.that(addr.andWithMask(VM.getVM().getAddressSize() - 1) == null,
                                     "Address " + addr + "should have been aligned");
                       }
+                      OopHandle handle = addr.getOopHandleAt(0);
+                      addAnnotation(addr, handle);
+                    }
+
+                    public void visitCompOopAddress(Address addr) {
+                      if (Assert.ASSERTS_ENABLED) {
+                        Assert.that(addr.andWithMask(VM.getVM().getAddressSize() - 1) == null,
+                                    "Address " + addr + "should have been aligned");
+                      }
+                      OopHandle handle = addr.getCompOopHandleAt(0);
+                      addAnnotation(addr, handle);
+                    }
+
+                    public void addAnnotation(Address addr, OopHandle handle) {
                       // Check contents
-                      OopHandle handle = addr.getOopHandleAt(0);
                       String anno = "null oop";
                       if (handle != null) {
                         // Find location
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java	Sun Apr 13 17:43:42 2008 -0400
@@ -306,6 +306,8 @@
 
       entryAddr = entryAddr.addOffsetTo(intConstantEntryArrayStride);
     } while (nameAddr != null);
+      String symbol = "heapOopSize"; // global int constant whose value is initialized at runtime.
+      addIntConstant(symbol, (int)lookupInProcess(symbol).getCIntegerAt(0, 4, false));
   }
 
   private void readVMLongConstants() {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapSet.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapSet.java	Sun Apr 13 17:43:42 2008 -0400
@@ -68,7 +68,8 @@
     public void visitValueLocation(Address valueAddr) {
     }
 
-    public void visitDeadLocation(Address deadAddr) {
+    public void visitNarrowOopLocation(Address narrowOopAddr) {
+      addressVisitor.visitCompOopAddress(narrowOopAddr);
     }
   }
 
@@ -197,9 +198,9 @@
       }
     }
 
-    // We want dead, value and oop oop_types
+    // We want narrow oop, value and oop oop_types
     OopMapValue.OopTypes[] values = new OopMapValue.OopTypes[] {
-      OopMapValue.OopTypes.OOP_VALUE, OopMapValue.OopTypes.VALUE_VALUE, OopMapValue.OopTypes.DEAD_VALUE
+      OopMapValue.OopTypes.OOP_VALUE, OopMapValue.OopTypes.VALUE_VALUE, OopMapValue.OopTypes.NARROWOOP_VALUE
     };
 
     {
@@ -214,8 +215,8 @@
             visitor.visitOopLocation(loc);
           } else if (omv.getType() == OopMapValue.OopTypes.VALUE_VALUE) {
             visitor.visitValueLocation(loc);
-          } else if (omv.getType() == OopMapValue.OopTypes.DEAD_VALUE) {
-            visitor.visitDeadLocation(loc);
+          } else if (omv.getType() == OopMapValue.OopTypes.NARROWOOP_VALUE) {
+            visitor.visitNarrowOopLocation(loc);
           }
         }
       }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapValue.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapValue.java	Sun Apr 13 17:43:42 2008 -0400
@@ -50,7 +50,7 @@
   static int UNUSED_VALUE;
   static int OOP_VALUE;
   static int VALUE_VALUE;
-  static int DEAD_VALUE;
+  static int NARROWOOP_VALUE;
   static int CALLEE_SAVED_VALUE;
   static int DERIVED_OOP_VALUE;
 
@@ -74,7 +74,7 @@
     UNUSED_VALUE           = db.lookupIntConstant("OopMapValue::unused_value").intValue();
     OOP_VALUE              = db.lookupIntConstant("OopMapValue::oop_value").intValue();
     VALUE_VALUE            = db.lookupIntConstant("OopMapValue::value_value").intValue();
-    DEAD_VALUE             = db.lookupIntConstant("OopMapValue::dead_value").intValue();
+    NARROWOOP_VALUE        = db.lookupIntConstant("OopMapValue::narrowoop_value").intValue();
     CALLEE_SAVED_VALUE     = db.lookupIntConstant("OopMapValue::callee_saved_value").intValue();
     DERIVED_OOP_VALUE      = db.lookupIntConstant("OopMapValue::derived_oop_value").intValue();
   }
@@ -83,7 +83,7 @@
     public static final OopTypes UNUSED_VALUE       = new OopTypes() { int getValue() { return OopMapValue.UNUSED_VALUE;       }};
     public static final OopTypes OOP_VALUE          = new OopTypes() { int getValue() { return OopMapValue.OOP_VALUE;          }};
     public static final OopTypes VALUE_VALUE        = new OopTypes() { int getValue() { return OopMapValue.VALUE_VALUE;        }};
-    public static final OopTypes DEAD_VALUE         = new OopTypes() { int getValue() { return OopMapValue.DEAD_VALUE;         }};
+    public static final OopTypes NARROWOOP_VALUE    = new OopTypes() { int getValue() { return OopMapValue.NARROWOOP_VALUE;         }};
     public static final OopTypes CALLEE_SAVED_VALUE = new OopTypes() { int getValue() { return OopMapValue.CALLEE_SAVED_VALUE; }};
     public static final OopTypes DERIVED_OOP_VALUE  = new OopTypes() { int getValue() { return OopMapValue.DERIVED_OOP_VALUE;  }};
 
@@ -106,7 +106,7 @@
   // Querying
   public boolean isOop()         { return (getValue() & TYPE_MASK_IN_PLACE) == OOP_VALUE;          }
   public boolean isValue()       { return (getValue() & TYPE_MASK_IN_PLACE) == VALUE_VALUE;        }
-  public boolean isDead()        { return (getValue() & TYPE_MASK_IN_PLACE) == DEAD_VALUE;         }
+  public boolean isNarrowOop()   { return (getValue() & TYPE_MASK_IN_PLACE) == NARROWOOP_VALUE;    }
   public boolean isCalleeSaved() { return (getValue() & TYPE_MASK_IN_PLACE) == CALLEE_SAVED_VALUE; }
   public boolean isDerivedOop()  { return (getValue() & TYPE_MASK_IN_PLACE) == DERIVED_OOP_VALUE;  }
 
@@ -118,7 +118,7 @@
          if (which == UNUSED_VALUE) return OopTypes.UNUSED_VALUE;
     else if (which == OOP_VALUE)    return OopTypes.OOP_VALUE;
     else if (which == VALUE_VALUE)  return OopTypes.VALUE_VALUE;
-    else if (which == DEAD_VALUE)   return OopTypes.DEAD_VALUE;
+    else if (which == NARROWOOP_VALUE)   return OopTypes.NARROWOOP_VALUE;
     else if (which == CALLEE_SAVED_VALUE) return OopTypes.CALLEE_SAVED_VALUE;
     else if (which == DERIVED_OOP_VALUE)  return OopTypes.DERIVED_OOP_VALUE;
     else throw new InternalError("unknown which " + which + " (TYPE_MASK_IN_PLACE = " + TYPE_MASK_IN_PLACE + ")");
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapVisitor.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/OopMapVisitor.java	Sun Apr 13 17:43:42 2008 -0400
@@ -32,5 +32,5 @@
   public void visitOopLocation(Address oopAddr);
   public void visitDerivedOopLocation(Address baseOopAddr, Address derivedOopAddr);
   public void visitValueLocation(Address valueAddr);
-  public void visitDeadLocation(Address deadAddr);
+  public void visitNarrowOopLocation(Address narrowOopAddr);
 }
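
For SA clients, the visitDeadLocation to visitNarrowOopLocation rename above means compressed-oop slots in an oop map are now reported through their own callback instead of being lumped in with dead slots (OopMapSet dispatches NARROWOOP_VALUE entries to it). A minimal sketch of a visitor written against the updated interface; the class name and the println output are illustrative only, not part of this changeset:

import sun.jvm.hotspot.compiler.OopMapVisitor;
import sun.jvm.hotspot.debugger.Address;

// Illustrative sketch: implements the four callbacks of the updated OopMapVisitor.
class PrintingOopMapVisitor implements OopMapVisitor {
  public void visitOopLocation(Address oopAddr) {
    System.out.println("oop at " + oopAddr);
  }
  public void visitDerivedOopLocation(Address baseOopAddr, Address derivedOopAddr) {
    System.out.println("derived oop at " + derivedOopAddr + " (base " + baseOopAddr + ")");
  }
  public void visitValueLocation(Address valueAddr) {
    System.out.println("value at " + valueAddr);
  }
  // New callback in this change: narrow (compressed) oop slots get reported here.
  public void visitNarrowOopLocation(Address narrowOopAddr) {
    System.out.println("narrow oop at " + narrowOopAddr);
  }
}
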
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Address.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Address.java	Sun Apr 13 17:43:42 2008 -0400
@@ -87,6 +87,8 @@
     throws UnmappedAddressException, UnalignedAddressException;
   /** This returns null if the address at the given offset is NULL. */
   public Address    getAddressAt       (long offset) throws UnmappedAddressException, UnalignedAddressException;
+  /** Returns the decoded address at the given offset */
+  public Address    getCompOopAddressAt (long offset) throws UnmappedAddressException, UnalignedAddressException;
 
   //
   // Java-related routines
@@ -103,6 +105,8 @@
   /** This returns null if the address at the given offset is NULL. */
   public OopHandle  getOopHandleAt     (long offset)
     throws UnmappedAddressException, UnalignedAddressException, NotInHeapException;
+  public OopHandle  getCompOopHandleAt (long offset)
+    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException;
 
   //
   // C/C++-related mutators. These throw UnmappedAddressException if
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/Debugger.java	Sun Apr 13 17:43:42 2008 -0400
@@ -118,6 +118,9 @@
   public long getJIntSize();
   public long getJLongSize();
   public long getJShortSize();
+  public long getHeapBase();
+  public long getHeapOopSize();
+  public long getLogMinObjAlignmentInBytes();
 
   public ReadResult readBytesFromProcess(long address, long numBytes)
     throws DebuggerException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java	Sun Apr 13 17:43:42 2008 -0400
@@ -37,6 +37,7 @@
     DbxDebugger interfaces. </P> */
 
 public abstract class DebuggerBase implements Debugger {
+
   // May be set lazily, but must be set before calling any of the read
   // routines below
   protected MachineDescription machDesc;
@@ -52,6 +53,11 @@
   protected long jlongSize;
   protected long jshortSize;
   protected boolean javaPrimitiveTypesConfigured;
+  // heap data.
+  protected long oopSize;
+  protected long heapOopSize;
+  protected long heapBase;                 // heap base for compressed oops.
+  protected long logMinObjAlignmentInBytes; // Used to decode compressed oops.
   // Should be initialized if desired by calling initCache()
   private PageCache cache;
 
@@ -153,6 +159,12 @@
     javaPrimitiveTypesConfigured = true;
   }
 
+  public void putHeapConst(long heapBase, long heapOopSize, long logMinObjAlignmentInBytes) {
+    this.heapBase = heapBase;
+    this.heapOopSize = heapOopSize;
+    this.logMinObjAlignmentInBytes = logMinObjAlignmentInBytes;
+  }
+
   /** May be called by subclasses if desired to initialize the page
       cache but may not be overridden */
   protected final void initCache(long pageSize, long maxNumPages) {
@@ -442,6 +454,16 @@
     return readCInteger(address, machDesc.getAddressSize(), true);
   }
 
+  protected long readCompOopAddressValue(long address)
+    throws UnmappedAddressException, UnalignedAddressException {
+    long value = readCInteger(address, getHeapOopSize(), true);
+    if (value != 0) {
+      // See oop.inline.hpp decode_heap_oop
+      value = (long)(heapBase + (long)(value << logMinObjAlignmentInBytes));
+    }
+    return value;
+  }
+
   protected void writeAddressValue(long address, long value)
     throws UnmappedAddressException, UnalignedAddressException {
     writeCInteger(address, machDesc.getAddressSize(), value);
@@ -518,4 +540,15 @@
   public long getJShortSize() {
     return jshortSize;
   }
+
+  public long getHeapOopSize() {
+    return heapOopSize;
+  }
+
+  public long getHeapBase() {
+    return heapBase;
+  }
+  public long getLogMinObjAlignmentInBytes() {
+    return logMinObjAlignmentInBytes;
+  }
 }
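
The decode in readCompOopAddressValue above is what the 32gb bound in the synopsis rests on: a narrow oop is a 32-bit value that is shifted left by the object-alignment shift and added to a heap base, so 2^32 possible values times the default 8-byte alignment covers 32 GB of heap. A standalone sketch of that arithmetic, assuming the usual 64-bit defaults; the class and the sample numbers are hypothetical, only the formula mirrors the diff:

// Illustrative sketch of compressed-oop decoding as done in
// DebuggerBase.readCompOopAddressValue (see oop.inline.hpp decode_heap_oop).
public class NarrowOopDecode {
  public static void main(String[] args) {
    long heapBase = 0x0000000100000000L;   // hypothetical heap base, as passed to putHeapConst()
    long logMinObjAlignmentInBytes = 3;    // 8-byte object alignment
    long narrowOop = 0xFFFFFFFFL;          // largest possible 32-bit narrow value

    long decoded = (narrowOop == 0)
        ? 0                                                   // narrow null stays null
        : heapBase + (narrowOop << logMinObjAlignmentInBytes);

    // 2^32 narrow values * 8-byte alignment = 32 GB addressable from heapBase.
    System.out.println("decoded oop = 0x" + Long.toHexString(decoded));
  }
}
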
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/JVMDebugger.java	Sun Apr 13 17:43:42 2008 -0400
@@ -42,4 +42,5 @@
                                               long jintSize,
                                               long jlongSize,
                                               long jshortSize);
+  public void putHeapConst(long heapBase, long heapOopSize, long logMinObjAlignment);
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescription.java	Sun Apr 13 17:43:42 2008 -0400
@@ -35,13 +35,6 @@
       able to traverse arrays of pointers or oops. */
   public long getAddressSize();
 
-  /** Returns the size of an address in bytes. Currently needed to be
-      able to traverse arrays of pointers or oops. (FIXME: since we're
-      already reading the Java primitive types' sizes from the remote
-      VM, it would be nice to remove this routine, using a similar
-      mechanism to how the TypeDataBase deals with primitive types.) */
-  public long getOopSize();
-
   /** Returns the maximum value of the C integer type with the given
       size in bytes and signedness. Throws IllegalArgumentException if
       the size in bytes is not legal for a C type (or can not be
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAMD64.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAMD64.java	Sun Apr 13 17:43:42 2008 -0400
@@ -29,10 +29,6 @@
     return 8;
   }
 
-  public long getOopSize() {
-    return 8;
-  }
-
   public boolean isLP64() {
     return true;
   }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIA64.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIA64.java	Sun Apr 13 17:43:42 2008 -0400
@@ -29,10 +29,6 @@
     return 8;
   }
 
-  public long getOopSize() {
-    return 8;
-  }
-
   public boolean isLP64() {
     return true;
   }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIntelX86.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionIntelX86.java	Sun Apr 13 17:43:42 2008 -0400
@@ -29,10 +29,6 @@
     return 4;
   }
 
-  public long getOopSize() {
-    return 4;
-  }
-
   public boolean isBigEndian() {
     return false;
   }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC32Bit.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC32Bit.java	Sun Apr 13 17:43:42 2008 -0400
@@ -29,10 +29,6 @@
     return 4;
   }
 
-  public long getOopSize() {
-    return 4;
-  }
-
   public boolean isBigEndian() {
     return true;
   }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC64Bit.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSPARC64Bit.java	Sun Apr 13 17:43:42 2008 -0400
@@ -29,9 +29,6 @@
     return 8;
   }
 
-  public long getOopSize() {
-    return 8;
-  }
 
   public boolean isBigEndian() {
     return true;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxAddress.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxAddress.java	Sun Apr 13 17:43:42 2008 -0400
@@ -71,6 +71,9 @@
   public Address getAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
     return debugger.readAddress(addr + offset);
   }
+  public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+    return debugger.readCompOopAddress(addr + offset);
+  }
 
   //
   // Java-related routines
@@ -113,6 +116,11 @@
     return debugger.readOopHandle(addr + offset);
   }
 
+  public OopHandle getCompOopHandleAt(long offset)
+    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+    return debugger.readCompOopHandle(addr + offset);
+  }
+
   // Mutators -- not implemented for now (FIXME)
   public void setCIntegerAt(long offset, long numBytes, long value) {
     throw new DebuggerException("Unimplemented");
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebugger.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebugger.java	Sun Apr 13 17:43:42 2008 -0400
@@ -43,7 +43,9 @@
   public long         readCInteger(long address, long numBytes, boolean isUnsigned)
     throws DebuggerException;
   public DbxAddress   readAddress(long address) throws DebuggerException;
+  public DbxAddress   readCompOopAddress(long address) throws DebuggerException;
   public DbxOopHandle readOopHandle(long address) throws DebuggerException;
+  public DbxOopHandle readCompOopHandle(long address) throws DebuggerException;
   public long[]       getThreadIntegerRegisterSet(int tid) throws DebuggerException;
   public Address      newAddress(long value) throws DebuggerException;
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebuggerLocal.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dbx/DbxDebuggerLocal.java	Sun Apr 13 17:43:42 2008 -0400
@@ -460,12 +460,23 @@
     return (value == 0 ? null : new DbxAddress(this, value));
   }
 
+  public DbxAddress readCompOopAddress(long address)
+    throws UnmappedAddressException, UnalignedAddressException {
+    long value = readCompOopAddressValue(address);
+    return (value == 0 ? null : new DbxAddress(this, value));
+  }
+
   /** From the DbxDebugger interface */
   public DbxOopHandle readOopHandle(long address)
     throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
     long value = readAddressValue(address);
     return (value == 0 ? null : new DbxOopHandle(this, value));
   }
+  public DbxOopHandle readCompOopHandle(long address)
+    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+    long value = readCompOopAddressValue(address);
+    return (value == 0 ? null : new DbxOopHandle(this, value));
+  }
 
   //--------------------------------------------------------------------------------
   // Thread context access. Can not be package private, but should
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dummy/DummyAddress.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/dummy/DummyAddress.java	Sun Apr 13 17:43:42 2008 -0400
@@ -76,6 +76,10 @@
     return new DummyAddress(debugger, badLong);
   }
 
+  public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+    return new DummyAddress(debugger, badLong);
+  }
+
   //
   // Java-related routines
   //
@@ -116,6 +120,10 @@
     throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
     return new DummyOopHandle(debugger, badLong);
   }
+  public OopHandle getCompOopHandleAt(long offset)
+    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+    return new DummyOopHandle(debugger, badLong);
+  }
 
   // Mutators -- not implemented
   public void setCIntegerAt(long offset, long numBytes, long value) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java	Sun Apr 13 17:43:42 2008 -0400
@@ -74,6 +74,11 @@
         return debugger.readAddress(addr + offset);
     }
 
+    public Address getCompOopAddressAt(long offset)
+            throws UnalignedAddressException, UnmappedAddressException {
+        return debugger.readCompOopAddress(addr + offset);
+    }
+
     //
     // Java-related routines
     //
@@ -115,6 +120,11 @@
     return debugger.readOopHandle(addr + offset);
   }
 
+  public OopHandle getCompOopHandleAt(long offset)
+    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+    return debugger.readCompOopHandle(addr + offset);
+  }
+
   // Mutators -- not implemented for now (FIXME)
   public void setCIntegerAt(long offset, long numBytes, long value) {
     throw new DebuggerException("Unimplemented");
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebugger.java	Sun Apr 13 17:43:42 2008 -0400
@@ -45,7 +45,9 @@
   public long         readCInteger(long address, long numBytes, boolean isUnsigned)
     throws DebuggerException;
   public LinuxAddress readAddress(long address) throws DebuggerException;
+  public LinuxAddress readCompOopAddress(long address) throws DebuggerException;
   public LinuxOopHandle readOopHandle(long address) throws DebuggerException;
+  public LinuxOopHandle readCompOopHandle(long address) throws DebuggerException;
   public long[]       getThreadIntegerRegisterSet(int lwp_id) throws DebuggerException;
   public long         getAddressValue(Address addr) throws DebuggerException;
   public Address      newAddress(long value) throws DebuggerException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java	Sun Apr 13 17:43:42 2008 -0400
@@ -423,6 +423,11 @@
         long value = readAddressValue(address);
         return (value == 0 ? null : new LinuxAddress(this, value));
     }
+    public LinuxAddress readCompOopAddress(long address)
+            throws UnmappedAddressException, UnalignedAddressException {
+        long value = readCompOopAddressValue(address);
+        return (value == 0 ? null : new LinuxAddress(this, value));
+    }
 
     /** From the LinuxDebugger interface */
     public LinuxOopHandle readOopHandle(long address)
@@ -431,6 +436,12 @@
         long value = readAddressValue(address);
         return (value == 0 ? null : new LinuxOopHandle(this, value));
     }
+    public LinuxOopHandle readCompOopHandle(long address)
+            throws UnmappedAddressException, UnalignedAddressException,
+                NotInHeapException {
+        long value = readCompOopAddressValue(address);
+        return (value == 0 ? null : new LinuxOopHandle(this, value));
+    }
 
     //----------------------------------------------------------------------
     // Thread context access
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcAddress.java	Sun Apr 13 17:43:42 2008 -0400
@@ -72,6 +72,10 @@
     return debugger.readAddress(addr + offset);
   }
 
+  public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+    return debugger.readCompOopAddress(addr + offset);
+  }
+
   //
   // Java-related routines
   //
@@ -112,6 +116,10 @@
     throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
     return debugger.readOopHandle(addr + offset);
   }
+  public OopHandle getCompOopHandleAt(long offset)
+    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+    return debugger.readCompOopHandle(addr + offset);
+  }
 
   // Mutators -- not implemented for now (FIXME)
   public void setCIntegerAt(long offset, long numBytes, long value) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebugger.java	Sun Apr 13 17:43:42 2008 -0400
@@ -46,7 +46,9 @@
   public long         readCInteger(long address, long numBytes, boolean isUnsigned)
     throws DebuggerException;
   public ProcAddress   readAddress(long address) throws DebuggerException;
+  public ProcAddress   readCompOopAddress(long address) throws DebuggerException;
   public ProcOopHandle readOopHandle(long address) throws DebuggerException;
+  public ProcOopHandle readCompOopHandle(long address) throws DebuggerException;
   public long[]       getThreadIntegerRegisterSet(int tid) throws DebuggerException;
   public long         getAddressValue(Address addr) throws DebuggerException;
   public Address      newAddress(long value) throws DebuggerException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java	Sun Apr 13 17:43:42 2008 -0400
@@ -53,8 +53,6 @@
  */
 
 public class ProcDebuggerLocal extends DebuggerBase implements ProcDebugger {
-
-
     protected static final int cacheSize = 16 * 1024 * 1024; // 16 MB
 
     //------------------------------------------------------------------------
@@ -337,10 +335,21 @@
         return (value == 0 ? null : new ProcAddress(this, value));
     }
 
+    public ProcAddress readCompOopAddress(long address)
+    throws UnmappedAddressException, UnalignedAddressException {
+        long value = readCompOopAddressValue(address);
+        return (value == 0 ? null : new ProcAddress(this, value));
+    }
+
     /** From the ProcDebugger interface */
     public ProcOopHandle readOopHandle(long address)
     throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
-        long value = readAddressValue(address);
+        long   value = readAddressValue(address);
+        return (value == 0 ? null : new ProcOopHandle(this, value));
+    }
+
+    public ProcOopHandle readCompOopHandle(long address) {
+        long value = readCompOopAddressValue(address);
         return (value == 0 ? null : new ProcOopHandle(this, value));
     }
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteAddress.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteAddress.java	Sun Apr 13 17:43:42 2008 -0400
@@ -71,6 +71,9 @@
   public Address getAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
     return debugger.readAddress(addr + offset);
   }
+  public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+    return debugger.readCompOopAddress(addr + offset);
+  }
 
   //
   // Java-related routines
@@ -112,6 +115,10 @@
     throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
     return debugger.readOopHandle(addr + offset);
   }
+  public OopHandle getCompOopHandleAt(long offset)
+    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+    return debugger.readCompOopHandle(addr + offset);
+  }
 
   // Mutators -- not implemented for now (FIXME)
   public void setCIntegerAt(long offset, long numBytes, long value) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebugger.java	Sun Apr 13 17:43:42 2008 -0400
@@ -65,6 +65,9 @@
   public long      getJIntSize() throws RemoteException;
   public long      getJLongSize() throws RemoteException;
   public long      getJShortSize() throws RemoteException;
+  public long      getHeapBase() throws RemoteException;
+  public long      getHeapOopSize() throws RemoteException;
+  public long      getLogMinObjAlignmentInBytes() throws RemoteException;
   public boolean   areThreadsEqual(long addrOrId1, boolean isAddress1,
                                    long addrOrId2, boolean isAddress2) throws RemoteException;
   public int       getThreadHashCode(long addrOrId, boolean isAddress) throws RemoteException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java	Sun Apr 13 17:43:42 2008 -0400
@@ -85,6 +85,9 @@
       jlongSize    = remoteDebugger.getJLongSize();
       jshortSize   = remoteDebugger.getJShortSize();
       javaPrimitiveTypesConfigured = true;
+      heapBase     = remoteDebugger.getHeapBase();
+      heapOopSize  = remoteDebugger.getHeapOopSize();
+      logMinObjAlignmentInBytes  = remoteDebugger.getLogMinObjAlignmentInBytes();
     }
     catch (RemoteException e) {
       throw new DebuggerException(e);
@@ -298,12 +301,24 @@
     return (value == 0 ? null : new RemoteAddress(this, value));
   }
 
+  RemoteAddress readCompOopAddress(long address)
+    throws UnmappedAddressException, UnalignedAddressException {
+    long value = readCompOopAddressValue(address);
+    return (value == 0 ? null : new RemoteAddress(this, value));
+  }
+
   RemoteOopHandle readOopHandle(long address)
     throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
     long value = readAddressValue(address);
     return (value == 0 ? null : new RemoteOopHandle(this, value));
   }
 
+  RemoteOopHandle readCompOopHandle(long address)
+    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+    long value = readCompOopAddressValue(address);
+    return (value == 0 ? null : new RemoteOopHandle(this, value));
+  }
+
   boolean areThreadsEqual(Address addr1, Address addr2) {
     try {
        return remoteDebugger.areThreadsEqual(getAddressValue(addr1), true,
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerServer.java	Sun Apr 13 17:43:42 2008 -0400
@@ -114,6 +114,17 @@
     return debugger.getJShortSize();
   }
 
+  public long getHeapBase() throws RemoteException {
+    return debugger.getHeapBase();
+  }
+
+  public long getHeapOopSize() throws RemoteException {
+    return debugger.getHeapOopSize();
+  }
+
+  public long getLogMinObjAlignmentInBytes() throws RemoteException {
+    return debugger.getLogMinObjAlignmentInBytes();
+  }
   public boolean   areThreadsEqual(long addrOrId1, boolean isAddress1,
                                    long addrOrId2, boolean isAddress2) throws RemoteException {
     ThreadProxy t1 = getThreadProxy(addrOrId1, isAddress1);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Address.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Address.java	Sun Apr 13 17:43:42 2008 -0400
@@ -72,6 +72,10 @@
     return debugger.readAddress(addr + offset);
   }
 
+  public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+    return debugger.readCompOopAddress(addr + offset);
+  }
+
   //
   // Java-related routines
   //
@@ -112,6 +116,10 @@
     throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
     return debugger.readOopHandle(addr + offset);
   }
+  public OopHandle getCompOopHandleAt(long offset)
+    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+    return debugger.readCompOopHandle(addr + offset);
+  }
 
   //
   // C/C++-related mutators
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Debugger.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32Debugger.java	Sun Apr 13 17:43:42 2008 -0400
@@ -45,7 +45,9 @@
   public long         readCInteger(long address, long numBytes, boolean isUnsigned)
     throws DebuggerException;
   public Win32Address readAddress(long address) throws DebuggerException;
+  public Win32Address readCompOopAddress(long address) throws DebuggerException;
   public Win32OopHandle readOopHandle(long address) throws DebuggerException;
+  public Win32OopHandle readCompOopHandle(long address) throws DebuggerException;
   public void         writeJBoolean(long address, boolean value) throws DebuggerException;
   public void         writeJByte(long address, byte value) throws DebuggerException;
   public void         writeJChar(long address, char value) throws DebuggerException;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32DebuggerLocal.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/Win32DebuggerLocal.java	Sun Apr 13 17:43:42 2008 -0400
@@ -306,12 +306,22 @@
     return (Win32Address) newAddress(readAddressValue(address));
   }
 
+  public Win32Address readCompOopAddress(long address)
+    throws UnmappedAddressException, UnalignedAddressException {
+    return (Win32Address) newAddress(readCompOopAddressValue(address));
+  }
+
   /** From the Win32Debugger interface */
   public Win32OopHandle readOopHandle(long address)
     throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
     long value = readAddressValue(address);
     return (value == 0 ? null : new Win32OopHandle(this, value));
   }
+  public Win32OopHandle readCompOopHandle(long address)
+    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+    long value = readCompOopAddressValue(address);
+    return (value == 0 ? null : new Win32OopHandle(this, value));
+  }
 
   /** From the Win32Debugger interface */
   public void writeAddress(long address, Win32Address value) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgAddress.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgAddress.java	Sun Apr 13 17:43:42 2008 -0400
@@ -72,6 +72,10 @@
     return debugger.readAddress(addr + offset);
   }
 
+  public Address getCompOopAddressAt(long offset) throws UnalignedAddressException, UnmappedAddressException {
+    return debugger.readCompOopAddress(addr + offset);
+  }
+
   //
   // Java-related routines
   //
@@ -113,6 +117,10 @@
     return debugger.readOopHandle(addr + offset);
   }
 
+  public OopHandle getCompOopHandleAt(long offset)
+    throws UnalignedAddressException, UnmappedAddressException, NotInHeapException {
+    return debugger.readCompOopHandle(addr + offset);
+  }
   //
   // C/C++-related mutators
   //
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebugger.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebugger.java	Sun Apr 13 17:43:42 2008 -0400
@@ -45,7 +45,9 @@
   public long         readCInteger(long address, long numBytes, boolean isUnsigned)
     throws DebuggerException;
   public WindbgAddress readAddress(long address) throws DebuggerException;
+  public WindbgAddress readCompOopAddress(long address) throws DebuggerException;
   public WindbgOopHandle readOopHandle(long address) throws DebuggerException;
+  public WindbgOopHandle readCompOopHandle(long address) throws DebuggerException;
 
   // The returned array of register contents is guaranteed to be in
   // the same order as in the DbxDebugger for Solaris/x86 or amd64; that is,
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java	Sun Apr 13 17:43:42 2008 -0400
@@ -39,6 +39,7 @@
 import sun.jvm.hotspot.debugger.cdbg.basic.BasicDebugEvent;
 import sun.jvm.hotspot.utilities.*;
 import sun.jvm.hotspot.utilities.memo.*;
+import sun.jvm.hotspot.runtime.*;
 
 /** <P> An implementation of the JVMDebugger interface which talks to
     windbg and symbol table management is done in Java. </P>
@@ -315,12 +316,22 @@
     return (WindbgAddress) newAddress(readAddressValue(address));
   }
 
+  public WindbgAddress readCompOopAddress(long address)
+    throws UnmappedAddressException, UnalignedAddressException {
+    return (WindbgAddress) newAddress(readCompOopAddressValue(address));
+  }
+
   /** From the WindbgDebugger interface */
   public WindbgOopHandle readOopHandle(long address)
     throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
     long value = readAddressValue(address);
     return (value == 0 ? null : new WindbgOopHandle(this, value));
   }
+  public WindbgOopHandle readCompOopHandle(long address)
+    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+    long value = readCompOopAddressValue(address);
+    return (value == 0 ? null : new WindbgOopHandle(this, value));
+  }
 
   /** From the WindbgDebugger interface */
   public int getAddressSize() {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Sun Apr 13 17:43:42 2008 -0400
@@ -53,6 +53,8 @@
   // system obj array klass object
   private static sun.jvm.hotspot.types.OopField systemObjArrayKlassObjField;
 
+  private static AddressField heapBaseField;
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -83,6 +85,8 @@
     doubleArrayKlassObjField = type.getOopField("_doubleArrayKlassObj");
 
     systemObjArrayKlassObjField = type.getOopField("_systemObjArrayKlassObj");
+
+    heapBaseField = type.getAddressField("_heap_base");
   }
 
   public Universe() {
@@ -96,6 +100,14 @@
     }
   }
 
+  public static long getHeapBase() {
+    if (heapBaseField.getValue() == null) {
+      return 0;
+    } else {
+      return heapBaseField.getValue().minus(null);
+    }
+  }
+
   /** Returns "TRUE" iff "p" points into the allocated area of the heap. */
   public boolean isIn(Address p) {
     return heap().isIn(p);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Array.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Array.java	Sun Apr 13 17:43:42 2008 -0400
@@ -47,18 +47,52 @@
 
   private static void initialize(TypeDataBase db) throws WrongTypeException {
     Type type   = db.lookupType("arrayOopDesc");
-    length      = new CIntField(type.getCIntegerField("_length"), 0);
-    headerSize  = type.getSize();
+    typeSize    = (int)type.getSize();
   }
 
   // Size of the arrayOopDesc
-  private static long headerSize;
+  private static long headerSize=0;
+  private static long lengthOffsetInBytes=0;
+  private static long typeSize;
+
+  private static long headerSizeInBytes() {
+    if (headerSize != 0) {
+      return headerSize;
+    }
+    if (VM.getVM().isCompressedOopsEnabled()) {
+      headerSize = typeSize;
+    } else {
+      headerSize = VM.getVM().alignUp(typeSize + VM.getVM().getIntSize(),
+                                      VM.getVM().getHeapWordSize());
+    }
+    return headerSize;
+  }
 
-  // Fields
-  private static CIntField length;
+  private static long headerSize(BasicType type) {
+    if (Universe.elementTypeShouldBeAligned(type)) {
+       return alignObjectSize(headerSizeInBytes())/VM.getVM().getHeapWordSize();
+    } else {
+      return headerSizeInBytes()/VM.getVM().getHeapWordSize();
+    }
+  }
+
+  private long lengthOffsetInBytes() {
+    if (lengthOffsetInBytes != 0) {
+      return lengthOffsetInBytes;
+    }
+    if (VM.getVM().isCompressedOopsEnabled()) {
+      lengthOffsetInBytes = typeSize - VM.getVM().getIntSize();
+    } else {
+      lengthOffsetInBytes = typeSize;
+    }
+    return lengthOffsetInBytes;
+  }
 
   // Accessors for declared fields
-  public long getLength() { return length.getValue(this); }
+  public long getLength() {
+    boolean isUnsigned = true;
+    return this.getHandle().getCIntegerAt(lengthOffsetInBytes(), VM.getVM().getIntSize(), isUnsigned);
+  }
 
   public long getObjectSize() {
     ArrayKlass klass = (ArrayKlass) getKlass();
@@ -72,20 +106,12 @@
   }
 
   public static long baseOffsetInBytes(BasicType type) {
-    if (Universe.elementTypeShouldBeAligned(type)) {
-      return (VM.getVM().isLP64()) ?  alignObjectSize(headerSize)
-                                   : VM.getVM().alignUp(headerSize, 8);
-    } else {
-      return headerSize;
-    }
+    return headerSize(type) * VM.getVM().getHeapWordSize();
   }
 
   public boolean isArray()             { return true; }
 
   public void iterateFields(OopVisitor visitor, boolean doVMFields) {
     super.iterateFields(visitor, doVMFields);
-    if (doVMFields) {
-      visitor.doCInt(length, true);
-    }
   }
 }
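
The Array.java rewrite above computes the array header size and the offset of the length field at runtime instead of reading a fixed _length CIntField, because with compressed oops the length is packed into arrayOopDesc itself while an uncompressed VM appends an int and re-aligns the header. A small sketch of that arithmetic under assumed 64-bit sizes; all constants here are illustrative, the SA reads the real values from the target VM:

// Illustrative arithmetic only: mirrors headerSizeInBytes()/lengthOffsetInBytes()
// from the Array.java change, for an assumed 64-bit VM.
public class ArrayHeaderArithmetic {
  static final long INT_SIZE = 4;        // assumed jint size
  static final long HEAP_WORD_SIZE = 8;  // assumed HeapWord size on a 64-bit VM

  static long alignUp(long value, long alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
  }

  // typeSize stands in for sizeof(arrayOopDesc) as reported by the type database.
  static long headerSizeInBytes(long typeSize, boolean compressedOops) {
    return compressedOops ? typeSize
                          : alignUp(typeSize + INT_SIZE, HEAP_WORD_SIZE);
  }

  static long lengthOffsetInBytes(long typeSize, boolean compressedOops) {
    return compressedOops ? typeSize - INT_SIZE : typeSize;
  }

  public static void main(String[] args) {
    long typeSize = 16;  // hypothetical sizeof(arrayOopDesc)
    System.out.println(headerSizeInBytes(typeSize, true));    // 16: length shares the header
    System.out.println(lengthOffsetInBytes(typeSize, true));  // 12
    System.out.println(headerSizeInBytes(typeSize, false));   // 24: int appended, word-aligned
    System.out.println(lengthOffsetInBytes(typeSize, false)); // 16
  }
}
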
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Sun Apr 13 17:43:42 2008 -0400
@@ -31,10 +31,10 @@
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
-// A ConstantPool is an array containing class constants
+// A ConstantPool is an oop containing class constants
 // as described in the class file
 
-public class ConstantPool extends Array implements ClassConstants {
+public class ConstantPool extends Oop implements ClassConstants {
   // Used for debugging this code
   private static final boolean DEBUG = false;
 
@@ -55,8 +55,9 @@
     tags        = new OopField(type.getOopField("_tags"), 0);
     cache       = new OopField(type.getOopField("_cache"), 0);
     poolHolder  = new OopField(type.getOopField("_pool_holder"), 0);
+    length      = new CIntField(type.getCIntegerField("_length"), 0);
     headerSize  = type.getSize();
-    elementSize = db.getOopSize();
+    elementSize = 0;
   }
 
   ConstantPool(OopHandle handle, ObjectHeap heap) {
@@ -68,7 +69,7 @@
   private static OopField tags;
   private static OopField cache;
   private static OopField poolHolder;
-
+  private static CIntField length; // number of elements in oop
 
   private static long headerSize;
   private static long elementSize;
@@ -76,12 +77,22 @@
   public TypeArray         getTags()       { return (TypeArray)         tags.getValue(this); }
   public ConstantPoolCache getCache()      { return (ConstantPoolCache) cache.getValue(this); }
   public Klass             getPoolHolder() { return (Klass)             poolHolder.getValue(this); }
+  public int               getLength()     { return (int)length.getValue(this); }
+
+  private long getElementSize() {
+    if (elementSize != 0) {
+      return elementSize;
+    } else {
+      elementSize = VM.getVM().getOopSize();
+    }
+    return elementSize;
+  }
 
   private long indexOffset(long index) {
     if (Assert.ASSERTS_ENABLED) {
-      Assert.that(index > 0 && index < getLength(),  "invalid cp index");
+      Assert.that(index > 0 && index < getLength(),  "invalid cp index " + index + " " + getLength());
     }
-    return (index * elementSize) + headerSize;
+    return (index * getElementSize()) + headerSize;
   }
 
   public ConstantTag getTagAt(long index) {
@@ -464,7 +475,7 @@
   }
 
   public long getObjectSize() {
-    return alignObjectSize(headerSize + (getLength() * elementSize));
+    return alignObjectSize(headerSize + (getLength() * getElementSize()));
   }
 
   //----------------------------------------------------------------------
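
The ConstantPool changes keep the slot math the same but defer the element size to
getElementSize(), since VM.getOopSize() is only meaningful once the VM singleton has been
initialized. A short sketch of the index-to-offset mapping under those assumptions:

  // Sketch only: mirrors ConstantPool.indexOffset().
  // headerSize is sizeof(constantPoolOopDesc); elementSize is VM.getOopSize().
  static long cpIndexOffset(long index, long headerSize, long elementSize) {
    if (index <= 0) {
      throw new IllegalArgumentException("invalid cp index " + index);
    }
    return headerSize + index * elementSize;
  }
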
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Sun Apr 13 17:43:42 2008 -0400
@@ -31,10 +31,10 @@
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
-// A ConstantPool is an array containing class constants
-// as described in the class file
-
-public class ConstantPoolCache extends Array {
+//  ConstantPoolCache : A constant pool cache (constantPoolCacheOopDesc).
+//  See cpCacheOop.hpp for details about this class.
+//
+public class ConstantPoolCache extends Oop {
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -47,9 +47,9 @@
     Type type      = db.lookupType("constantPoolCacheOopDesc");
     constants      = new OopField(type.getOopField("_constant_pool"), 0);
     baseOffset     = type.getSize();
-
     Type elType    = db.lookupType("ConstantPoolCacheEntry");
     elementSize    = elType.getSize();
+    length         = new CIntField(type.getCIntegerField("_length"), 0);
   }
 
   ConstantPoolCache(OopHandle handle, ObjectHeap heap) {
@@ -62,6 +62,8 @@
 
   private static long baseOffset;
   private static long elementSize;
+  private static CIntField length;
+
 
   public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); }
 
@@ -87,6 +89,10 @@
     tty.print("ConstantPoolCache for " + getConstants().getPoolHolder().getName().asString());
   }
 
+  public int getLength() {
+    return (int) length.getValue(this);
+  }
+
   public void iterateFields(OopVisitor visitor, boolean doVMFields) {
     super.iterateFields(visitor, doVMFields);
     if (doVMFields) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheKlass.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheKlass.java	Sun Apr 13 17:43:42 2008 -0400
@@ -32,7 +32,7 @@
 
 // A ConstantPoolCacheKlass is the klass of a ConstantPoolCache
 
-public class ConstantPoolCacheKlass extends ArrayKlass {
+public class ConstantPoolCacheKlass extends Klass {
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -43,13 +43,20 @@
 
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type  = db.lookupType("constantPoolCacheKlass");
+    headerSize = type.getSize() + Oop.getHeaderSize();
   }
 
   ConstantPoolCacheKlass(OopHandle handle, ObjectHeap heap) {
     super(handle, heap);
   }
 
+  public long getObjectSize() { return alignObjectSize(headerSize); }
+
   public void printValueOn(PrintStream tty) {
     tty.print("ConstantPoolCacheKlass");
   }
+
+  private static long headerSize;
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolKlass.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolKlass.java	Sun Apr 13 17:43:42 2008 -0400
@@ -32,7 +32,7 @@
 
 // A ConstantPoolKlass is the klass of a ConstantPool
 
-public class ConstantPoolKlass extends ArrayKlass {
+public class ConstantPoolKlass extends Klass {
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -43,13 +43,19 @@
 
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type  = db.lookupType("constantPoolKlass");
+    headerSize = type.getSize() + Oop.getHeaderSize();
   }
 
   ConstantPoolKlass(OopHandle handle, ObjectHeap heap) {
     super(handle, heap);
   }
 
+  public long getObjectSize() { return alignObjectSize(headerSize); }
+
   public void printValueOn(PrintStream tty) {
     tty.print("ConstantPoolKlass");
   }
-};
+
+  private static long headerSize;
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/DefaultOopVisitor.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/DefaultOopVisitor.java	Sun Apr 13 17:43:42 2008 -0400
@@ -46,6 +46,7 @@
 
   // Callback methods for each field type in an object
   public void doOop(OopField field, boolean isVMField)         {}
+  public void doOop(NarrowOopField field, boolean isVMField)   {}
   public void doByte(ByteField field, boolean isVMField)       {}
   public void doChar(CharField field, boolean isVMField)       {}
   public void doBoolean(BooleanField field, boolean isVMField) {}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java	Sun Apr 13 17:43:42 2008 -0400
@@ -40,15 +40,26 @@
         }
       });
   }
+  private static long typeSize;
 
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type = db.lookupType("instanceOopDesc");
+    typeSize = type.getSize();
   }
 
   Instance(OopHandle handle, ObjectHeap heap) {
     super(handle, heap);
   }
 
+  // Returns header size in bytes.
+  public static long getHeaderSize() {
+    if (VM.getVM().isCompressedOopsEnabled()) {
+      return typeSize - VM.getVM().getIntSize();
+    } else {
+      return typeSize;
+    }
+  }
+
   public boolean isInstance()          { return true; }
 
   public void iterateFields(OopVisitor visitor, boolean doVMFields) {
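
With compressed oops the narrow klass fills only half of the metadata word, and the first
instance field can be packed into the remaining four bytes, so the effective header is
sizeof(instanceOopDesc) minus an int. A one-line sketch of getHeaderSize() with explicit
operands:

  // Sketch only: mirrors Instance.getHeaderSize().
  // typeSize is sizeof(instanceOopDesc); intSize is the jint size.
  static long instanceHeaderSize(boolean compressedOops, long typeSize, long intSize) {
    return compressedOops ? typeSize - intSize : typeSize;
  }
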
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Sun Apr 13 17:43:42 2008 -0400
@@ -467,7 +467,6 @@
     for (int index = 0; index < length; index += NEXT_OFFSET) {
       short accessFlags    = fields.getShortAt(index + ACCESS_FLAGS_OFFSET);
       short signatureIndex = fields.getShortAt(index + SIGNATURE_INDEX_OFFSET);
-
       FieldType   type   = new FieldType((Symbol) getConstants().getObjAt(signatureIndex));
       AccessFlags access = new AccessFlags(accessFlags);
       if (access.isStatic()) {
@@ -790,7 +789,11 @@
     short signatureIndex = fields.getShortAt(index + SIGNATURE_INDEX_OFFSET);
     FieldType type = new FieldType((Symbol) getConstants().getObjAt(signatureIndex));
     if (type.isOop()) {
-      return new OopField(this, index);
+     if (VM.getVM().isCompressedOopsEnabled()) {
+        return new NarrowOopField(this, index);
+     } else {
+        return new OopField(this, index);
+     }
     }
     if (type.isByte()) {
       return new ByteField(this, index);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java	Sun Apr 13 17:43:42 2008 -0400
@@ -171,8 +171,7 @@
   }
 
   public long getObjectSize() {
-    System.out.println("should not reach here");
-    return 0;
+    throw new RuntimeException("should not reach here");
   }
 
   /** Array class with specific rank */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/NarrowOopField.java	Sun Apr 13 17:43:42 2008 -0400
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import sun.jvm.hotspot.debugger.*;
+
+// The class for a narrow oop field simply provides access to the value.
+public class NarrowOopField extends OopField {
+  public NarrowOopField(FieldIdentifier id, long offset, boolean isVMField) {
+    super(id, offset, isVMField);
+  }
+
+  public NarrowOopField(sun.jvm.hotspot.types.OopField vmField, long startOffset) {
+    super(new NamedFieldIdentifier(vmField.getName()), vmField.getOffset() + startOffset, true);
+  }
+
+  public NarrowOopField(InstanceKlass holder, int fieldArrayIndex) {
+    super(holder, fieldArrayIndex);
+  }
+
+  public Oop getValue(Oop obj) {
+    return obj.getHeap().newOop(getValueAsOopHandle(obj));
+  }
+
+  /** Debugging support */
+  public OopHandle getValueAsOopHandle(Oop obj) {
+    return obj.getHandle().getCompOopHandleAt(getOffset());
+  }
+
+  public void setValue(Oop obj) throws MutationException {
+    // Fix this: setOopAt is missing in Address
+  }
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjArray.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjArray.java	Sun Apr 13 17:43:42 2008 -0400
@@ -43,7 +43,7 @@
 
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type   = db.lookupType("objArrayOopDesc");
-    elementSize = db.getOopSize();
+    elementSize = VM.getVM().getHeapOopSize();
   }
 
   ObjArray(OopHandle handle, ObjectHeap heap) {
@@ -54,9 +54,17 @@
 
   private static long elementSize;
 
-  public Oop getObjAt(long index) {
+  public OopHandle getOopHandleAt(long index) {
     long offset = baseOffsetInBytes(BasicType.T_OBJECT) + (index * elementSize);
-    return getHeap().newOop(getHandle().getOopHandleAt(offset));
+    if (VM.getVM().isCompressedOopsEnabled()) {
+      return getHandle().getCompOopHandleAt(offset);
+    } else {
+      return getHandle().getOopHandleAt(offset);
+    }
+  }
+
+  public Oop getObjAt(long index) {
+      return getHeap().newOop(getOopHandleAt(index));
   }
 
   public void printValueOn(PrintStream tty) {
@@ -69,7 +77,13 @@
     long baseOffset = baseOffsetInBytes(BasicType.T_OBJECT);
     for (int index = 0; index < length; index++) {
       long offset = baseOffset + (index * elementSize);
-      visitor.doOop(new OopField(new IndexableFieldIdentifier(index), offset, false), false);
+      OopField field;
+      if (VM.getVM().isCompressedOopsEnabled()) {
+        field = new NarrowOopField(new IndexableFieldIdentifier(index), offset, false);
+      } else {
+        field = new OopField(new IndexableFieldIdentifier(index), offset, false);
+      }
+      visitor.doOop(field, false);
     }
   }
 }
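
getOopHandleAt() is the single point where object-array element reads branch on the
compression mode; the element stride is the heap oop size either way. A sketch of the offset
arithmetic, with the two handle readers named only in the comment:

  // Sketch only: element offset used by ObjArray.getOopHandleAt().
  // baseOffset is Array.baseOffsetInBytes(T_OBJECT); elementSize is the heap oop
  // size (4 bytes with compressed oops). The handle itself is then read with
  // getCompOopHandleAt() or getOopHandleAt() depending on the mode.
  static long objArrayElementOffset(long index, long baseOffset, long elementSize) {
    return baseOffset + index * elementSize;
  }
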
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Sun Apr 13 17:43:42 2008 -0400
@@ -41,6 +41,12 @@
 
 public class ObjectHeap {
 
+  private static final boolean DEBUG;
+
+  static {
+    DEBUG = System.getProperty("sun.jvm.hotspot.oops.ObjectHeap.DEBUG") != null;
+  }
+
   private OopHandle              symbolKlassHandle;
   private OopHandle              methodKlassHandle;
   private OopHandle              constMethodKlassHandle;
@@ -152,7 +158,7 @@
 
   public ObjectHeap(TypeDataBase db) throws WrongTypeException {
     // Get commonly used sizes of basic types
-    oopSize     = db.getOopSize();
+    oopSize     = VM.getVM().getOopSize();
     byteSize    = db.getJByteType().getSize();
     charSize    = db.getJCharType().getSize();
     booleanSize = db.getJBooleanType().getSize();
@@ -440,12 +446,16 @@
       try {
         // Traverses the space from bottom to top
         OopHandle handle = bottom.addOffsetToAsOopHandle(0);
+
         while (handle.lessThan(top)) {
         Oop obj = null;
 
           try {
             obj = newOop(handle);
           } catch (UnknownOopException exp) {
+            if (DEBUG) {
+              throw new RuntimeException("UnknownOopException " + exp);
+            }
           }
           if (obj == null) {
              //Find the object size using Printezis bits and skip over
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogram.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogram.java	Sun Apr 13 17:43:42 2008 -0400
@@ -64,8 +64,17 @@
     List list = getElements();
     ObjectHistogramElement.titleOn(tty);
     Iterator iterator = list.listIterator();
+    int num = 0;
+    int totalCount = 0;
+    int totalSize = 0;
     while (iterator.hasNext()) {
-      ((ObjectHistogramElement) iterator.next()).printOn(tty);
+      ObjectHistogramElement el = (ObjectHistogramElement) iterator.next();
+      num++;
+      totalCount += el.getCount();
+      totalSize += el.getSize();
+      tty.print(num + ":" + "\t\t");
+      el.printOn(tty);
     }
+    tty.println("Total : " + "\t" + totalCount + "\t" + totalSize);
   }
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogramElement.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHistogramElement.java	Sun Apr 13 17:43:42 2008 -0400
@@ -110,12 +110,12 @@
   public static void titleOn(PrintStream tty) {
     tty.println("Object Histogram:");
     tty.println();
-    tty.println("Size" + "\t" + "Count" + "\t" + "Class description");
-    tty.println("-------------------------------------------------------");
+    tty.println("num " + "\t" + "  #instances" + "\t" + "#bytes" + "\t" + "Class description");
+    tty.println("--------------------------------------------------------------------------");
   }
 
   public void printOn(PrintStream tty) {
-    tty.print(size + "\t" + count + "\t");
+    tty.print(count + "\t" + size + "\t");
     tty.print(getDescription());
     tty.println();
   }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java	Sun Apr 13 17:43:42 2008 -0400
@@ -47,7 +47,8 @@
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type  = db.lookupType("oopDesc");
     mark       = new CIntField(type.getCIntegerField("_mark"), 0);
-    klass      = new OopField(type.getOopField("_klass"), 0);
+    klass      = new OopField(type.getOopField("_metadata._klass"), 0);
+    compressedKlass  = new NarrowOopField(type.getOopField("_metadata._compressed_klass"), 0);
     headerSize = type.getSize();
   }
 
@@ -67,10 +68,11 @@
   public OopHandle getHandle() { return handle; }
 
   private static long headerSize;
-  public  static long getHeaderSize() { return headerSize; }
+  public  static long getHeaderSize() { return headerSize; } // Header size in bytes.
 
   private static CIntField mark;
   private static OopField  klass;
+  private static NarrowOopField compressedKlass;
 
   public boolean isShared() {
     return CompactingPermGenGen.isShared(handle);
@@ -86,7 +88,13 @@
 
   // Accessors for declared fields
   public Mark  getMark()   { return new Mark(getHandle()); }
-  public Klass getKlass()  { return (Klass) klass.getValue(this); }
+  public Klass getKlass() {
+    if (VM.getVM().isCompressedOopsEnabled()) {
+      return (Klass) compressedKlass.getValue(this);
+    } else {
+      return (Klass) klass.getValue(this);
+    }
+  }
 
   public boolean isA(Klass k) {
     return getKlass().isSubtypeOf(k);
@@ -120,7 +128,7 @@
 
   // Align the object size.
   public static long alignObjectSize(long size) {
-    return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignmentInBytes());
+    return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignment());
   }
 
   // All vm's align longs, so pad out certain offsets.
@@ -163,7 +171,11 @@
   void iterateFields(OopVisitor visitor, boolean doVMFields) {
     if (doVMFields) {
       visitor.doCInt(mark, true);
-      visitor.doOop(klass, true);
+      if (VM.getVM().isCompressedOopsEnabled()) {
+        visitor.doOop(compressedKlass, true);
+      } else {
+        visitor.doOop(klass, true);
+      }
     }
   }
 
@@ -219,6 +231,10 @@
     if (handle == null) {
       return null;
     }
-    return handle.getOopHandleAt(klass.getOffset());
+    if (VM.getVM().isCompressedOopsEnabled()) {
+      return handle.getCompOopHandleAt(compressedKlass.getOffset());
+    } else {
+      return handle.getOopHandleAt(klass.getOffset());
+    }
   }
 };
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopPrinter.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopPrinter.java	Sun Apr 13 17:43:42 2008 -0400
@@ -57,6 +57,13 @@
     Oop.printOopValueOn(field.getValue(getObj()), tty);
     tty.println();
   }
+
+  public void doOop(NarrowOopField field, boolean isVMField) {
+    printField(field);
+    Oop.printOopValueOn(field.getValue(getObj()), tty);
+    tty.println();
+  }
+
   public void doChar(CharField field, boolean isVMField) {
     printField(field);
     char c = field.getValue(getObj());
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java	Sun Apr 13 17:43:42 2008 -0400
@@ -281,8 +281,11 @@
        } catch (RuntimeException re) {
           // ignore, currently java_lang_Class::hc_klass_offset is zero
        }
-
-       hcKlassField = new OopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true);
+       if (VM.getVM().isCompressedOopsEnabled()) {
+         hcKlassField = new NarrowOopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true);
+       } else {
+         hcKlassField = new OopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true);
+       }
     }
   }
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopVisitor.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopVisitor.java	Sun Apr 13 17:43:42 2008 -0400
@@ -41,6 +41,7 @@
 
   // Callback methods for each field type in an object
   public void doOop(OopField field, boolean isVMField);
+  public void doOop(NarrowOopField field, boolean isVMField);
   public void doByte(ByteField field, boolean isVMField);
   public void doChar(CharField field, boolean isVMField);
   public void doBoolean(BooleanField field, boolean isVMField);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/AddressVisitor.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/AddressVisitor.java	Sun Apr 13 17:43:42 2008 -0400
@@ -31,4 +31,5 @@
 
 public interface AddressVisitor {
   public void visitAddress(Address addr);
+  public void visitCompOopAddress(Address addr);
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java	Sun Apr 13 17:43:42 2008 -0400
@@ -534,7 +534,8 @@
     public void visitValueLocation(Address valueAddr) {
     }
 
-    public void visitDeadLocation(Address deadAddr) {
+    public void visitNarrowOopLocation(Address compOopAddr) {
+      addressVisitor.visitCompOopAddress(compOopAddr);
     }
   }
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Sun Apr 13 17:43:42 2008 -0400
@@ -36,6 +36,7 @@
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
+import sun.jvm.hotspot.runtime.*;
 
 /** <P> This class encapsulates the global state of the VM; the
     universe, object heap, interpreter, etc. It is a Singleton and
@@ -93,6 +94,10 @@
   private boolean      isLP64;
   private int          bytesPerLong;
   private int          minObjAlignmentInBytes;
+  private int          logMinObjAlignmentInBytes;
+  private int          heapWordSize;
+  private int          heapOopSize;
+  private int          oopSize;
   /** This is only present in a non-core build */
   private CodeCache    codeCache;
   /** This is only present in a C1 build */
@@ -117,6 +122,7 @@
   private static Type uintxType;
   private static CIntegerType boolType;
   private Boolean sharingEnabled;
+  private Boolean compressedOopsEnabled;
 
   // command line flags supplied to VM - see struct Flag in globals.hpp
   public static final class Flag {
@@ -308,6 +314,11 @@
     }
     bytesPerLong = db.lookupIntConstant("BytesPerLong").intValue();
     minObjAlignmentInBytes = db.lookupIntConstant("MinObjAlignmentInBytes").intValue();
+    // minObjAlignment = db.lookupIntConstant("MinObjAlignment").intValue();
+    logMinObjAlignmentInBytes = db.lookupIntConstant("LogMinObjAlignmentInBytes").intValue();
+    heapWordSize = db.lookupIntConstant("HeapWordSize").intValue();
+    oopSize  = db.lookupIntConstant("oopSize").intValue();
+    heapOopSize  = db.lookupIntConstant("heapOopSize").intValue();
 
     intxType = db.lookupType("intx");
     uintxType = db.lookupType("uintx");
@@ -331,6 +342,8 @@
       throw new RuntimeException("Attempt to initialize VM twice");
     }
     soleInstance = new VM(db, debugger, debugger.getMachineDescription().isBigEndian());
+    debugger.putHeapConst(Universe.getHeapBase(), soleInstance.getHeapOopSize(),
+                          soleInstance.logMinObjAlignmentInBytes);
     for (Iterator iter = vmInitializedObservers.iterator(); iter.hasNext(); ) {
       ((Observer) iter.next()).update(null, null);
     }
@@ -440,13 +453,17 @@
   }
 
   public long getOopSize() {
-    return db.getOopSize();
+    return oopSize;
   }
 
   public long getLogAddressSize() {
     return logAddressSize;
   }
 
+  public long getIntSize() {
+    return db.getJIntType().getSize();
+  }
+
   /** NOTE: this offset is in BYTES in this system! */
   public long getStackBias() {
     return stackBias;
@@ -467,10 +484,24 @@
   }
 
   /** Get minimum object alignment in bytes. */
+  public int getMinObjAlignment() {
+    return minObjAlignmentInBytes;
+  }
+
   public int getMinObjAlignmentInBytes() {
     return minObjAlignmentInBytes;
   }
+  public int getLogMinObjAlignmentInBytes() {
+    return logMinObjAlignmentInBytes;
+  }
 
+  public int getHeapWordSize() {
+    return heapWordSize;
+  }
+
+  public int getHeapOopSize() {
+    return heapOopSize;
+  }
   /** Utility routine for getting data structure alignment correct */
   public long alignUp(long size, long alignment) {
     return (size + alignment - 1) & ~(alignment - 1);
@@ -701,6 +732,14 @@
     return sharingEnabled.booleanValue();
   }
 
+  public boolean isCompressedOopsEnabled() {
+    if (compressedOopsEnabled == null) {
+        Flag flag = getCommandLineFlag("UseCompressedOops");
+        compressedOopsEnabled = (flag == null) ? Boolean.FALSE :
+            (flag.getBool() ? Boolean.TRUE : Boolean.FALSE);
+    }
+    return compressedOopsEnabled.booleanValue();
+  }
 
   // returns null, if not available.
   public Flag[] getCommandLineFlags() {
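
isCompressedOopsEnabled() memoizes the UseCompressedOops flag so that the many layout
decisions above don't re-scan the flag table, and it treats a missing flag as false. A
minimal sketch of that caching pattern, with the flag probe reduced to a hypothetical helper:

  // Sketch only: memoized flag read in the style of VM.isCompressedOopsEnabled().
  private Boolean compressedOopsEnabled;

  public boolean isCompressedOopsEnabled() {
    if (compressedOopsEnabled == null) {
      Boolean flag = readUseCompressedOopsFlag();      // hypothetical probe
      compressedOopsEnabled = (flag == null) ? Boolean.FALSE : flag;
    }
    return compressedOopsEnabled.booleanValue();
  }

  private Boolean readUseCompressedOopsFlag() {
    // Stand-in for getCommandLineFlag("UseCompressedOops"); returns null when the
    // flag is not present in the target VM's flag table.
    return null;
  }
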
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Field.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Field.java	Sun Apr 13 17:43:42 2008 -0400
@@ -109,6 +109,8 @@
   public Address   getAddress  (Address addr) throws UnmappedAddressException, UnalignedAddressException, WrongTypeException;
   public OopHandle getOopHandle(Address addr)
     throws UnmappedAddressException, UnalignedAddressException, WrongTypeException, NotInHeapException;
+  public OopHandle getNarrowOopHandle(Address addr)
+    throws UnmappedAddressException, UnalignedAddressException, WrongTypeException, NotInHeapException;
 
   /** <P> These accessors require that the field be static; otherwise,
       a WrongTypeException will be thrown. Note that type checking is
@@ -138,4 +140,6 @@
   public Address   getAddress  () throws UnmappedAddressException, UnalignedAddressException;
   public OopHandle getOopHandle()
     throws UnmappedAddressException, UnalignedAddressException, NotInHeapException;
+  public OopHandle getNarrowOopHandle()
+    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException;
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/NarrowOopField.java	Sun Apr 13 17:43:42 2008 -0400
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.types;
+
+import sun.jvm.hotspot.debugger.*;
+
+/** A specialization of Field which represents a field containing a
+    narrow oop value and which adds typechecked getValue() routines returning
+    OopHandles. */
+
+public interface NarrowOopField extends OopField {
+  /** The field must be nonstatic and the type of the field must be an
+      oop type, or a WrongTypeException will be thrown. */
+  public OopHandle getValue(Address addr)     throws UnmappedAddressException, UnalignedAddressException, WrongTypeException;
+
+  /** The field must be static and the type of the field must be an
+      oop type, or a WrongTypeException will be thrown. */
+  public OopHandle getValue()                 throws UnmappedAddressException, UnalignedAddressException, WrongTypeException;
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Type.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/Type.java	Sun Apr 13 17:43:42 2008 -0400
@@ -122,5 +122,6 @@
   public JShortField         getJShortField        (String fieldName) throws WrongTypeException;
   public CIntegerField       getCIntegerField      (String fieldName) throws WrongTypeException;
   public OopField            getOopField           (String fieldName) throws WrongTypeException;
+  public NarrowOopField      getNarrowOopField     (String fieldName) throws WrongTypeException;
   public AddressField        getAddressField       (String fieldName);
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicField.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicField.java	Sun Apr 13 17:43:42 2008 -0400
@@ -43,6 +43,19 @@
   /** Used for static fields only */
   private Address staticFieldAddress;
 
+  // Copy constructor to create NarrowOopField from OopField.
+  public BasicField(Field fld) {
+    BasicField field = (BasicField)fld;
+
+    this.db = field.db;
+    this.containingType = field.containingType;
+    this.name = field.name;
+    this.type = field.type;
+    this.size = field.size;
+    this.isStatic = field.isStatic;
+    this.offset = field.offset;
+    this.staticFieldAddress = field.staticFieldAddress;
+  }
   /** offsetInBytes is ignored if the field is static;
       staticFieldAddress is used only if the field is static. */
   public BasicField(BasicTypeDataBase db, Type containingType, String name, Type type,
@@ -161,6 +174,13 @@
     }
     return addr.getOopHandleAt(offset);
   }
+  public OopHandle getNarrowOopHandle(Address addr)
+    throws UnmappedAddressException, UnalignedAddressException, WrongTypeException, NotInHeapException {
+    if (isStatic) {
+      throw new WrongTypeException();
+    }
+    return addr.getCompOopHandleAt(offset);
+  }
 
   //--------------------------------------------------------------------------------
   // Dereferencing operations for static fields
@@ -234,4 +254,11 @@
     }
     return staticFieldAddress.getOopHandleAt(0);
   }
+  public OopHandle getNarrowOopHandle()
+    throws UnmappedAddressException, UnalignedAddressException, WrongTypeException, NotInHeapException {
+    if (!isStatic) {
+      throw new WrongTypeException();
+    }
+    return staticFieldAddress.getCompOopHandleAt(0);
+  }
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicFieldWrapper.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicFieldWrapper.java	Sun Apr 13 17:43:42 2008 -0400
@@ -95,6 +95,10 @@
     throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
     return field.getOopHandle(addr);
   }
+  public OopHandle  getNarrowOopHandle(Address addr)
+    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+    return field.getNarrowOopHandle(addr);
+  }
 
   public boolean    getJBoolean () throws UnmappedAddressException, UnalignedAddressException, WrongTypeException {
     return field.getJBoolean();
@@ -130,4 +134,8 @@
     throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
     return field.getOopHandle();
   }
+  public OopHandle  getNarrowOopHandle()
+    throws UnmappedAddressException, UnalignedAddressException, NotInHeapException {
+    return field.getNarrowOopHandle();
+  }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicNarrowOopField.java	Sun Apr 13 17:43:42 2008 -0400
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+package sun.jvm.hotspot.types.basic;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.types.*;
+
+/** A specialization of BasicField which represents a field containing
+    a narrow oop value and which adds typechecked getValue() routines
+    returning OopHandles. */
+
+public class BasicNarrowOopField extends BasicOopField implements NarrowOopField {
+
+  private static final boolean DEBUG = false;
+
+  public BasicNarrowOopField (OopField oopf) {
+    super(oopf);
+  }
+
+  public BasicNarrowOopField(BasicTypeDataBase db, Type containingType, String name, Type type,
+                       boolean isStatic, long offset, Address staticFieldAddress) {
+    super(db, containingType, name, type, isStatic, offset, staticFieldAddress);
+
+    if (DEBUG) {
+      System.out.println(" name " + name + " type " + type + " isStatic " + isStatic + " offset " + offset + " static addr " + staticFieldAddress);
+    }
+    if (!type.isOopType()) {
+      throw new WrongTypeException("Type of a BasicOopField must be an oop type");
+    }
+  }
+
+  /** The field must be nonstatic and the type of the field must be a
+      Java oop, or a WrongTypeException will be thrown. */
+  public OopHandle getValue(Address addr) throws UnmappedAddressException, UnalignedAddressException, WrongTypeException {
+    return getNarrowOopHandle(addr);
+  }
+
+  /** The field must be static and the type of the field must be a
+      Java oop, or a WrongTypeException will be thrown. */
+  public OopHandle getValue() throws UnmappedAddressException, UnalignedAddressException, WrongTypeException {
+    return getNarrowOopHandle();
+  }
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicOopField.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicOopField.java	Sun Apr 13 17:43:42 2008 -0400
@@ -32,6 +32,12 @@
     returning OopHandles. */
 
 public class BasicOopField extends BasicField implements OopField {
+
+
+  public BasicOopField(OopField oopf) {
+    super(oopf);
+  }
+
   public BasicOopField(BasicTypeDataBase db, Type containingType, String name, Type type,
                        boolean isStatic, long offset, Address staticFieldAddress) {
     super(db, containingType, name, type, isStatic, offset, staticFieldAddress);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicType.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicType.java	Sun Apr 13 17:43:42 2008 -0400
@@ -273,6 +273,10 @@
     return (OopField) field;
   }
 
+  public NarrowOopField getNarrowOopField(String fieldName) throws WrongTypeException {
+    return (NarrowOopField) new BasicNarrowOopField(getOopField(fieldName));
+  }
+
   public AddressField getAddressField(String fieldName) {
     // This type can not be inferred (for now), so provide a wrapper
     Field field = getField(fieldName);
@@ -287,7 +291,7 @@
       name was already present in this class. */
   public void addField(Field field) {
     if (nameToFieldMap.get(field.getName()) != null) {
-      throw new RuntimeException("field of name \"" + field.getName() + "\" already present");
+      throw new RuntimeException("field of name \"" + field.getName() + "\" already present in type " + this);
     }
 
     nameToFieldMap.put(field.getName(), field);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java	Sun Apr 13 17:43:42 2008 -0400
@@ -27,6 +27,7 @@
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.runtime.VM;
 
 /** <P> This is a basic implementation of the TypeDataBase interface.
     It allows an external type database builder to add types to be
@@ -146,7 +147,7 @@
   }
 
   public long getOopSize() {
-    return machDesc.getOopSize();
+    return VM.getVM().getOopSize();
   }
 
   public boolean addressTypeIsEqualToType(Address addr, Type type) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/FindInHeapPanel.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/FindInHeapPanel.java	Sun Apr 13 17:43:42 2008 -0400
@@ -92,7 +92,17 @@
           iterated += addressSize;
           updateProgressBar();
         }
+        public void visitCompOopAddress(Address addr) {
+          if (error) return;
 
+          Address val = addr.getCompOopAddressAt(0);
+          if (AddressOps.equal(val, value)) {
+            error = reportResult(addr);
+          }
+          iterated += addressSize;
+          updateProgressBar();
+
+        }
         public void epilogue() {
           iterated = 0;
           updateProgressBar();
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Sun Apr 13 17:43:42 2008 -0400
@@ -1077,8 +1077,8 @@
       oms = new OopMapStream(map, OopMapValue.OopTypes.VALUE_VALUE);
       buf.append(omvIterator.iterate(oms, "Value:", false));
 
-      oms = new OopMapStream(map, OopMapValue.OopTypes.DEAD_VALUE);
-      buf.append(omvIterator.iterate(oms, "Dead:", false));
+      oms = new OopMapStream(map, OopMapValue.OopTypes.NARROWOOP_VALUE);
+      buf.append(omvIterator.iterate(oms, "Oop:", false));
 
       oms = new OopMapStream(map, OopMapValue.OopTypes.CALLEE_SAVED_VALUE);
       buf.append(omvIterator.iterate(oms, "Callee saved:",  true));
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Sun Apr 13 17:43:42 2008 -0400
@@ -156,6 +156,9 @@
                                   throw new RuntimeException(exp);
                               }
                           }
+                              public void visitCompOopAddress(Address handleAddr) {
+                                  throw new RuntimeException("Should not reach here. JNIHandles are not compressed");
+                              }
                        });
             } catch (RuntimeException re) {
                 handleRuntimeException(re);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Sun Apr 13 17:43:42 2008 -0400
@@ -574,6 +574,10 @@
                                    throw new RuntimeException(exp);
                                }
                            }
+                           public void visitCompOopAddress(Address handleAddr) {
+                             throw new RuntimeException(
+                                   " Should not reach here. JNIHandles are not compressed \n");
+                           }
                        });
             } catch (RuntimeException re) {
                 handleRuntimeException(re);
@@ -601,8 +605,7 @@
         writeObjectID(array.getKlass().getJavaMirror());
         final int length = (int) array.getLength();
         for (int index = 0; index < length; index++) {
-            long offset = OBJECT_BASE_OFFSET + index * OBJ_ID_SIZE;
-            OopHandle handle = array.getHandle().getOopHandleAt(offset);
+            OopHandle handle = array.getOopHandleAt(index);
             writeObjectID(getAddressValue(handle));
         }
     }
@@ -803,8 +806,13 @@
             break;
         case JVM_SIGNATURE_CLASS:
         case JVM_SIGNATURE_ARRAY: {
-            OopHandle handle = ((OopField)field).getValueAsOopHandle(oop);
-            writeObjectID(getAddressValue(handle));
+            if (VM.getVM().isCompressedOopsEnabled()) {
+              OopHandle handle = ((NarrowOopField)field).getValueAsOopHandle(oop);
+              writeObjectID(getAddressValue(handle));
+            } else {
+              OopHandle handle = ((OopField)field).getValueAsOopHandle(oop);
+              writeObjectID(getAddressValue(handle));
+            }
             break;
         }
         default:
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ReversePtrsAnalysis.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ReversePtrsAnalysis.java	Sun Apr 13 17:43:42 2008 -0400
@@ -282,6 +282,15 @@
       markAndTraverse(next);
     }
 
+    public void visitCompOopAddress(Address addr) {
+      Oop next = heap.newOop(addr.getCompOopHandleAt(0));
+      LivenessPathElement lp = new LivenessPathElement(null,
+                                        new NamedFieldIdentifier(baseRootDescription +
+                                                                 " @ " + addr));
+      rp.put(lp, next);
+      markAndTraverse(next);
+    }
+
     private String baseRootDescription;
   }
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java	Sun Apr 13 17:43:42 2008 -0400
@@ -51,7 +51,11 @@
   private static void initialize(TypeDataBase db) {
     Type type = db.lookupType("oopDesc");
 
-    klassField = type.getOopField("_klass");
+    if (VM.getVM().isCompressedOopsEnabled()) {
+      klassField = type.getNarrowOopField("_metadata._compressed_klass");
+    } else {
+      klassField = type.getOopField("_metadata._klass");
+    }
   }
 
   public static boolean oopLooksValid(OopHandle oop) {
--- a/hotspot/make/Makefile	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/make/Makefile	Sun Apr 13 17:43:42 2008 -0400
@@ -85,6 +85,9 @@
 C2_VM_TARGETS=product  fastdebug  optimized  jvmg
 KERNEL_VM_TARGETS=productkernel fastdebugkernel optimizedkernel jvmgkernel
 
+# JDK directory list
+JDK_DIRS=bin include jre lib demo
+
 all:           all_product all_fastdebug
 all_product:   product product1 productkernel docs export_product
 all_fastdebug: fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
@@ -341,7 +344,7 @@
 	$(RM) -r $(JDK_IMAGE_DIR)
 	$(MKDIR) -p $(JDK_IMAGE_DIR)
 	($(CD) $(JDK_IMPORT_PATH) && \
-	 $(TAR) -cf - bin include jre lib) | \
+	 $(TAR) -cf - $(JDK_DIRS)) | \
 	 ($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xf -)
 
 copy_fastdebug_jdk:
@@ -349,11 +352,11 @@
 	$(MKDIR) -p $(JDK_IMAGE_DIR)/fastdebug
 	if [ -d $(JDK_IMPORT_PATH)/fastdebug ] ; then \
 	  ($(CD) $(JDK_IMPORT_PATH)/fastdebug && \
-	   $(TAR) -cf - bin include jre lib) | \
+	   $(TAR) -cf - $(JDK_DIRS)) | \
 	   ($(CD) $(JDK_IMAGE_DIR)/fastdebug && $(TAR) -xf -) ; \
 	else \
 	  ($(CD) $(JDK_IMPORT_PATH) && \
-	   $(TAR) -cf - bin include jre lib) | \
+	   $(TAR) -cf - $(JDK_DIRS)) | \
 	   ($(CD) $(JDK_IMAGE_DIR)/fastdebug && $(TAR) -xf -) ; \
 	fi
 
@@ -362,15 +365,15 @@
 	$(MKDIR) -p $(JDK_IMAGE_DIR)/debug
 	if [ -d $(JDK_IMPORT_PATH)/debug ] ; then \
 	  ($(CD) $(JDK_IMPORT_PATH)/debug && \
-	   $(TAR) -cf - bin include jre lib) | \
+	   $(TAR) -cf - $(JDK_DIRS)) | \
 	   ($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
 	elif [ -d $(JDK_IMPORT_PATH)/fastdebug ] ; then \
 	  ($(CD) $(JDK_IMPORT_PATH)/fastdebug && \
-	   $(TAR) -cf - bin include jre lib) | \
+	   $(TAR) -cf - $(JDK_DIRS)) | \
 	   ($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
 	else \
 	  ($(CD) $(JDK_IMPORT_PATH) && \
-	   $(TAR) -cf - bin include jre lib) | \
+	   $(TAR) -cf - $(JDK_DIRS)) | \
 	   ($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
 	fi
 
--- a/hotspot/make/solaris/makefiles/sparcWorks.make	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/make/solaris/makefiles/sparcWorks.make	Sun Apr 13 17:43:42 2008 -0400
@@ -185,6 +185,12 @@
 # no more exceptions
 CFLAGS/NOEX=-features=no%except
 
+
+# avoid compilation problems arising from the fact that the C++ compiler tries
+# to search for external template definitions by just compiling additional
+# source files in the same context
+CFLAGS +=  -template=no%extdef
+
 # Reduce code bloat by reverting back to 5.0 behavior for static initializers
 CFLAGS += -features=no%split_init
 
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1779,7 +1779,7 @@
 
   // Check the klassOop of this object for being in the right area of memory.
   // Cannot do the load in the delay above slot in case O0 is null
-  ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
+  load_klass(O0_obj, O0_obj);
   // assert((klass & klass_mask) == klass_bits);
   if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
     set(Universe::verify_klass_mask(), O2_mask);
@@ -1788,8 +1788,9 @@
   and3(O0_obj, O2_mask, O4_temp);
   cmp(O4_temp, O3_bits);
   brx(notEqual, false, pn, fail);
+  delayed()->nop();
   // Check the klass's klass
-  delayed()->ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
+  load_klass(O0_obj, O0_obj);
   and3(O0_obj, O2_mask, O4_temp);
   cmp(O4_temp, O3_bits);
   brx(notEqual, false, pn, fail);
@@ -2588,8 +2589,9 @@
   and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
   cmp(temp_reg, markOopDesc::biased_lock_pattern);
   brx(Assembler::notEqual, false, Assembler::pn, cas_label);
-
-  delayed()->ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
+  delayed()->nop();
+
+  load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
   or3(G2_thread, temp_reg, temp_reg);
   xor3(mark_reg, temp_reg, temp_reg);
@@ -2668,7 +2670,7 @@
   //
   // FIXME: due to a lack of registers we currently blow away the age
   // bits in this situation. Should attempt to preserve them.
-  ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
+  load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
   or3(G2_thread, temp_reg, temp_reg);
   casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
@@ -2700,7 +2702,7 @@
   //
   // FIXME: due to a lack of registers we currently blow away the age
   // bits in this situation. Should attempt to preserve them.
-  ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
+  load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
   casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
                   (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
@@ -3406,7 +3408,7 @@
   // set klass to intArrayKlass
   set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
   ld_ptr(t2, 0, t2);
-  st_ptr(t2, top, oopDesc::klass_offset_in_bytes());
+  store_klass(t2, top);
   sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
   add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
   sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
@@ -3534,3 +3536,139 @@
     st(G0, Rtsp, Rscratch);
   }
 }
+
+void MacroAssembler::load_klass(Register s, Register d) {
+  // The number of bytes in this code is used by
+  // MachCallDynamicJavaNode::ret_addr_offset()
+  // if this changes, change that.
+  if (UseCompressedOops) {
+    lduw(s, oopDesc::klass_offset_in_bytes(), d);
+    decode_heap_oop_not_null(d);
+  } else {
+    ld_ptr(s, oopDesc::klass_offset_in_bytes(), d);
+  }
+}
+
+// ??? figure out src vs. dst!
+void MacroAssembler::store_klass(Register d, Register s1) {
+  if (UseCompressedOops) {
+    assert(s1 != d, "not enough registers");
+    encode_heap_oop_not_null(d);
+    // Zero out entire klass field first.
+    st_ptr(G0, s1, oopDesc::klass_offset_in_bytes());
+    st(d, s1, oopDesc::klass_offset_in_bytes());
+  } else {
+    st_ptr(d, s1, oopDesc::klass_offset_in_bytes());
+  }
+}
+
+void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) {
+  if (UseCompressedOops) {
+    lduw(s, d, offset);
+    decode_heap_oop(d);
+  } else {
+    ld_ptr(s, d, offset);
+  }
+}
+
+void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
+   if (UseCompressedOops) {
+    lduw(s1, s2, d);
+    decode_heap_oop(d, d);
+  } else {
+    ld_ptr(s1, s2, d);
+  }
+}
+
+void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
+   if (UseCompressedOops) {
+    lduw(s1, simm13a, d);
+    decode_heap_oop(d, d);
+  } else {
+    ld_ptr(s1, simm13a, d);
+  }
+}
+
+void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
+  if (UseCompressedOops) {
+    assert(s1 != d && s2 != d, "not enough registers");
+    encode_heap_oop(d);
+    st(d, s1, s2);
+  } else {
+    st_ptr(d, s1, s2);
+  }
+}
+
+void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
+  if (UseCompressedOops) {
+    assert(s1 != d, "not enough registers");
+    encode_heap_oop(d);
+    st(d, s1, simm13a);
+  } else {
+    st_ptr(d, s1, simm13a);
+  }
+}
+
+void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
+  if (UseCompressedOops) {
+    assert(a.base() != d, "not enough registers");
+    encode_heap_oop(d);
+    st(d, a, offset);
+  } else {
+    st_ptr(d, a, offset);
+  }
+}
+
+
+void MacroAssembler::encode_heap_oop(Register src, Register dst) {
+  assert (UseCompressedOops, "must be compressed");
+  Label done;
+  if (src == dst) {
+    // optimize for frequent case src == dst
+    bpr(rc_nz, true, Assembler::pt, src, done);
+    delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
+    bind(done);
+    srlx(src, LogMinObjAlignmentInBytes, dst);
+  } else {
+    bpr(rc_z, false, Assembler::pn, src, done);
+    delayed()->mov(G0, dst);
+    // could be moved before the branch and annul the delay slot,
+    // but may add some unneeded work decoding null
+    sub(src, G6_heapbase, dst);
+    srlx(dst, LogMinObjAlignmentInBytes, dst);
+    bind(done);
+  }
+}
+
+
+void MacroAssembler::encode_heap_oop_not_null(Register r) {
+  assert (UseCompressedOops, "must be compressed");
+  sub(r, G6_heapbase, r);
+  srlx(r, LogMinObjAlignmentInBytes, r);
+}
+
+// Same algorithm as oops.inline.hpp decode_heap_oop.
+void  MacroAssembler::decode_heap_oop(Register src, Register dst) {
+  assert (UseCompressedOops, "must be compressed");
+  Label done;
+  sllx(src, LogMinObjAlignmentInBytes, dst);
+  bpr(rc_nz, true, Assembler::pt, dst, done);
+  delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
+  bind(done);
+}
+
+void  MacroAssembler::decode_heap_oop_not_null(Register r) {
+  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+  // pd_code_size_limit.
+  assert (UseCompressedOops, "must be compressed");
+  sllx(r, LogMinObjAlignmentInBytes, r);
+  add(r, G6_heapbase, r);
+}
+
+void MacroAssembler::reinit_heapbase() {
+  if (UseCompressedOops) {
+    // call indirectly to solve generation ordering problem
+    Address base(G6_heapbase, (address)Universe::heap_base_addr());
+    load_ptr_contents(base, G6_heapbase);
+  }
+}
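
The encode/decode pair is plain arithmetic: subtract the heap base and shift right by
LogMinObjAlignmentInBytes to compress, reverse the two steps to decompress, and map null to
null in both directions (G6_heapbase caches the base and is refreshed by reinit_heapbase()).
A hedged Java sketch of the same math:

  // Sketch only: the compressed-oop arithmetic behind encode_heap_oop() and
  // decode_heap_oop(). base and shift stand in for the heap base and
  // LogMinObjAlignmentInBytes values taken from the target VM.
  static long encodeHeapOop(long oop, long base, int shift) {
    return (oop == 0L) ? 0L : (oop - base) >>> shift;
  }

  static long decodeHeapOop(long narrowOop, long base, int shift) {
    return (narrowOop == 0L) ? 0L : base + (narrowOop << shift);
  }
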
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -59,6 +59,7 @@
 // This global always holds the current JavaThread pointer:
 
 REGISTER_DECLARATION(Register, G2_thread , G2);
+REGISTER_DECLARATION(Register, G6_heapbase , G6);
 
 // The following globals are part of the Java calling convention:
 
@@ -1975,6 +1976,29 @@
   inline void tstbool( Register s ) { tst(s); }
   inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
 
+  // klass oop manipulations if compressed
+  void load_klass(Register  src_oop, Register dst);
+  void store_klass(Register dst_oop, Register s1);
+
+   // oop manipulations
+  void load_heap_oop(const Address& s, Register d, int offset = 0);
+  void load_heap_oop(Register s1, Register s2, Register d);
+  void load_heap_oop(Register s1, int simm13a, Register d);
+  void store_heap_oop(Register d, Register s1, Register s2);
+  void store_heap_oop(Register d, Register s1, int simm13a);
+  void store_heap_oop(Register d, const Address& a, int offset = 0);
+
+  void encode_heap_oop(Register src, Register dst);
+  void encode_heap_oop(Register r) {
+    encode_heap_oop(r, r);
+  }
+  void decode_heap_oop(Register src, Register dst);
+  void decode_heap_oop(Register r) {
+    decode_heap_oop(r, r);
+  }
+  void encode_heap_oop_not_null(Register r);
+  void decode_heap_oop_not_null(Register r);
+
   // Support for managing the JavaThread pointer (i.e.; the reference to
   // thread-local information).
   void get_thread();                                // load G2_thread
@@ -2050,6 +2074,9 @@
   void push_CPU_state();
   void pop_CPU_state();
 
+  // If the heap base register is used, reinitialize it with the correct value.
+  void reinit_heapbase();
+
   // Debugging
   void _verify_oop(Register reg, const char * msg, const char * file, int line);
   void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);
--- a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -236,7 +236,7 @@
   Register t1,                         // temp register
   Register t2                          // temp register
   ) {
-  const int hdr_size_in_bytes = oopDesc::header_size_in_bytes();
+  const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
 
   initialize_header(obj, klass, noreg, t1, t2);
 
--- a/hotspot/src/cpu/sparc/vm/copy_sparc.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/copy_sparc.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -137,24 +137,20 @@
 }
 
 static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
-#if 0
-  if (HeapWordsPerLong == 1 ||
-      (HeapWordsPerLong == 2 &&
-       mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0 &&
-       ((count & 1) ? false : count >>= 1))) {
-    julong* to = (julong*)tohw;
-    julong  v  = ((julong)value << 32) | value;
-    while (count-- > 0) {
-      *to++ = v;
-    }
-  } else {
-#endif
-    juint* to = (juint*)tohw;
-    count *= HeapWordSize / BytesPerInt;
-    while (count-- > 0) {
-      *to++ = value;
-    }
-    //  }
+#ifdef _LP64
+  guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
+         "unaligned fill words");
+  julong* to = (julong*)tohw;
+  julong  v  = ((julong)value << 32) | value;
+  while (count-- > 0) {
+    *to++ = v;
+  }
+#else // _LP64
+  juint* to = (juint*)tohw;
+  while (count-- > 0) {
+    *to++ = value;
+  }
+#endif // _LP64
 }
 
 static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
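The LP64 path above now requires 8-byte alignment and fills with a 64-bit pattern built from the 32-bit value. A tiny sketch of that pattern construction (the helper name is made up):

#include <cstdint>

// Replicate a 32-bit fill value into both halves of a 64-bit word,
// as the _LP64 branch of pd_fill_to_words does with julong.
static inline uint64_t replicate_fill_word(uint32_t value) {
  return ((uint64_t)value << 32) | value;
}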
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -859,7 +859,7 @@
 
 
 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
-// a subtype of super_klass.  Blows registers Rsub_klass, tmp1, tmp2.
+// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                   Register Rsuper_klass,
                                                   Register Rtmp1,
@@ -891,6 +891,9 @@
   // Now do a linear scan of the secondary super-klass chain.
   delayed()->ld_ptr( Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), Rtmp2 );
 
+  // compress superclass
+  if (UseCompressedOops) encode_heap_oop(Rsuper_klass);
+
   // Rtmp2 holds the objArrayOop of secondary supers.
   ld( Rtmp2, arrayOopDesc::length_offset_in_bytes(), Rtmp1 );// Load the array length
   // Check for empty secondary super list
@@ -900,20 +903,28 @@
   bind( loop );
   br( Assembler::equal, false, Assembler::pn, not_subtype );
   delayed()->nop();
+
   // load next super to check
-  ld_ptr( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3 );
-
-  // Bump array pointer forward one oop
-  add( Rtmp2, wordSize, Rtmp2 );
+  if (UseCompressedOops) {
+    ld( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
+    // Bump array pointer forward one oop
+    add( Rtmp2, 4, Rtmp2 );
+  } else {
+    ld_ptr( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
+    // Bump array pointer forward one oop
+    add( Rtmp2, wordSize, Rtmp2);
+  }
   // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
   cmp( Rtmp3, Rsuper_klass );
   // A miss means we are NOT a subtype and need to keep looping
   brx( Assembler::notEqual, false, Assembler::pt, loop );
   delayed()->deccc( Rtmp1 );    // dec trip counter in delay slot
   // Falling out the bottom means we found a hit; we ARE a subtype
+  if (UseCompressedOops) decode_heap_oop(Rsuper_klass);
   br( Assembler::always, false, Assembler::pt, ok_is_subtype );
   // Update the cache
-  delayed()->st_ptr( Rsuper_klass, Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
+  delayed()->st_ptr( Rsuper_klass, Rsub_klass,
+                     sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
 
   bind(not_subtype);
   profile_typecheck_failed(Rtmp1);
--- a/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -131,6 +131,7 @@
 
 
 REGISTER_DEFINITION(Register, G2_thread);
+REGISTER_DEFINITION(Register, G6_heapbase);
 REGISTER_DEFINITION(Register, G5_method);
 REGISTER_DEFINITION(Register, G5_megamorphic_method);
 REGISTER_DEFINITION(Register, G5_inline_cache_reg);
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -160,18 +160,24 @@
   map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
 #endif /* _LP64 */
 
+
+#ifdef _LP64
+  int debug_offset = 0;
+#else
+  int debug_offset = 4;
+#endif
   // Save the G's
   __ stx(G1, SP, g1_offset+STACK_BIAS);
-  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + 4)>>2), G1->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
 
   __ stx(G3, SP, g3_offset+STACK_BIAS);
-  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + 4)>>2), G3->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());
 
   __ stx(G4, SP, g4_offset+STACK_BIAS);
-  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + 4)>>2), G4->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());
 
   __ stx(G5, SP, g5_offset+STACK_BIAS);
-  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + 4)>>2), G5->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());
 
   // This is really a waste but we'll keep things as they were for now
   if (true) {
@@ -182,11 +188,11 @@
     map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
     map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
     map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
-#endif /* _LP64 */
     map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
     map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
     map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
     map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
+#endif /* _LP64 */
   }
 
 
@@ -1217,7 +1223,7 @@
 
     __ verify_oop(O0);
     __ verify_oop(G5_method);
-    __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
+    __ load_klass(O0, G3_scratch);
     __ verify_oop(G3_scratch);
 
 #if !defined(_LP64) && defined(COMPILER2)
@@ -1820,7 +1826,7 @@
     const Register temp_reg = G3_scratch;
     Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
     __ verify_oop(O0);
-    __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
+    __ load_klass(O0, temp_reg);
     __ cmp(temp_reg, G5_inline_cache_reg);
     __ brx(Assembler::equal, true, Assembler::pt, L);
     __ delayed()->nop();
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Sun Apr 13 17:43:42 2008 -0400
@@ -544,11 +544,19 @@
     assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
     int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
     int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
+    int klass_load_size;
+    if (UseCompressedOops) {
+      klass_load_size = 3*BytesPerInstWord; // see MacroAssembler::load_klass()
+    } else {
+      klass_load_size = 1*BytesPerInstWord;
+    }
     if( Assembler::is_simm13(v_off) ) {
-      return (3*BytesPerInstWord +           // ld_ptr, ld_ptr, ld_ptr
+      return klass_load_size +
+             (2*BytesPerInstWord +           // ld_ptr, ld_ptr
              NativeCall::instruction_size);  // call; delay slot
     } else {
-      return (5*BytesPerInstWord +           // ld_ptr, set_hi, set, ld_ptr, ld_ptr
+      return klass_load_size +
+             (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
              NativeCall::instruction_size);  // call; delay slot
     }
   }
@@ -1591,7 +1599,13 @@
 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   st->print_cr("\nUEP:");
 #ifdef    _LP64
-  st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
+  if (UseCompressedOops) {
+    st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
+    st->print_cr("\tSLL    R_G5,3,R_G5");
+    st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
+  } else {
+    st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
+  }
   st->print_cr("\tCMP    R_G5,R_G3" );
   st->print   ("\tTne    xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
 #else  // _LP64
@@ -1610,7 +1624,7 @@
   assert( G5_ic_reg != temp_reg, "conflicting registers" );
 
   // Load klass from receiver
-  __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
+  __ load_klass(O0, temp_reg);
   // Compare against expected klass
   __ cmp(temp_reg, G5_ic_reg);
   // Branch to miss code, checks xcc or icc depending
@@ -1811,6 +1825,11 @@
       reg == R_I3H_num ||
       reg == R_I4H_num ||
       reg == R_I5H_num ) return true;
+
+  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
+    return true;
+  }
+
 #else
   // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
   // Longs cannot be passed in O regs, because O regs become I regs
@@ -2474,7 +2493,13 @@
       // get receiver klass (receiver already checked for non-null)
       // If we end up going thru a c2i adapter interpreter expects method in G5
       int off = __ offset();
-      __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
+      __ load_klass(O0, G3_scratch);
+      int klass_load_size;
+      if (UseCompressedOops) {
+        klass_load_size = 3*BytesPerInstWord;
+      } else {
+        klass_load_size = 1*BytesPerInstWord;
+      }
       int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
       int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
       if( __ is_simm13(v_off) ) {
@@ -2484,7 +2509,8 @@
         __ Assembler::sethi(v_off & ~0x3ff, G5_method);
         __ or3(G5_method, v_off & 0x3ff, G5_method);
         // ld_ptr, set_hi, set
-        assert(__ offset() - off == 3*BytesPerInstWord, "Unexpected instruction size(s)");
+        assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
+               "Unexpected instruction size(s)");
         __ ld_ptr(G3, G5_method, G5_method);
       }
       // NOTE: for vtable dispatches, the vtable entry will never be null.
@@ -2860,12 +2886,12 @@
     int  count_offset = java_lang_String:: count_offset_in_bytes();
 
     // load str1 (jchar*) base address into tmp1_reg
-    __ ld_ptr(Address(str1_reg, 0,  value_offset), tmp1_reg);
+    __ load_heap_oop(Address(str1_reg, 0,  value_offset), tmp1_reg);
     __ ld(Address(str1_reg, 0, offset_offset), result_reg);
     __ add(tmp1_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1_reg);
     __    ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted
     __ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
-    __    ld_ptr(Address(str2_reg, 0,  value_offset), tmp2_reg); // hoisted
+    __    load_heap_oop(Address(str2_reg, 0,  value_offset), tmp2_reg); // hoisted
     __ add(result_reg, tmp1_reg, tmp1_reg);
 
     // load str2 (jchar*) base address into tmp2_reg
@@ -3016,6 +3042,7 @@
     MacroAssembler _masm(&cbuf);
     __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
   %}
+
   enc_class enc_repl8b( iRegI src, iRegL dst ) %{
     MacroAssembler _masm(&cbuf);
     Register src_reg = reg_to_register_object($src$$reg);
@@ -3189,15 +3216,15 @@
   c_return_value %{
     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
 #ifdef     _LP64
-    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
-    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
-    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
-    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
+    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
+    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
+    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
+    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
 #else  // !_LP64
-    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
-    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
-    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
-    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
+    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
+    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
+    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
+    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
 #endif
     return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                         (is_outgoing?lo_out:lo_in)[ideal_reg] );
@@ -3207,15 +3234,15 @@
   return_value %{
     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
 #ifdef     _LP64
-    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
-    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
-    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
-    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
+    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
+    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
+    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
+    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
 #else  // !_LP64
-    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
-    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
-    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
-    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
+    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
+    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
+    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
+    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
 #endif
     return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                         (is_outgoing?lo_out:lo_in)[ideal_reg] );
@@ -3408,6 +3435,27 @@
   interface(CONST_INTER);
 %}
 
+// Pointer Immediate
+operand immN()
+%{
+  match(ConN);
+
+  op_cost(10);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// NULL Pointer Immediate
+operand immN0()
+%{
+  predicate(n->get_narrowcon() == 0);
+  match(ConN);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 operand immL() %{
   match(ConL);
   op_cost(40);
@@ -3672,6 +3720,14 @@
   interface(REG_INTER);
 %}
 
+operand iRegN() %{
+  constraint(ALLOC_IN_RC(int_reg));
+  match(RegN);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // Long Register
 operand iRegL() %{
   constraint(ALLOC_IN_RC(long_reg));
@@ -5392,9 +5448,30 @@
   ins_pipe(iload_mem);
 %}
 
+// Load Compressed Pointer
+instruct loadN(iRegN dst, memory mem) %{
+   match(Set dst (LoadN mem));
+   ins_cost(MEMORY_REF_COST);
+   size(4);
+
+   format %{ "LDUW   $mem,$dst\t! compressed ptr" %}
+   ins_encode %{
+     Register base = as_Register($mem$$base);
+     Register index = as_Register($mem$$index);
+     Register dst = $dst$$Register;
+     if (index != G0) {
+       __ lduw(base, index, dst);
+     } else {
+       __ lduw(base, $mem$$disp, dst);
+     }
+   %}
+   ins_pipe(iload_mem);
+%}
+
 // Load Klass Pointer
 instruct loadKlass(iRegP dst, memory mem) %{
   match(Set dst (LoadKlass mem));
+  predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow());
   ins_cost(MEMORY_REF_COST);
   size(4);
 
@@ -5409,6 +5486,30 @@
   ins_pipe(iload_mem);
 %}
 
+// Load Klass Pointer
+instruct loadKlassComp(iRegP dst, memory mem) %{
+  match(Set dst (LoadKlass mem));
+  predicate(n->in(MemNode::Address)->bottom_type()->is_narrow());
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LDUW   $mem,$dst\t! compressed klass ptr" %}
+
+  ins_encode %{
+     Register base = as_Register($mem$$base);
+     Register index = as_Register($mem$$index);
+     Register dst = $dst$$Register;
+     if (index != G0) {
+       __ lduw(base, index, dst);
+     } else {
+       __ lduw(base, $mem$$disp, dst);
+     }
+     // A klass oop is never null, but this pattern is also generated for
+     // non-header klass loads, which can be null.
+     __ decode_heap_oop(dst);
+  %}
+  ins_pipe(iload_mem);
+%}
+
 // Load Short (16bit signed)
 instruct loadS(iRegI dst, memory mem) %{
   match(Set dst (LoadS mem));
@@ -5508,6 +5609,24 @@
   ins_pipe(loadConP_poll);
 %}
 
+instruct loadConN(iRegN dst, immN src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 2);
+  format %{ "SET    $src,$dst\t!ptr" %}
+  ins_encode %{
+    address con = (address)$src$$constant;
+    Register dst = $dst$$Register;
+    if (con == NULL) {
+      __ mov(G0, dst);
+    } else {
+      __ set_oop((jobject)$src$$constant, dst);
+      __ encode_heap_oop(dst);
+    }
+  %}
+  ins_pipe(loadConP);
+
+%}
+
 instruct loadConL(iRegL dst, immL src, o7RegL tmp) %{
   // %%% maybe this should work like loadConD
   match(Set dst src);
@@ -5741,6 +5860,44 @@
   ins_pipe(istore_mem_zero);
 %}
 
+// Store Compressed Pointer
+instruct storeN(memory dst, iRegN src) %{
+   match(Set dst (StoreN dst src));
+   ins_cost(MEMORY_REF_COST);
+   size(4);
+
+   format %{ "STW    $src,$dst\t! compressed ptr" %}
+   ins_encode %{
+     Register base = as_Register($dst$$base);
+     Register index = as_Register($dst$$index);
+     Register src = $src$$Register;
+     if (index != G0) {
+       __ stw(src, base, index);
+     } else {
+       __ stw(src, base, $dst$$disp);
+     }
+   %}
+   ins_pipe(istore_mem_spORreg);
+%}
+
+instruct storeN0(memory dst, immN0 src) %{
+   match(Set dst (StoreN dst src));
+   ins_cost(MEMORY_REF_COST);
+   size(4);
+
+   format %{ "STW    $src,$dst\t! compressed ptr" %}
+   ins_encode %{
+     Register base = as_Register($dst$$base);
+     Register index = as_Register($dst$$index);
+     if (index != G0) {
+       __ stw(0, base, index);
+     } else {
+       __ stw(0, base, $dst$$disp);
+     }
+   %}
+   ins_pipe(istore_mem_zero);
+%}
+
 // Store Double
 instruct storeD( memory mem, regD src) %{
   match(Set mem (StoreD mem src));
@@ -5798,6 +5955,26 @@
   ins_pipe(fstoreD_mem_reg);
 %}
 
+// Convert oop pointer into compressed form
+instruct encodeHeapOop(iRegN dst, iRegP src) %{
+  match(Set dst (EncodeP src));
+  format %{ "SRL    $src,3,$dst\t encodeHeapOop" %}
+  ins_encode %{
+    __ encode_heap_oop($src$$Register, $dst$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct decodeHeapOop(iRegP dst, iRegN src) %{
+  match(Set dst (DecodeN src));
+  format %{ "decode_heap_oop $src, $dst" %}
+  ins_encode %{
+    __ decode_heap_oop($src$$Register, $dst$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+
 // Store Zero into Aligned Packed Bytes
 instruct storeA8B0(memory mem, immI0 zero) %{
   match(Set mem (Store8B mem zero));
@@ -6434,17 +6611,27 @@
 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
   match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
   effect( USE mem_ptr, KILL ccr, KILL tmp1);
-#ifdef _LP64
   format %{
             "MOV    $newval,O7\n\t"
-            "CASXA  [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
+            "CASA_PTR  [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
             "CMP    $oldval,O7\t\t! See if we made progress\n\t"
             "MOV    1,$res\n\t"
             "MOVne  xcc,R_G0,$res"
   %}
+#ifdef _LP64
   ins_encode( enc_casx(mem_ptr, oldval, newval),
               enc_lflags_ne_to_boolean(res) );
 #else
+  ins_encode( enc_casi(mem_ptr, oldval, newval),
+              enc_iflags_ne_to_boolean(res) );
+#endif
+  ins_pipe( long_memory_op );
+%}
+
+instruct compareAndSwapN_bool_comp(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp, flagsReg ccr ) %{
+  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
+  effect( USE mem_ptr, KILL ccr, KILL tmp);
+
   format %{
             "MOV    $newval,O7\n\t"
             "CASA   [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
@@ -6452,9 +6639,18 @@
             "MOV    1,$res\n\t"
             "MOVne  icc,R_G0,$res"
   %}
-  ins_encode( enc_casi(mem_ptr, oldval, newval),
-              enc_iflags_ne_to_boolean(res) );
-#endif
+  ins_encode %{
+    Register Rmem = reg_to_register_object($mem_ptr$$reg);
+    Register Rold = reg_to_register_object($oldval$$reg);
+    Register Rnew = reg_to_register_object($newval$$reg);
+    Register Rres = reg_to_register_object($res$$reg);
+
+    __ cas(Rmem, Rold, Rnew);
+    __ cmp( Rold, Rnew );
+    __ mov(1, Rres);
+    __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
+  %}
+
   ins_pipe( long_memory_op );
 %}
 
@@ -8607,6 +8803,17 @@
   ins_pipe(partial_subtype_check_pipe);
 %}
 
+
+instruct compP_iRegN_immN0(flagsRegP pcc, iRegN op1, immN0 op2 ) %{
+  match(Set pcc (CmpN op1 op2));
+
+  size(4);
+  format %{ "CMP    $op1,$op2\t! ptr" %}
+  opcode(Assembler::subcc_op3, Assembler::arith_op);
+  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
+  ins_pipe(ialu_cconly_reg_imm);
+%}
+
 // ============================================================================
 // inlined locking and unlocking
 
@@ -8648,9 +8855,10 @@
   ins_pipe(long_memory_op);
 %}
 
-instruct string_compare(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2, notemp_iRegI result, flagsReg ccr) %{
+instruct string_compare(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2, notemp_iRegI result,
+                        o7RegI tmp3, flagsReg ccr) %{
   match(Set result (StrComp str1 str2));
-  effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL ccr);
+  effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL ccr, KILL tmp3);
   ins_cost(300);
   format %{ "String Compare $str1,$str2 -> $result" %}
   ins_encode( enc_String_Compare(str1, str2, tmp1, tmp2, result) );
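Among the new patterns in this .ad file, compareAndSwapN_bool_comp performs a 32-bit CAS on an already-encoded oop field. A hedged, stand-alone C++ sketch of the equivalent operation follows; std::atomic stands in for the CASA instruction and the helper name is hypothetical, not the VM's code.

#include <atomic>
#include <cstdint>

// 32-bit compare-and-swap on an encoded (narrow) oop field, returning
// true on success, like the bool-producing CAS patterns above.
static bool cas_narrow_oop(std::atomic<uint32_t>* field,
                           uint32_t expected_enc, uint32_t new_enc) {
  uint32_t observed = expected_enc;
  return field->compare_exchange_strong(observed, new_enc);
}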
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -127,6 +127,7 @@
 
     // setup thread register
     __ ld_ptr(thread.as_address(), G2_thread);
+    __ reinit_heapbase();
 
 #ifdef ASSERT
     // make sure we have no pending exceptions
@@ -896,6 +897,7 @@
   //      super: O2, argument, not changed
   //      raddr: O7, blown by call
   address generate_partial_subtype_check() {
+    __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
     address start = __ pc();
     Label loop, miss;
@@ -914,7 +916,7 @@
 
 #if defined(COMPILER2) && !defined(_LP64)
     // Do not use a 'save' because it blows the 64-bit O registers.
-    __ add(SP,-4*wordSize,SP);  // Make space for 4 temps
+    __ add(SP,-4*wordSize,SP);  // Make space for 4 temps (stack must be 2 words aligned)
     __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
     __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
     __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
@@ -934,6 +936,17 @@
     Register L2_super   = L2;
     Register L3_index   = L3;
 
+#ifdef _LP64
+    Register L4_ooptmp  = L4;
+
+    if (UseCompressedOops) {
+      // This must stay under the UseCompressedOops check: we rely on the fact
+      // that L4 is not clobbered by C2 on 32-bit platforms, where we do an
+      // explicit save on the stack (see several lines above).
+      __ encode_heap_oop(Rsuper, L4_ooptmp);
+    }
+#endif
+
     inc_counter_np(SharedRuntime::_partial_subtype_ctr, L0, L1);
 
     __ ld_ptr( Rsub, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), L3 );
@@ -942,18 +955,33 @@
     __ clr(L3_index);           // zero index
     // Load a little early; will load 1 off the end of the array.
     // Ok for now; revisit if we have other uses of this routine.
-    __ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
-    __ align(CodeEntryAlignment);
-
+    if (UseCompressedOops) {
+      __ ld(L1_ary_ptr,0,L2_super);// Will load a little early
+    } else {
+      __ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
+    }
+
+    assert(heapOopSize != 0, "heapOopSize should be initialized");
     // The scan loop
     __ BIND(loop);
-    __ add(L1_ary_ptr,wordSize,L1_ary_ptr); // Bump by OOP size
+    __ add(L1_ary_ptr, heapOopSize, L1_ary_ptr); // Bump by OOP size
     __ cmp(L3_index,L0_ary_len);
     __ br(Assembler::equal,false,Assembler::pn,miss);
     __ delayed()->inc(L3_index); // Bump index
-    __ subcc(L2_super,Rsuper,Rret);   // Check for match; zero in Rret for a hit
-    __ brx( Assembler::notEqual, false, Assembler::pt, loop );
-    __ delayed()->ld_ptr(L1_ary_ptr,0,L2_super); // Will load a little early
+
+    if (UseCompressedOops) {
+#ifdef  _LP64
+      __ subcc(L2_super,L4_ooptmp,Rret);   // Check for match; zero in Rret for a hit
+      __ br( Assembler::notEqual, false, Assembler::pt, loop );
+      __ delayed()->ld(L1_ary_ptr,0,L2_super);// Will load a little early
+#else
+      ShouldNotReachHere();
+#endif
+    } else {
+      __ subcc(L2_super,Rsuper,Rret);   // Check for match; zero in Rret for a hit
+      __ brx( Assembler::notEqual, false, Assembler::pt, loop );
+      __ delayed()->ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
+    }
 
     // Got a hit; report success; set cache.  Cache load doesn't
     // happen here; for speed it is directly emitted by the compiler.
@@ -1107,7 +1135,6 @@
     }
 #endif // 0
   }
-
   //
   //  Generate post-write barrier for array.
   //
@@ -1148,8 +1175,8 @@
 
           Label L_loop;
 
-          __ sll_ptr(count, LogBytesPerOop, count);
-          __ sub(count, BytesPerOop, count);
+          __ sll_ptr(count, LogBytesPerHeapOop, count);
+          __ sub(count, BytesPerHeapOop, count);
           __ add(count, addr, count);
           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
           __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
@@ -1171,7 +1198,6 @@
         ShouldNotReachHere();
 
     }
-
   }
 
 
@@ -2226,7 +2252,12 @@
     __ mov(count, G5);
     gen_write_ref_array_pre_barrier(G1, G5);
   #ifdef _LP64
-    generate_disjoint_long_copy_core(aligned);
+    assert_clean_int(count, O3);     // Make sure 'count' is clean int.
+    if (UseCompressedOops) {
+      generate_disjoint_int_copy_core(aligned);
+    } else {
+      generate_disjoint_long_copy_core(aligned);
+    }
   #else
     generate_disjoint_int_copy_core(aligned);
   #endif
@@ -2274,10 +2305,14 @@
         StubRoutines::arrayof_oop_disjoint_arraycopy() :
         disjoint_oop_copy_entry;
 
-    array_overlap_test(nooverlap_target, LogBytesPerWord);
+    array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
 
   #ifdef _LP64
-    generate_conjoint_long_copy_core(aligned);
+    if (UseCompressedOops) {
+      generate_conjoint_int_copy_core(aligned);
+    } else {
+      generate_conjoint_long_copy_core(aligned);
+    }
   #else
     generate_conjoint_int_copy_core(aligned);
   #endif
@@ -2377,8 +2412,6 @@
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
 
-    int klass_off = oopDesc::klass_offset_in_bytes();
-
     gen_write_ref_array_pre_barrier(G1, G5);
 
 
@@ -2395,7 +2428,7 @@
     { Label L;
       __ mov(O3, G1);           // spill: overlap test smashes O3
       __ mov(O4, G4);           // spill: overlap test smashes O4
-      array_overlap_test(L, LogBytesPerWord);
+      array_overlap_test(L, LogBytesPerHeapOop);
       __ stop("checkcast_copy within a single array");
       __ bind(L);
       __ mov(G1, O3);
@@ -2429,18 +2462,18 @@
 
     __ bind(store_element);
     // deccc(G1_remain);                // decrement the count (hoisted)
-    __ st_ptr(G3_oop, O1_to, O5_offset); // store the oop
-    __ inc(O5_offset, wordSize);        // step to next offset
+    __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
+    __ inc(O5_offset, heapOopSize);     // step to next offset
     __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
     __ delayed()->set(0, O0);           // return -1 on success
 
     // ======== loop entry is here ========
     __ bind(load_element);
-    __ ld_ptr(O0_from, O5_offset, G3_oop);  // load the oop
+    __ load_heap_oop(O0_from, O5_offset, G3_oop);  // load the oop
     __ br_null(G3_oop, true, Assembler::pt, store_element);
     __ delayed()->deccc(G1_remain);     // decrement the count
 
-    __ ld_ptr(G3_oop, klass_off, G4_klass); // query the object klass
+    __ load_klass(G3_oop, G4_klass); // query the object klass
 
     generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
                         // branch to this on success:
@@ -2642,17 +2675,23 @@
 
     BLOCK_COMMENT("arraycopy argument klass checks");
     //  get src->klass()
-    __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
+    if (UseCompressedOops) {
+      __ delayed()->nop(); // ??? not good
+      __ load_klass(src, G3_src_klass);
+    } else {
+      __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
+    }
 
 #ifdef ASSERT
     //  assert(src->klass() != NULL);
     BLOCK_COMMENT("assert klasses not null");
     { Label L_a, L_b;
       __ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL
-      __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
+      __ delayed()->nop();
       __ bind(L_a);
       __ stop("broken null klass");
       __ bind(L_b);
+      __ load_klass(dst, G4_dst_klass);
       __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
       __ delayed()->mov(G0, G4_dst_klass);      // scribble the temp
       BLOCK_COMMENT("assert done");
@@ -2673,12 +2712,19 @@
     // Load 32-bits signed value. Use br() instruction with it to check icc.
     __ lduw(G3_src_klass, lh_offset, G5_lh);
 
+    if (UseCompressedOops) {
+      __ load_klass(dst, G4_dst_klass);
+    }
     // Handle objArrays completely differently...
     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
     __ set(objArray_lh, O5_temp);
     __ cmp(G5_lh,       O5_temp);
     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
-    __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
+    if (UseCompressedOops) {
+      __ delayed()->nop();
+    } else {
+      __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
+    }
 
     //  if (src->klass() != dst->klass()) return -1;
     __ cmp(G3_src_klass, G4_dst_klass);
@@ -2777,8 +2823,8 @@
 
     __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
     __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
-    __ sll_ptr(src_pos, LogBytesPerOop, src_pos);
-    __ sll_ptr(dst_pos, LogBytesPerOop, dst_pos);
+    __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
+    __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
     __ add(src, src_pos, from);       // src_addr
     __ add(dst, dst_pos, to);         // dst_addr
   __ BIND(L_plain_copy);
@@ -2801,8 +2847,8 @@
       // Marshal the base address arguments now, freeing registers.
       __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
       __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
-      __ sll_ptr(src_pos, LogBytesPerOop, src_pos);
-      __ sll_ptr(dst_pos, LogBytesPerOop, dst_pos);
+      __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
+      __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
       __ add(src, src_pos, from);               // src_addr
       __ add(dst, dst_pos, to);                 // dst_addr
       __ signx(length, count);                  // length (reloaded)
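The arraycopy changes above all follow from one fact: an in-heap oop is 4 bytes with compressed oops and 8 bytes otherwise, so the oop copy stubs reuse the int copy cores and scale offsets by LogBytesPerHeapOop. A small sketch of that sizing (helper names are assumptions, not VM symbols):

#include <cstddef>

// heapOopSize / LogBytesPerHeapOop as used by the stubs above.
static inline size_t heap_oop_size(bool use_compressed_oops) {
  return use_compressed_oops ? 4 : 8;
}

static inline char* oop_element_addr(char* elements_base, size_t index,
                                     bool use_compressed_oops) {
  const int log_bytes_per_heap_oop = use_compressed_oops ? 2 : 3;
  return elements_base + (index << log_bytes_per_heap_oop);
}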
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -591,7 +591,10 @@
   address entry = __ pc();
   Label slow_path;
 
-  if ( UseFastAccessorMethods) {
+
+  // XXX: with compressed oops, the pointer load and decode do not fit in the
+  // delay slot and would clobber G1.
+  if ( UseFastAccessorMethods && !UseCompressedOops ) {
     // Check if we need to reach a safepoint and generate full interpreter
     // frame if so.
     Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
@@ -953,6 +956,7 @@
   // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD
 
   __ restore_thread(L7_thread_cache); // restore G2_thread
+  __ reinit_heapbase();
 
   // must we block?
 
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -462,8 +462,8 @@
   transition(itos, atos);
   // Otos_i: index
   // tos: array
-  __ index_check(O2, Otos_i, LogBytesPerWord, G3_scratch, O3);
-  __ ld_ptr(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
+  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
+  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
   __ verify_oop(Otos_i);
 }
 
@@ -736,15 +736,16 @@
   // O2: index
   // O3: array
   __ verify_oop(Otos_i);
-  __ index_check_without_pop(O3, O2, LogBytesPerWord, G3_scratch, O1);
+  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);
 
   // do array store check - check for NULL value first
   __ br_null( Otos_i, false, Assembler::pn, is_null );
-  __ delayed()->
-     ld_ptr(O3,     oopDesc::klass_offset_in_bytes(), O4); // get array klass
+  __ delayed()->nop();
+
+  __ load_klass(O3, O4); // get array klass
+  __ load_klass(Otos_i, O5); // get value klass
 
   // do fast instanceof cache test
-  __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O5); // get value klass
 
   __ ld_ptr(O4,     sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(),  O4);
 
@@ -766,7 +767,7 @@
 
   // Store is OK.
   __ bind(store_ok);
-  __ st_ptr(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+  __ store_heap_oop(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
   // Quote from rememberedSet.hpp: For objArrays, the precise card
   // corresponding to the pointer store is dirtied so we don't need to
   // scavenge the entire array.
@@ -777,7 +778,7 @@
   __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
 
   __ bind(is_null);
-  __ st_ptr(Otos_i, element);
+  __ store_heap_oop(Otos_i, element);
   __ profile_null_seen(G3_scratch);
   __ inc(Lesp, 3* Interpreter::stackElementSize());     // adj sp (pops array, index and value)
   __ bind(done);
@@ -1833,7 +1834,7 @@
     assert(state == vtos, "only valid state");
     __ mov(G0, G3_scratch);
     __ access_local_ptr(G3_scratch, Otos_i);
-    __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O2);
+    __ load_klass(Otos_i, O2);
     __ set(JVM_ACC_HAS_FINALIZER, G3);
     __ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
     __ andcc(G3, O2, G0);
@@ -2078,7 +2079,7 @@
   __ delayed() ->cmp(Rflags, itos);
 
   // atos
-  __ ld_ptr(Rclass, Roffset, Otos_i);
+  __ load_heap_oop(Rclass, Roffset, Otos_i);
   __ verify_oop(Otos_i);
   __ push(atos);
   if (!is_static) {
@@ -2259,7 +2260,7 @@
       __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
       break;
     case Bytecodes::_fast_agetfield:
-      __ ld_ptr(Otos_i, Roffset, Otos_i);
+      __ load_heap_oop(Otos_i, Roffset, Otos_i);
       break;
     default:
       ShouldNotReachHere();
@@ -2448,7 +2449,7 @@
     // atos
     __ pop_ptr();
     __ verify_oop(Otos_i);
-    __ st_ptr(Otos_i, Rclass, Roffset);
+    __ store_heap_oop(Otos_i, Rclass, Roffset);
     __ store_check(G1_scratch, Rclass, Roffset);
     __ ba(false, checkVolatile);
     __ delayed()->tst(Lscratch);
@@ -2490,7 +2491,7 @@
     __ pop_ptr();
     pop_and_check_object(Rclass);
     __ verify_oop(Otos_i);
-    __ st_ptr(Otos_i, Rclass, Roffset);
+    __ store_heap_oop(Otos_i, Rclass, Roffset);
     __ store_check(G1_scratch, Rclass, Roffset);
     patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
     __ ba(false, checkVolatile);
@@ -2645,7 +2646,7 @@
       __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
       break;
     case Bytecodes::_fast_aputfield:
-      __ st_ptr(Otos_i, Rclass, Roffset);
+      __ store_heap_oop(Otos_i, Rclass, Roffset);
       __ store_check(G1_scratch, Rclass, Roffset);
       break;
     default:
@@ -2688,7 +2689,7 @@
   __ verify_oop(Rreceiver);
   __ null_check(Rreceiver);
   if (state == atos) {
-    __ ld_ptr(Rreceiver, Roffset, Otos_i);
+    __ load_heap_oop(Rreceiver, Roffset, Otos_i);
   } else if (state == itos) {
     __ ld (Rreceiver, Roffset, Otos_i) ;
   } else if (state == ftos) {
@@ -2790,7 +2791,7 @@
 
   // get receiver klass
   __ null_check(O0, oopDesc::klass_offset_in_bytes());
-  __ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), Rrecv);
+  __ load_klass(O0, Rrecv);
   __ verify_oop(Rrecv);
 
   __ profile_virtual_call(Rrecv, O4);
@@ -2958,7 +2959,7 @@
 
   // get receiver klass
   __ null_check(O0, oopDesc::klass_offset_in_bytes());
-  __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), RklassOop);
+  __ load_klass(O0, RklassOop);
   __ verify_oop(RklassOop);
 
   // Special case of invokeinterface called for virtual method of
@@ -3221,7 +3222,7 @@
     __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
   }
   __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());       // mark
-  __ st_ptr(RinstanceKlass, RallocatedObject, oopDesc::klass_offset_in_bytes()); // klass
+  __ store_klass(RinstanceKlass, RallocatedObject); // klass
 
   {
     SkipIfEqual skip_if(
@@ -3277,7 +3278,7 @@
   __ delayed()->nop();
 
   // Get value klass in RobjKlass
-  __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
+  __ load_klass(Otos_i, RobjKlass); // get value klass
 
   // Get constant pool tag
   __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
@@ -3295,13 +3296,14 @@
   __ pop_ptr(Otos_i, G3_scratch); // restore receiver
 
   __ br(Assembler::always, false, Assembler::pt, resolved);
-  __ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
+  __ delayed()->nop();
 
   // Extract target class from constant pool
   __ bind(quicked);
   __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
   __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
   __ bind(resolved);
+  __ load_klass(Otos_i, RobjKlass); // get value klass
 
   // Generate a fast subtype check.  Branch to cast_ok if no
   // failure.  Throw exception if failure.
@@ -3334,7 +3336,7 @@
   __ delayed()->nop();
 
   // Get value klass in RobjKlass
-  __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
+  __ load_klass(Otos_i, RobjKlass); // get value klass
 
   // Get constant pool tag
   __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
@@ -3352,7 +3354,7 @@
   __ pop_ptr(Otos_i, G3_scratch); // restore receiver
 
   __ br(Assembler::always, false, Assembler::pt, resolved);
-  __ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
+  __ delayed()->nop();
 
 
   // Extract target class from constant pool
@@ -3361,6 +3363,7 @@
   __ get_constant_pool(Lscratch);
   __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
   __ bind(resolved);
+  __ load_klass(Otos_i, RobjKlass); // get value klass
 
   // Generate a fast subtype check.  Branch to cast_ok if no
   // failure.  Return 0 if failure.
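The interpreter changes above replace raw ld_ptr/st_ptr of object fields and array elements with load_heap_oop/store_heap_oop, and scale aaload/aastore index checks by the compressed element size. As a rough sketch of what an aaload now computes (a hypothetical helper, not the template code):

#include <cstdint>

// Element address and decode for an objArray load, mirroring the
// UseCompressedOops ? 2 : LogBytesPerWord index scaling above.
static uintptr_t aaload_sketch(const char* elements_base, int index,
                               bool use_compressed_oops, uintptr_t heap_base) {
  if (use_compressed_oops) {
    uint32_t narrow = *(const uint32_t*)(elements_base + ((uintptr_t)index << 2));
    return narrow == 0 ? 0 : heap_base + ((uintptr_t)narrow << 3);
  }
  return *(const uintptr_t*)(elements_base + ((uintptr_t)index << 3));   // LP64 word
}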
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -64,6 +64,15 @@
     if (FLAG_IS_DEFAULT(UseInlineCaches)) {
       UseInlineCaches         = false;
     }
+#ifdef _LP64
+    // The single-issue niagara1 is slower with CompressedOops,
+    // but later niagaras are fine.
+    if (!is_niagara1_plus()) {
+      if (FLAG_IS_DEFAULT(UseCompressedOops)) {
+        FLAG_SET_ERGO(bool, UseCompressedOops, false);
+      }
+    }
+#endif // _LP64
 #ifdef COMPILER2
     // Indirect branch is the same cost as direct
     if (FLAG_IS_DEFAULT(UseJumpTables)) {
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -60,7 +60,7 @@
 
   // get receiver klass
   address npe_addr = __ pc();
-  __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
+  __ load_klass(O0, G3_scratch);
 
   // set methodOop (in case of interpreted method), and destination address
   int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
@@ -131,7 +131,7 @@
 
   // get receiver klass (also an implicit null-check)
   address npe_addr = __ pc();
-  __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_klassOop);
+  __ load_klass(O0, G3_klassOop);
   __ verify_oop(G3_klassOop);
 
   // Push a new window to get some temp registers.  This chops the head of all
@@ -237,11 +237,16 @@
   else {
     const int slop = 2*BytesPerInstWord; // sethi;add  (needed for long offsets)
     if (is_vtable_stub) {
-      const int basic = 5*BytesPerInstWord; // ld;ld;ld,jmp,nop
+      // ld;ld;ld,jmp,nop
+      const int basic = 5*BytesPerInstWord +
+                        // shift;add for load_klass
+                        (UseCompressedOops ? 2*BytesPerInstWord : 0);
       return basic + slop;
     } else {
       // save, ld, ld, sll, and, add, add, ld, cmp, br, add, ld, add, ld, ld, jmp, restore, sethi, jmpl, restore
-      const int basic = (20 LP64_ONLY(+ 6)) * BytesPerInstWord;
+      const int basic = (20 LP64_ONLY(+ 6)) * BytesPerInstWord +
+                        // shift;add for load_klass
+                        (UseCompressedOops ? 2*BytesPerInstWord : 0);
       return (basic + slop);
     }
   }
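pd_code_size_limit above budgets two extra instructions (shift and add) whenever load_klass must decode a compressed klass. The same accounting in a throwaway helper (purely illustrative):

// Size budget for a vtable stub: ld;ld;ld;jmp;nop plus the decode added by
// load_klass under compressed oops, plus slop for long offsets (sethi;add).
static int vtable_stub_size_sketch(bool use_compressed_oops, int bytes_per_inst) {
  const int slop  = 2 * bytes_per_inst;
  const int basic = 5 * bytes_per_inst +
                    (use_compressed_oops ? 2 * bytes_per_inst : 0);
  return basic + slop;
}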
--- a/hotspot/src/cpu/x86/vm/assembler_x86_64.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/assembler_x86_64.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -127,6 +127,7 @@
 
 bool Assembler::reachable(AddressLiteral adr) {
   int64_t disp;
+
   // None will force a 64bit literal to the code stream. Likely a placeholder
   // for something that will be patched later and we need to be certain it will
   // always be reachable.
@@ -636,7 +637,7 @@
   case 0x8A: // movb r, a
   case 0x8B: // movl r, a
   case 0x8F: // popl a
-    debug_only(has_disp32 = true);
+    debug_only(has_disp32 = true;)
     break;
 
   case 0x68: // pushq #32
@@ -2891,7 +2892,7 @@
 }
 
 // scans rcx double words (m64) at [rdi] for occurrence of rax
-void Assembler::repne_scan() {
+void Assembler::repne_scanq() {
   // REPNE/REPNZ
   emit_byte(0xF2);
   // SCASQ
@@ -2899,6 +2900,14 @@
   emit_byte(0xAF);
 }
 
+void Assembler::repne_scanl() {
+  // REPNE/REPNZ
+  emit_byte(0xF2);
+  // SCASL
+  emit_byte(0xAF);
+}
+
+
 void Assembler::setb(Condition cc, Register dst) {
   assert(0 <= cc && cc < 16, "illegal cc");
   int encode = prefix_and_encode(dst->encoding(), true);
@@ -4597,7 +4606,6 @@
 
   // pass args on stack, only touch rax
   pushq(reg);
-
   // avoid using pushptr, as it modifies scratch registers
   // and our contract is not to modify anything
   ExternalAddress buffer((address)b);
@@ -4664,9 +4672,9 @@
     JavaThread* thread = JavaThread::current();
     JavaThreadState saved_state = thread->thread_state();
     thread->set_thread_state(_thread_in_vm);
-    ttyLocker ttyl;
 #ifndef PRODUCT
     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+      ttyLocker ttyl;
       BytecodeCounter::print();
     }
 #endif
@@ -4674,6 +4682,7 @@
     // XXX correct this offset for amd64
     // This is the value of eip which points to where verify_oop will return.
     if (os::message_box(msg, "Execution stopped, print registers?")) {
+      ttyLocker ttyl;
       tty->print_cr("rip = 0x%016lx", pc);
       tty->print_cr("rax = 0x%016lx", regs[15]);
       tty->print_cr("rbx = 0x%016lx", regs[12]);
@@ -4695,6 +4704,7 @@
     }
     ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
   } else {
+    ttyLocker ttyl;
     ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                     msg);
   }
@@ -4891,7 +4901,7 @@
   movq(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
   // set klass to intArrayKlass
   movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
-  movq(Address(top, oopDesc::klass_offset_in_bytes()), t1);
+  store_klass(top, t1);
 
   // refill the tlab with an eden allocation
   bind(do_refill);
@@ -4938,7 +4948,6 @@
   assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
   Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
-  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
   Address saved_mark_addr(lock_reg, 0);
 
   if (PrintBiasedLockingStatistics && counters == NULL)
@@ -4962,7 +4971,7 @@
   jcc(Assembler::notEqual, cas_label);
   // The bias pattern is present in the object's header. Need to check
   // whether the bias owner and the epoch are both still current.
-  movq(tmp_reg, klass_addr);
+  load_klass(tmp_reg, obj_reg);
   movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
   orq(tmp_reg, r15_thread);
   xorq(tmp_reg, swap_reg);
@@ -5037,7 +5046,7 @@
   //
   // FIXME: due to a lack of registers we currently blow away the age
   // bits in this situation. Should attempt to preserve them.
-  movq(tmp_reg, klass_addr);
+  load_klass(tmp_reg, obj_reg);
   movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
   orq(tmp_reg, r15_thread);
   if (os::is_MP()) {
@@ -5068,7 +5077,7 @@
   //
   // FIXME: due to a lack of registers we currently blow away the age
   // bits in this situation. Should attempt to preserve them.
-  movq(tmp_reg, klass_addr);
+  load_klass(tmp_reg, obj_reg);
   movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
   if (os::is_MP()) {
     lock();
@@ -5104,6 +5113,113 @@
 }
 
 
+void MacroAssembler::load_klass(Register dst, Register src) {
+  if (UseCompressedOops) {
+    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+    decode_heap_oop_not_null(dst);
+  } else {
+    movq(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+  }
+}
+
+void MacroAssembler::store_klass(Register dst, Register src) {
+  if (UseCompressedOops) {
+    encode_heap_oop_not_null(src);
+    // zero the entire klass field first as the gap needs to be zeroed too.
+    movptr(Address(dst, oopDesc::klass_offset_in_bytes()), NULL_WORD);
+    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
+  } else {
+    movq(Address(dst, oopDesc::klass_offset_in_bytes()), src);
+  }
+}
+
+void MacroAssembler::load_heap_oop(Register dst, Address src) {
+  if (UseCompressedOops) {
+    movl(dst, src);
+    decode_heap_oop(dst);
+  } else {
+    movq(dst, src);
+  }
+}
+
+void MacroAssembler::store_heap_oop(Address dst, Register src) {
+  if (UseCompressedOops) {
+    assert(!dst.uses(src), "not enough registers");
+    encode_heap_oop(src);
+    movl(dst, src);
+  } else {
+    movq(dst, src);
+  }
+}
+
+// Algorithm must match oop.inline.hpp encode_heap_oop.
+void MacroAssembler::encode_heap_oop(Register r) {
+  assert (UseCompressedOops, "should be compressed");
+#ifdef ASSERT
+  Label ok;
+  pushq(rscratch1); // cmpptr trashes rscratch1
+  cmpptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
+  jcc(Assembler::equal, ok);
+  stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
+  bind(ok);
+  popq(rscratch1);
+#endif
+  verify_oop(r);
+  testq(r, r);
+  cmovq(Assembler::equal, r, r12_heapbase);
+  subq(r, r12_heapbase);
+  shrq(r, LogMinObjAlignmentInBytes);
+}
+
+void MacroAssembler::encode_heap_oop_not_null(Register r) {
+  assert (UseCompressedOops, "should be compressed");
+#ifdef ASSERT
+  Label ok;
+  testq(r, r);
+  jcc(Assembler::notEqual, ok);
+  stop("null oop passed to encode_heap_oop_not_null");
+  bind(ok);
+#endif
+  verify_oop(r);
+  subq(r, r12_heapbase);
+  shrq(r, LogMinObjAlignmentInBytes);
+}
+
+void  MacroAssembler::decode_heap_oop(Register r) {
+  assert (UseCompressedOops, "should be compressed");
+#ifdef ASSERT
+  Label ok;
+  pushq(rscratch1);
+  cmpptr(r12_heapbase,
+         ExternalAddress((address)Universe::heap_base_addr()));
+  jcc(Assembler::equal, ok);
+  stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
+  bind(ok);
+  popq(rscratch1);
+#endif
+
+  Label done;
+  shlq(r, LogMinObjAlignmentInBytes);
+  jccb(Assembler::equal, done);
+  addq(r, r12_heapbase);
+#if 0
+   // alternate decoding probably a wash.
+   testq(r, r);
+   jccb(Assembler::equal, done);
+   leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+#endif
+  bind(done);
+  verify_oop(r);
+}
+
+void  MacroAssembler::decode_heap_oop_not_null(Register r) {
+  assert (UseCompressedOops, "should only be used for compressed headers");
+  // Cannot assert, unverified entry point counts instructions (see .ad file)
+  // vtableStubs also counts instructions in pd_code_size_limit.
+  assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
+  leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+}
+
 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
   switch (cond) {
     // Note some conditions are synonyms for others
@@ -5173,3 +5289,9 @@
     movq(Address(tmp, (-i*os::vm_page_size())), size );
   }
 }
+
+void MacroAssembler::reinit_heapbase() {
+  if (UseCompressedOops) {
+    movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
+  }
+}
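The x86_64 encode path above avoids a branch on null by using testq/cmovq: a null oop is first replaced with the heap base so the subtraction yields a zero narrow oop, and decode only adds the base back when the shifted value is non-zero. Sketched in plain C++ (illustrative only, not the macro assembler code):

#include <cstdint>

static uint32_t encode_heap_oop_x86_sketch(uintptr_t oop, uintptr_t heap_base) {
  uintptr_t src = (oop == 0) ? heap_base : oop;   // testq; cmovq(equal, r, r12_heapbase)
  return (uint32_t)((src - heap_base) >> 3);      // subq; shrq LogMinObjAlignmentInBytes
}

static uintptr_t decode_heap_oop_x86_sketch(uint32_t narrow, uintptr_t heap_base) {
  uintptr_t r = (uintptr_t)narrow << 3;           // shlq
  if (r != 0) r += heap_base;                     // jccb(equal, done); addq(r, r12_heapbase)
  return r;
}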
--- a/hotspot/src/cpu/x86/vm/assembler_x86_64.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/assembler_x86_64.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -37,7 +37,7 @@
 #else
     n_int_register_parameters_c   = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
     n_float_register_parameters_c = 8,  // xmm0 - xmm7 (c_farg0, c_farg1, ... )
-#endif
+#endif  // _WIN64
     n_int_register_parameters_j   = 6, // j_rarg0, j_rarg1, ...
     n_float_register_parameters_j = 8  // j_farg0, j_farg1, ...
   };
@@ -77,7 +77,7 @@
 REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
 REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
 
-#endif
+#endif  // _WIN64
 
 // Symbolically name the register arguments used by the Java calling convention.
 // We have control over the convention for java so we can do what we please.
@@ -105,7 +105,7 @@
 #else
 REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
 REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
-#endif /* _WIN64 */
+#endif // _WIN64
 REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
 
 REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
@@ -120,7 +120,8 @@
 REGISTER_DECLARATION(Register, rscratch1, r10);  // volatile
 REGISTER_DECLARATION(Register, rscratch2, r11);  // volatile
 
-REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
+REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
+REGISTER_DECLARATION(Register, r15_thread, r15);   // callee-saved
 
 #endif // _LP64
 
@@ -785,7 +786,8 @@
   void rep_movl();
   void rep_movq();
   void rep_set();
-  void repne_scan();
+  void repne_scanl();
+  void repne_scanq();
   void setb(Condition cc, Register dst);
 
   void clflush(Address adr);
@@ -1099,6 +1101,17 @@
   void movbool(Address dst, Register src);
   void testbool(Register dst);
 
+  // oop manipulations
+  void load_klass(Register dst, Register src);
+  void store_klass(Register dst, Register src);
+
+  void load_heap_oop(Register dst, Address src);
+  void store_heap_oop(Address dst, Register src);
+  void encode_heap_oop(Register r);
+  void decode_heap_oop(Register r);
+  void encode_heap_oop_not_null(Register r);
+  void decode_heap_oop_not_null(Register r);
+
   // Stack frame creation/removal
   void enter();
   void leave();
@@ -1250,6 +1263,9 @@
   void verify_oop(Register reg, const char* s = "broken oop");
   void verify_oop_addr(Address addr, const char * s = "broken oop addr");
 
+  // If the heap base register is in use, reinitialize it with the correct value.
+  void reinit_heapbase();
+
   // only if +VerifyFPU
   void verify_FPU(int stack_depth, const char* s = "illegal FPU state") {}
 
--- a/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -218,7 +218,7 @@
 void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
          "con_size_in_bytes is not multiple of alignment");
-  const int hdr_size_in_bytes = oopDesc::header_size_in_bytes();
+  const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
 
   initialize_header(obj, klass, noreg, t1, t2);
 
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -267,15 +267,29 @@
   addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
   // Scan rcx words at [rdi] for occurrence of rax
   // Set NZ/Z based on last compare
-  repne_scan();
-  // Not equal?
-  jcc(Assembler::notEqual, not_subtype);
+
+  // This part is a bit tricky: values in the supers array can be 32 or 64 bits
+  // wide, and objArray elements are always stored encoded, so the value must be
+  // encoded before the repne scan.
+  if (UseCompressedOops) {
+    encode_heap_oop(rax);
+    repne_scanl();
+    // Not equal?
+    jcc(Assembler::notEqual, not_subtype);
+    // decode heap oop here for movq
+    decode_heap_oop(rax);
+  } else {
+    repne_scanq();
+    jcc(Assembler::notEqual, not_subtype);
+  }
   // Must be equal but missed in cache.  Update cache.
   movq(Address(Rsub_klass, sizeof(oopDesc) +
                Klass::secondary_super_cache_offset_in_bytes()), rax);
   jmp(ok_is_subtype);
 
   bind(not_subtype);
+  // decode heap oop here for miss
+  if (UseCompressedOops) decode_heap_oop(rax);
   profile_typecheck_failed(rcx); // blows rcx
 }
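The scan above relies on repne scas over the secondary-supers array; with compressed oops the sought klass in rax is encoded first so that 4-byte slots can be compared, then decoded again on both the hit and miss paths. Roughly, as a hypothetical helper (not the interpreter code), and matching the SPARC loop earlier in this changeset:

#include <cstdint>

// What repne_scanl effectively computes: compare 4-byte encoded entries
// against the encoded super klass; a hit leaves the cache update to the caller.
static bool scan_secondary_supers_sketch(const uint32_t* supers, uint64_t count,
                                         uint32_t encoded_super) {
  for (uint64_t i = 0; i < count; i++) {
    if (supers[i] == encoded_super) return true;
  }
  return false;
}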
 
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -375,7 +375,7 @@
     __ cmpl(rdx, atos);
     __ jcc(Assembler::notEqual, notObj);
     // atos
-    __ movq(rax, field_address);
+    __ load_heap_oop(rax, field_address);
     __ jmp(xreturn_path);
 
     __ bind(notObj);
--- a/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -106,6 +106,7 @@
 REGISTER_DEFINITION(Register, rscratch1);
 REGISTER_DEFINITION(Register, rscratch2);
 
+REGISTER_DEFINITION(Register, r12_heapbase);
 REGISTER_DEFINITION(Register, r15_thread);
 #endif // AMD64
 
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -789,7 +789,7 @@
 
   {
     __ verify_oop(holder);
-    __ movq(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
+    __ load_klass(temp, receiver);
     __ verify_oop(temp);
 
     __ cmpq(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
@@ -1297,21 +1297,26 @@
 
   const Register ic_reg = rax;
   const Register receiver = j_rarg0;
+  const Register tmp = rdx;
 
   Label ok;
   Label exception_pending;
 
   __ verify_oop(receiver);
-  __ cmpq(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
+  __ pushq(tmp); // spill (any other registers free here???)
+  __ load_klass(tmp, receiver);
+  __ cmpq(ic_reg, tmp);
   __ jcc(Assembler::equal, ok);
 
+  __ popq(tmp);
   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 
+  __ bind(ok);
+  __ popq(tmp);
+
   // Verified entry point must be aligned
   __ align(8);
 
-  __ bind(ok);
-
   int vep_offset = ((intptr_t)__ pc()) - start;
 
   // The instruction at the verified entry point must be 5 bytes or longer
@@ -1663,6 +1668,7 @@
     __ andq(rsp, -16); // align stack as required by ABI
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
     __ movq(rsp, r12); // restore sp
+    __ reinit_heapbase();
     // Restore any method result value
     restore_native_result(masm, ret_type, stack_slots);
     __ bind(Continue);
@@ -1725,7 +1731,6 @@
     __ bind(done);
 
   }
-
   {
     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
     save_native_result(masm, ret_type, stack_slots);
@@ -1829,6 +1834,7 @@
 
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
     __ movq(rsp, r12); // restore sp
+    __ reinit_heapbase();
 #ifdef ASSERT
     {
       Label L;
@@ -1859,6 +1865,7 @@
   __ andq(rsp, -16); // align stack as required by ABI
   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
   __ movq(rsp, r12); // restore sp
+  __ reinit_heapbase();
   restore_native_result(masm, ret_type, stack_slots);
   // and continue
   __ jmp(reguard_done);
@@ -1941,9 +1948,8 @@
   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
 
   // Normal deoptimization.  Save exec mode for unpack_frames.
-  __ movl(r12, Deoptimization::Unpack_deopt); // callee-saved
+  __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
   __ jmp(cont);
-
   int exception_offset = __ pc() - start;
 
   // Prolog for exception case
@@ -1955,7 +1961,7 @@
   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
 
   // Deopt during an exception.  Save exec mode for unpack_frames.
-  __ movl(r12, Deoptimization::Unpack_exception); // callee-saved
+  __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
 
   __ bind(cont);
 
@@ -2088,7 +2094,7 @@
   __ set_last_Java_frame(noreg, rbp, NULL);
 
   __ movq(c_rarg0, r15_thread);
-  __ movl(c_rarg1, r12); // second arg: exec_mode
+  __ movl(c_rarg1, r14); // second arg: exec_mode
   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
 
   // Set an oopmap for the call site
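
Two recurring edits in this file follow from r12 becoming the heap-base register: the
deoptimization exec mode moves from r12 to r14 (r12 is no longer a free callee-saved temporary),
and every site that parks rsp in r12 across an ABI-aligned runtime call re-establishes the heap
base afterwards.  A sketch of that second pattern in macro-assembler style (not a literal excerpt;
`target' is a placeholder):

    __ movq(r12, rsp);          // save sp across the call (clobbers the heap base)
    __ andq(rsp, -16);          // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, target)));
    __ movq(rsp, r12);          // restore sp
    __ reinit_heapbase();       // r12 must hold the heap base again before any
                                // compressed-oop load, store, or decode
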
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -30,6 +30,7 @@
 // see the comment in stubRoutines.hpp
 
 #define __ _masm->
+#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
@@ -252,6 +253,7 @@
 
     // Load up thread register
     __ movq(r15_thread, thread);
+    __ reinit_heapbase();
 
 #ifdef ASSERT
     // make sure we have no pending exceptions
@@ -945,7 +947,7 @@
     __ jcc(Assembler::notZero, error);
 
     // make sure klass is 'reasonable'
-    __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
+    __ load_klass(rax, rax);  // get klass
     __ testq(rax, rax);
     __ jcc(Assembler::zero, error); // if klass is NULL it is broken
     // Check if the klass is in the right area of memory
@@ -957,7 +959,7 @@
     __ jcc(Assembler::notZero, error);
 
     // make sure klass' klass is 'reasonable'
-    __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes()));
+    __ load_klass(rax, rax);
     __ testq(rax, rax);
     __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
     // Check if the klass' klass is in the right area of memory
@@ -1001,6 +1003,7 @@
     BLOCK_COMMENT("call MacroAssembler::debug");
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
     __ movq(rsp, r12);                           // restore rsp
+    __ reinit_heapbase();                        // r12 is heapbase
     __ popaq();                                  // pop registers
     __ ret(3 * wordSize);                        // pop caller saved stuff
 
@@ -1652,6 +1655,7 @@
   // Arguments:
   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
   //             ignored
+  //   is_oop  - true => oop array, so generate store check code
   //   name    - stub name string
   //
   // Inputs:
@@ -1665,9 +1669,9 @@
   //
   // Side Effects:
   //   disjoint_int_copy_entry is set to the no-overlap entry point
-  //   used by generate_conjoint_int_copy().
+  //   used by generate_conjoint_int_oop_copy().
   //
-  address generate_disjoint_int_copy(bool aligned, const char *name) {
+  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
@@ -1680,19 +1684,30 @@
     const Register qword_count = count;
     const Register end_from    = from; // source array end address
     const Register end_to      = to;   // destination array end address
+    const Register saved_to    = r11;  // saved destination array address
     // End pointers are inclusive, and if count is not zero they point
     // to the last unit copied:  end_to[0] := end_from[0]
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
 
-    disjoint_int_copy_entry = __ pc();
+    (is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc();
+
+    if (is_oop) {
+      // no registers are destroyed by this call
+      gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
+    }
+
     BLOCK_COMMENT("Entry:");
     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
 
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
+    if (is_oop) {
+      __ movq(saved_to, to);
+    }
+
     // 'from', 'to' and 'count' are now valid
     __ movq(dword_count, count);
     __ shrq(count, 1); // count => qword_count
@@ -1718,6 +1733,10 @@
     __ movl(Address(end_to, 8), rax);
 
   __ BIND(L_exit);
+    if (is_oop) {
+      __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
+      gen_write_ref_array_post_barrier(saved_to, end_to, rax);
+    }
     inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
@@ -1734,6 +1753,7 @@
   // Arguments:
   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
   //             ignored
+  //   is_oop  - true => oop array, so generate store check code
   //   name    - stub name string
   //
   // Inputs:
@@ -1745,12 +1765,12 @@
   // the hardware handle it.  The two dwords within qwords that span
   // cache line boundaries will still be loaded and stored atomically.
   //
-  address generate_conjoint_int_copy(bool aligned, const char *name) {
+  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
 
-    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes;
+    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
     const Register from        = rdi;  // source array address
     const Register to          = rsi;  // destination array address
     const Register count       = rdx;  // elements count
@@ -1760,14 +1780,21 @@
     __ enter(); // required for proper stackwalking of RuntimeStub frame
     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
 
-    int_copy_entry = __ pc();
+    if (is_oop) {
+      // no registers are destroyed by this call
+      gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
+    }
+
+    (is_oop ? oop_copy_entry : int_copy_entry) = __ pc();
     BLOCK_COMMENT("Entry:");
     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
 
-    array_overlap_test(disjoint_int_copy_entry, Address::times_4);
+    array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry,
+                       Address::times_4);
     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                       // r9 and r10 may be used to save non-volatile registers
 
+    assert_clean_int(count, rax); // Make sure 'count' is clean int.
     // 'from', 'to' and 'count' are now valid
     __ movq(dword_count, count);
     __ shrq(count, 1); // count => qword_count
@@ -1789,6 +1816,9 @@
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
     inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
+    if (is_oop) {
+      __ jmp(L_exit);
+    }
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1797,7 +1827,13 @@
     // Copy in 32-bytes chunks
     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
 
-    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
+    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
+  __ bind(L_exit);
+    if (is_oop) {
+      Register end_to = rdx;
+      __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
+      gen_write_ref_array_post_barrier(to, end_to, rax);
+    }
     restore_arg_regs();
     __ xorq(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -1817,7 +1853,7 @@
   //   c_rarg1   - destination array address
   //   c_rarg2   - element count, treated as ssize_t, can be zero
   //
-  // Side Effects:
+  // Side Effects:
   //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
   //   no-overlap entry point used by generate_conjoint_long_oop_copy().
   //
@@ -1857,7 +1893,7 @@
 
     // Copy from low to high addresses.  Use 'to' as scratch.
     __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
-    __ leaq(end_to,   Address(to, qword_count, Address::times_8, -8));
+    __ leaq(end_to,   Address(to,   qword_count, Address::times_8, -8));
     __ negq(qword_count);
     __ jmp(L_copy_32_bytes);
 
@@ -1923,11 +1959,14 @@
 
     address disjoint_copy_entry = NULL;
     if (is_oop) {
+      assert(!UseCompressedOops, "shouldn't be called for compressed oops");
       disjoint_copy_entry = disjoint_oop_copy_entry;
       oop_copy_entry  = __ pc();
+      array_overlap_test(disjoint_oop_copy_entry, Address::times_8);
     } else {
       disjoint_copy_entry = disjoint_long_copy_entry;
       long_copy_entry = __ pc();
+      array_overlap_test(disjoint_long_copy_entry, Address::times_8);
     }
     BLOCK_COMMENT("Entry:");
     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
@@ -1945,8 +1984,6 @@
       gen_write_ref_array_pre_barrier(to, saved_count);
     }
 
-    // Copy from high to low addresses.  Use rcx as scratch.
-
     __ jmp(L_copy_32_bytes);
 
     // Copy trailing qwords
@@ -2038,7 +2075,14 @@
       // Scan rcx words at [rdi] for occurrence of rax
       // Set NZ/Z based on last compare
       __ movq(rax, super_klass);
-      __ repne_scan();
+      if (UseCompressedOops) {
+        // Compare against the compressed form.  No need to uncompress it
+        // afterwards: the original rax appears to be restored by the popq below.
+        __ encode_heap_oop(rax);
+        __ repne_scanl();
+      } else {
+        __ repne_scanq();
+      }
 
       // Unspill the temp. registers:
       __ popq(rdi);
@@ -2115,7 +2159,7 @@
     // caller guarantees that the arrays really are different
     // otherwise, we would have to make conjoint checks
     { Label L;
-      array_overlap_test(L, Address::times_8);
+      array_overlap_test(L, TIMES_OOP);
       __ stop("checkcast_copy within a single array");
       __ bind(L);
     }
@@ -2160,12 +2204,11 @@
 #endif //ASSERT
 
     // Loop-invariant addresses.  They are exclusive end pointers.
-    Address end_from_addr(from, length, Address::times_8, 0);
-    Address   end_to_addr(to,   length, Address::times_8, 0);
+    Address end_from_addr(from, length, TIMES_OOP, 0);
+    Address   end_to_addr(to,   length, TIMES_OOP, 0);
     // Loop-variant addresses.  They assume post-incremented count < 0.
-    Address from_element_addr(end_from, count, Address::times_8, 0);
-    Address   to_element_addr(end_to,   count, Address::times_8, 0);
-    Address oop_klass_addr(rax_oop, oopDesc::klass_offset_in_bytes());
+    Address from_element_addr(end_from, count, TIMES_OOP, 0);
+    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);
 
     gen_write_ref_array_pre_barrier(to, count);
 
@@ -2189,17 +2232,17 @@
     __ align(16);
 
     __ BIND(L_store_element);
-    __ movq(to_element_addr, rax_oop);  // store the oop
+    __ store_heap_oop(to_element_addr, rax_oop);  // store the oop
     __ incrementq(count);               // increment the count toward zero
     __ jcc(Assembler::zero, L_do_card_marks);
 
     // ======== loop entry is here ========
     __ BIND(L_load_element);
-    __ movq(rax_oop, from_element_addr); // load the oop
+    __ load_heap_oop(rax_oop, from_element_addr); // load the oop
     __ testq(rax_oop, rax_oop);
     __ jcc(Assembler::zero, L_store_element);
 
-    __ movq(r11_klass, oop_klass_addr); // query the object klass
+    __ load_klass(r11_klass, rax_oop); // query the object klass
     generate_type_check(r11_klass, ckoff, ckval, L_store_element);
     // ======== end loop ========
 
@@ -2425,15 +2468,14 @@
     // registers used as temp
     const Register r11_length    = r11; // elements count to copy
     const Register r10_src_klass = r10; // array klass
+    const Register r9_dst_klass  = r9;  // dest array klass
 
     //  if (length < 0) return -1;
     __ movl(r11_length, C_RARG4);       // length (elements count, 32-bits value)
     __ testl(r11_length, r11_length);
     __ jccb(Assembler::negative, L_failed_0);
 
-    Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
-    Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
-    __ movq(r10_src_klass, src_klass_addr);
+    __ load_klass(r10_src_klass, src);
 #ifdef ASSERT
     //  assert(src->klass() != NULL);
     BLOCK_COMMENT("assert klasses not null");
@@ -2443,7 +2485,8 @@
       __ bind(L1);
       __ stop("broken null klass");
       __ bind(L2);
-      __ cmpq(dst_klass_addr, 0);
+      __ load_klass(r9_dst_klass, dst);
+      __ cmpq(r9_dst_klass, 0);
       __ jcc(Assembler::equal, L1);     // this would be broken also
       BLOCK_COMMENT("assert done");
     }
@@ -2470,7 +2513,8 @@
     __ jcc(Assembler::equal, L_objArray);
 
     //  if (src->klass() != dst->klass()) return -1;
-    __ cmpq(r10_src_klass, dst_klass_addr);
+    __ load_klass(r9_dst_klass, dst);
+    __ cmpq(r10_src_klass, r9_dst_klass);
     __ jcc(Assembler::notEqual, L_failed);
 
     //  if (!src->is_Array()) return -1;
@@ -2559,17 +2603,18 @@
 
     Label L_plain_copy, L_checkcast_copy;
     //  test array classes for subtyping
-    __ cmpq(r10_src_klass, dst_klass_addr); // usual case is exact equality
+    __ load_klass(r9_dst_klass, dst);
+    __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality
     __ jcc(Assembler::notEqual, L_checkcast_copy);
 
     // Identically typed arrays can be copied without element-wise checks.
     arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                            r10, L_failed);
 
-    __ leaq(from, Address(src, src_pos, Address::times_8,
+    __ leaq(from, Address(src, src_pos, TIMES_OOP,
                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
-    __ leaq(to,   Address(dst, dst_pos, Address::times_8,
-                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
+    __ leaq(to,   Address(dst, dst_pos, TIMES_OOP,
+                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
     __ movslq(count, r11_length); // length
   __ BIND(L_plain_copy);
     __ jump(RuntimeAddress(oop_copy_entry));
@@ -2579,7 +2624,7 @@
     {
       // assert(r11_length == C_RARG4); // will reload from here
       Register r11_dst_klass = r11;
-      __ movq(r11_dst_klass, dst_klass_addr);
+      __ load_klass(r11_dst_klass, dst);
 
       // Before looking at dst.length, make sure dst is also an objArray.
       __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
@@ -2593,13 +2638,13 @@
       __ movl(r11_length, C_RARG4);     // reload
       arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                              rax, L_failed);
-      __ movl(r11_dst_klass, dst_klass_addr); // reload
+      __ load_klass(r11_dst_klass, dst); // reload
 #endif
 
       // Marshal the base address arguments now, freeing registers.
-      __ leaq(from, Address(src, src_pos, Address::times_8,
+      __ leaq(from, Address(src, src_pos, TIMES_OOP,
                    arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
-      __ leaq(to,   Address(dst, dst_pos, Address::times_8,
+      __ leaq(to,   Address(dst, dst_pos, TIMES_OOP,
                    arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
       __ movl(count, C_RARG4);          // length (reloaded)
       Register sco_temp = c_rarg3;      // this register is free now
@@ -2648,14 +2693,20 @@
     StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
     StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, "jshort_arraycopy");
 
-    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
-    StubRoutines::_jint_arraycopy            = generate_conjoint_int_copy(false, "jint_arraycopy");
+    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy");
+    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy");
 
     StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
     StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");
 
-    StubRoutines::_oop_disjoint_arraycopy    = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
-    StubRoutines::_oop_arraycopy             = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
+
+    if (UseCompressedOops) {
+      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy");
+      StubRoutines::_oop_arraycopy           = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy");
+    } else {
+      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
+      StubRoutines::_oop_arraycopy           = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
+    }
 
     StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
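
The stub registration above keys off element size: with compressed oops an object-array element is
a 32-bit encoded value, so the oop arraycopy stubs reuse the int-width copy loops (and the
TIMES_OOP scale defined at the top of this file); otherwise they reuse the long-width loops.  A
minimal sketch of the element addressing this implies (names are hypothetical):

    // Illustrative only: element address in a T_OBJECT array, matching
    // TIMES_OOP == times_4 (compressed) or times_8 (uncompressed).
    static inline uintptr_t oop_element_addr_sketch(uintptr_t array,
                                                    int header_bytes,  // base_offset_in_bytes(T_OBJECT)
                                                    int index,
                                                    bool use_compressed_oops) {
      const int elem_size = use_compressed_oops ? 4 : 8;
      return array + header_bytes + (uintptr_t)index * elem_size;
    }
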
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -664,7 +664,7 @@
 
   // work registers
   const Register method = rbx;
-  const Register t      = r12;
+  const Register t      = r11;
 
   // allocate space for parameters
   __ get_method(method);
@@ -844,6 +844,7 @@
     __ andq(rsp, -16); // align stack as required by ABI
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
     __ movq(rsp, r12); // restore sp
+    __ reinit_heapbase();
     __ bind(Continue);
   }
 
@@ -891,6 +892,7 @@
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
     __ movq(rsp, r12); // restore sp
     __ popaq(); // XXX only restore smashed registers
+    __ reinit_heapbase();
 
     __ bind(no_reguard);
   }
@@ -1360,6 +1362,7 @@
   // rdx: return address/pc that threw exception
   __ restore_bcp();    // r13 points to call/send
   __ restore_locals();
+  __ reinit_heapbase();  // restore r12 as heapbase.
   // Entry point for exceptions thrown within interpreter code
   Interpreter::_throw_exception_entry = __ pc();
   // expression stack is undefined here
@@ -1658,6 +1661,7 @@
   __ andq(rsp, -16); // align stack as required by ABI
   __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
   __ movq(rsp, r12); // restore sp
+  __ reinit_heapbase();
 }
 
 
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -557,8 +557,8 @@
   // eax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ movq(rax, Address(rdx, rax,
-                       Address::times_8,
+  __ load_heap_oop(rax, Address(rdx, rax,
+                       UseCompressedOops ? Address::times_4 : Address::times_8,
                        arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
 }
 
@@ -870,15 +870,15 @@
   __ jcc(Assembler::zero, is_null);
 
   // Move subklass into rbx
-  __ movq(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rbx, rax);
   // Move superklass into rax
-  __ movq(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rax, rdx);
   __ movq(rax, Address(rax,
                        sizeof(oopDesc) +
                        objArrayKlass::element_klass_offset_in_bytes()));
-  // Compress array + index*8 + 12 into a single register.  Frees rcx.
+  // Compress array + index*oopSize + 12 into a single register.  Frees rcx.
   __ leaq(rdx, Address(rdx, rcx,
-                       Address::times_8,
+                       UseCompressedOops ? Address::times_4 : Address::times_8,
                        arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
 
   // Generate subtype check.  Blows rcx, rdi
@@ -892,17 +892,17 @@
   // Come here on success
   __ bind(ok_is_subtype);
   __ movq(rax, at_tos()); // Value
-  __ movq(Address(rdx, 0), rax);
+  __ store_heap_oop(Address(rdx, 0), rax);
   __ store_check(rdx);
   __ jmp(done);
 
   // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
   __ bind(is_null);
   __ profile_null_seen(rbx);
-  __ movq(Address(rdx, rcx,
-                  Address::times_8,
-                  arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
-          rax);
+  __ store_heap_oop(Address(rdx, rcx,
+                            UseCompressedOops ? Address::times_4 : Address::times_8,
+                            arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
+                    rax);
 
   // Pop stack arguments
   __ bind(done);
@@ -1934,7 +1934,7 @@
   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
     assert(state == vtos, "only valid state");
     __ movq(c_rarg1, aaddress(0));
-    __ movq(rdi, Address(c_rarg1, oopDesc::klass_offset_in_bytes()));
+    __ load_klass(rdi, c_rarg1);
     __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
     Label skip_register_finalizer;
@@ -2184,7 +2184,7 @@
   __ cmpl(flags, atos);
   __ jcc(Assembler::notEqual, notObj);
   // atos
-  __ movq(rax, field);
+  __ load_heap_oop(rax, field);
   __ push(atos);
   if (!is_static) {
     patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
@@ -2394,7 +2394,7 @@
   // atos
   __ pop(atos);
   if (!is_static) pop_and_check_object(obj);
-  __ movq(field, rax);
+  __ store_heap_oop(field, rax);
   __ store_check(obj, field); // Need to mark card
   if (!is_static) {
     patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
@@ -2515,7 +2515,7 @@
     const Address field(c_rarg3, 0);
 
     switch (bytecode()) {          // load values into the jvalue object
-    case Bytecodes::_fast_aputfield: // fall through
+    case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
     case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
     case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
     case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
@@ -2582,7 +2582,7 @@
   // access field
   switch (bytecode()) {
   case Bytecodes::_fast_aputfield:
-    __ movq(field, rax);
+    __ store_heap_oop(field, rax);
     __ store_check(rcx, field);
     break;
   case Bytecodes::_fast_lputfield:
@@ -2631,8 +2631,8 @@
     __ jcc(Assembler::zero, L1);
     // access constant pool cache entry
     __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
+    __ verify_oop(rax);
     __ movq(r12, rax);  // save object pointer before call_VM() clobbers it
-    __ verify_oop(rax);
     __ movq(c_rarg1, rax);
     // c_rarg1: object pointer copied above
     // c_rarg2: cache entry pointer
@@ -2641,6 +2641,7 @@
                                 InterpreterRuntime::post_field_access),
                c_rarg1, c_rarg2);
     __ movq(rax, r12); // restore object pointer
+    __ reinit_heapbase();
     __ bind(L1);
   }
 
@@ -2667,7 +2668,7 @@
   // access field
   switch (bytecode()) {
   case Bytecodes::_fast_agetfield:
-    __ movq(rax, field);
+    __ load_heap_oop(rax, field);
     __ verify_oop(rax);
     break;
   case Bytecodes::_fast_lgetfield:
@@ -2725,7 +2726,7 @@
     __ movl(rax, Address(rax, rbx, Address::times_1));
     break;
   case atos:
-    __ movq(rax, Address(rax, rbx, Address::times_1));
+    __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
     __ verify_oop(rax);
     break;
   case ftos:
@@ -2787,7 +2788,8 @@
     __ movl(recv, flags);
     __ andl(recv, 0xFF);
     if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
-    __ movq(recv, Address(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)));
+    __ movq(recv, Address(rsp, recv, Address::times_8,
+                                 -Interpreter::expr_offset_in_bytes(1)));
     __ verify_oop(recv);
   }
 
@@ -2854,7 +2856,7 @@
 
   // get receiver klass
   __ null_check(recv, oopDesc::klass_offset_in_bytes());
-  __ movq(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rax, recv);
 
   __ verify_oop(rax);
 
@@ -2866,8 +2868,8 @@
   assert(vtableEntry::size() * wordSize == 8,
          "adjust the scaling in the code below");
   __ movq(method, Address(rax, index,
-                          Address::times_8,
-                          base + vtableEntry::method_offset_in_bytes()));
+                                 Address::times_8,
+                                 base + vtableEntry::method_offset_in_bytes()));
   __ movq(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
   __ jump_from_interpreted(method, rdx);
 }
@@ -2932,7 +2934,7 @@
 
   // Get receiver klass into rdx - also a null check
   __ restore_locals(); // restore r14
-  __ movq(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rdx, rcx);
   __ verify_oop(rdx);
 
   // profile this call
@@ -3161,7 +3163,7 @@
       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
                (intptr_t) markOopDesc::prototype()); // header (address 0x1)
     }
-    __ movq(Address(rax, oopDesc::klass_offset_in_bytes()), rsi);  // klass
+    __ store_klass(rax, rsi);  // klass
     __ jmp(done);
   }
 
@@ -3223,12 +3225,12 @@
                   typeArrayOopDesc::header_size(T_BYTE) * wordSize),
           JVM_CONSTANT_Class);
   __ jcc(Assembler::equal, quicked);
-
+  __ push(atos); // save receiver for result, and for GC
   __ movq(r12, rcx); // save rcx XXX
-  __ push(atos); // save receiver for result, and for GC
   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+  __ movq(rcx, r12); // restore rcx XXX
+  __ reinit_heapbase();
   __ pop_ptr(rdx); // restore receiver
-  __ movq(rcx, r12); // restore rcx XXX
   __ jmpb(resolved);
 
   // Get superklass in rax and subklass in rbx
@@ -3238,7 +3240,7 @@
                        Address::times_8, sizeof(constantPoolOopDesc)));
 
   __ bind(resolved);
-  __ movq(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rbx, rdx);
 
   // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
   // Superklass in rax.  Subklass in rbx.
@@ -3280,19 +3282,20 @@
           JVM_CONSTANT_Class);
   __ jcc(Assembler::equal, quicked);
 
+  __ push(atos); // save receiver for result, and for GC
   __ movq(r12, rcx); // save rcx
-  __ push(atos); // save receiver for result, and for GC
   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+  __ movq(rcx, r12); // restore rcx
+  __ reinit_heapbase();
   __ pop_ptr(rdx); // restore receiver
-  __ movq(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
-  __ movq(rcx, r12); // restore rcx
+  __ load_klass(rdx, rdx);
   __ jmpb(resolved);
 
   // Get superklass in rax and subklass in rdx
   __ bind(quicked);
-  __ movq(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rdx, rax);
   __ movq(rax, Address(rcx, rbx,
-                       Address::times_8, sizeof(constantPoolOopDesc)));
+                              Address::times_8, sizeof(constantPoolOopDesc)));
 
   __ bind(resolved);
 
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -56,7 +56,7 @@
 
   // get receiver klass
   address npe_addr = __ pc();
-  __ movq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rax, j_rarg0);
 
   // compute entry offset (in words)
   int entry_offset =
@@ -131,7 +131,7 @@
   // get receiver klass (also an implicit null-check)
   address npe_addr = __ pc();
 
-  __ movq(rbx, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rbx, j_rarg0);
 
   // If we take a trap while this arg is on the stack we will not
   // be able to walk the stack properly. This is not an issue except
@@ -181,7 +181,7 @@
   // Get methodOop and entrypoint for compiler
 
   // Get klass pointer again
-  __ movq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+  __ load_klass(rax, j_rarg0);
 
   const Register method = rbx;
   __ movq(method, Address(rax, j_rarg1, Address::times_1, method_offset));
@@ -226,10 +226,12 @@
 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
   if (is_vtable_stub) {
     // Vtable stub size
-    return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0);
+    return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
+           (UseCompressedOops ? 16 : 0);  // 1 leaq can be 3 bytes + 1 long
   } else {
     // Itable stub size
-    return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0);
+    return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0) +
+           (UseCompressedOops ? 32 : 0);  // 2 leaqs
   }
 }
 
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Sun Apr 13 17:43:42 2008 -0400
@@ -4538,8 +4538,8 @@
   // Location of C & interpreter return values
   c_return_value %{
     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
-    static int lo[Op_RegL+1] = { 0, 0, EAX_num,      EAX_num,      FPR1L_num,    FPR1L_num, EAX_num };
-    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
+    static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num,      EAX_num,      FPR1L_num,    FPR1L_num, EAX_num };
+    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
 
     // in SSE2+ mode we want to keep the FPU stack clean so pretend
     // that C functions return float and double results in XMM0.
@@ -4554,8 +4554,8 @@
   // Location of return values
   return_value %{
     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
-    static int lo[Op_RegL+1] = { 0, 0, EAX_num,      EAX_num,      FPR1L_num,    FPR1L_num, EAX_num };
-    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
+    static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num,      EAX_num,      FPR1L_num,    FPR1L_num, EAX_num };
+    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
     if( ideal_reg == Op_RegD && UseSSE>=2 )
       return OptoRegPair(XMM0b_num,XMM0a_num);
     if( ideal_reg == Op_RegF && UseSSE>=1 )
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Sun Apr 13 17:43:42 2008 -0400
@@ -312,7 +312,6 @@
                   R9,  R9_H,
                   R10, R10_H,
                   R11, R11_H,
-                  R12, R12_H,
                   R13, R13_H,
                   R14, R14_H);
 
@@ -392,7 +391,6 @@
                    R9,  R9_H,
                    R10, R10_H,
                    R11, R11_H,
-                   R12, R12_H,
                    R13, R13_H,
                    R14, R14_H);
 
@@ -406,7 +404,6 @@
                               R9,  R9_H,
                               R10, R10_H,
                               R11, R11_H,
-                              R12, R12_H,
                               R13, R13_H,
                               R14, R14_H);
 
@@ -421,7 +418,6 @@
                           R9,  R9_H,
                           R10, R10_H,
                           R11, R11_H,
-                          R12, R12_H,
                           R13, R13_H,
                           R14, R14_H);
 
@@ -436,7 +432,6 @@
                           R9,  R9_H,
                           R10, R10_H,
                           R11, R11_H,
-                          R12, R12_H,
                           R13, R13_H,
                           R14, R14_H);
 
@@ -449,6 +444,9 @@
 // Singleton class for RDX long register
 reg_class long_rdx_reg(RDX, RDX_H);
 
+// Singleton class for R12 long register
+reg_class long_r12_reg(R12, R12_H);
+
 // Class for all int registers (except RSP)
 reg_class int_reg(RAX,
                   RDX,
@@ -461,7 +459,6 @@
                   R9,
                   R10,
                   R11,
-                  R12,
                   R13,
                   R14);
 
@@ -476,7 +473,6 @@
                          R9,
                          R10,
                          R11,
-                         R12,
                          R13,
                          R14);
 
@@ -490,7 +486,6 @@
                              R9,
                              R10,
                              R11,
-                             R12,
                              R13,
                              R14);
 
@@ -1844,8 +1839,14 @@
 #ifndef PRODUCT
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
-  st->print_cr("cmpq    rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
-               "# Inline cache check", oopDesc::klass_offset_in_bytes());
+  if (UseCompressedOops) {
+    st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
+    st->print_cr("leaq    rscratch1, [r12_heapbase, r, Address::times_8, 0]");
+    st->print_cr("cmpq    rax, rscratch1\t # Inline cache check");
+  } else {
+    st->print_cr("cmpq    rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
+                 "# Inline cache check", oopDesc::klass_offset_in_bytes());
+  }
   st->print_cr("\tjne     SharedRuntime::_ic_miss_stub");
   st->print_cr("\tnop");
   if (!OptoBreakpoint) {
@@ -1860,7 +1861,12 @@
 #ifdef ASSERT
   uint code_size = cbuf.code_size();
 #endif
-  masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+  if (UseCompressedOops) {
+    masm.load_klass(rscratch1, j_rarg0);
+    masm.cmpq(rax, rscratch1);
+  } else {
+    masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+  }
 
   masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 
@@ -1871,6 +1877,10 @@
     // Leave space for int3
      nops_cnt += 1;
   }
+  if (UseCompressedOops) {
+    // ??? divisible by 4 is aligned?
+    nops_cnt += 1;
+  }
   masm.nop(nops_cnt);
 
   assert(cbuf.code_size() - code_size == size(ra_),
@@ -1879,7 +1889,11 @@
 
 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 {
-  return OptoBreakpoint ? 11 : 12;
+  if (UseCompressedOops) {
+    return OptoBreakpoint ? 19 : 20;
+  } else {
+    return OptoBreakpoint ? 11 : 12;
+  }
 }
 
 
@@ -2052,6 +2066,7 @@
     reg ==  RCX_num || reg ==  RCX_H_num ||
     reg ==   R8_num || reg ==   R8_H_num ||
     reg ==   R9_num || reg ==   R9_H_num ||
+    reg ==  R12_num || reg ==  R12_H_num ||
     reg == XMM0_num || reg == XMM0_H_num ||
     reg == XMM1_num || reg == XMM1_H_num ||
     reg == XMM2_num || reg == XMM2_H_num ||
@@ -2087,6 +2102,17 @@
   return LONG_RDX_REG_mask;
 }
 
+static Address build_address(int b, int i, int s, int d) {
+  Register index = as_Register(i);
+  Address::ScaleFactor scale = (Address::ScaleFactor)s;
+  if (index == rsp) {
+    index = noreg;
+    scale = Address::no_scale;
+  }
+  Address addr(as_Register(b), index, scale, d);
+  return addr;
+}
+
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
@@ -2545,7 +2571,7 @@
     Register Rrax = as_Register(RAX_enc); // super class
     Register Rrcx = as_Register(RCX_enc); // killed
     Register Rrsi = as_Register(RSI_enc); // sub class
-    Label hit, miss;
+    Label hit, miss, cmiss;
 
     MacroAssembler _masm(&cbuf);
     // Compare super with sub directly, since super is not in its own SSA.
@@ -2562,12 +2588,27 @@
                           Klass::secondary_supers_offset_in_bytes()));
     __ movl(Rrcx, Address(Rrdi, arrayOopDesc::length_offset_in_bytes()));
     __ addq(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
-    __ repne_scan();
-    __ jcc(Assembler::notEqual, miss);
-    __ movq(Address(Rrsi,
-                    sizeof(oopDesc) +
-                    Klass::secondary_super_cache_offset_in_bytes()),
-            Rrax);
+    if (UseCompressedOops) {
+      __ encode_heap_oop(Rrax);
+      __ repne_scanl();
+      __ jcc(Assembler::notEqual, cmiss);
+      __ decode_heap_oop(Rrax);
+      __ movq(Address(Rrsi,
+                      sizeof(oopDesc) +
+                      Klass::secondary_super_cache_offset_in_bytes()),
+              Rrax);
+      __ jmp(hit);
+      __ bind(cmiss);
+      __ decode_heap_oop(Rrax);
+      __ jmp(miss);
+    } else {
+      __ repne_scanq();
+      __ jcc(Assembler::notEqual, miss);
+      __ movq(Address(Rrsi,
+                      sizeof(oopDesc) +
+                      Klass::secondary_super_cache_offset_in_bytes()),
+              Rrax);
+    }
     __ bind(hit);
     if ($primary) {
       __ xorq(Rrdi, Rrdi);
@@ -3693,10 +3734,10 @@
     int count_offset  = java_lang_String::count_offset_in_bytes();
     int base_offset   = arrayOopDesc::base_offset_in_bytes(T_CHAR);
 
-    masm.movq(rax, Address(rsi, value_offset));
+    masm.load_heap_oop(rax, Address(rsi, value_offset));
     masm.movl(rcx, Address(rsi, offset_offset));
     masm.leaq(rax, Address(rax, rcx, Address::times_2, base_offset));
-    masm.movq(rbx, Address(rdi, value_offset));
+    masm.load_heap_oop(rbx, Address(rdi, value_offset));
     masm.movl(rcx, Address(rdi, offset_offset));
     masm.leaq(rbx, Address(rbx, rcx, Address::times_2, base_offset));
 
@@ -4120,6 +4161,7 @@
 %}
 
 
+
 //----------FRAME--------------------------------------------------------------
 // Definition of frame structure and management information.
 //
@@ -4255,6 +4297,7 @@
     static const int lo[Op_RegL + 1] = {
       0,
       0,
+      RAX_num,  // Op_RegN
       RAX_num,  // Op_RegI
       RAX_num,  // Op_RegP
       XMM0_num, // Op_RegF
@@ -4264,13 +4307,14 @@
     static const int hi[Op_RegL + 1] = {
       0,
       0,
+      OptoReg::Bad, // Op_RegN
       OptoReg::Bad, // Op_RegI
       RAX_H_num,    // Op_RegP
       OptoReg::Bad, // Op_RegF
       XMM0_H_num,   // Op_RegD
       RAX_H_num     // Op_RegL
     };
-
+    assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type");
     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
   %}
 %}
@@ -4417,9 +4461,25 @@
   interface(CONST_INTER);
 %}
 
-// Unsigned 31-bit Pointer Immediate
-// Can be used in both 32-bit signed and 32-bit unsigned insns.
-// Works for nulls and markOops; not for relocatable (oop) pointers.
+// Pointer Immediate
+operand immN() %{
+  match(ConN);
+
+  op_cost(10);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// NULL Pointer Immediate
+operand immN0() %{
+  predicate(n->get_narrowcon() == 0);
+  match(ConN);
+
+  op_cost(5);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 operand immP31()
 %{
   predicate(!n->as_Type()->type()->isa_oopptr()
@@ -4431,6 +4491,7 @@
   interface(CONST_INTER);
 %}
 
+
 // Long Immediate
 operand immL()
 %{
@@ -4767,6 +4828,23 @@
   interface(REG_INTER);
 %}
 
+
+operand r12RegL() %{
+  constraint(ALLOC_IN_RC(long_r12_reg));
+  match(RegL);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rRegN() %{
+  constraint(ALLOC_IN_RC(int_reg));
+  match(RegN);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
 // Answer: Operand match rules govern the DFA as it processes instruction inputs.
 // It's fine for an instruction input which expects rRegP to match a r15_RegP.
@@ -4822,6 +4900,18 @@
   interface(REG_INTER);
 %}
 
+// Special Registers
+// Return a compressed pointer value
+operand rax_RegN()
+%{
+  constraint(ALLOC_IN_RC(int_rax_reg));
+  match(RegN);
+  match(rRegN);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // Used in AtomicAdd
 operand rbx_RegP()
 %{
@@ -5112,6 +5202,21 @@
   %}
 %}
 
+// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
+operand indIndexScaleOffsetComp(rRegN src, immL32 off, r12RegL base) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN src base) off);
+
+  op_cost(10);
+  format %{"[$base + $src << 3 + $off] (compressed)" %}
+  interface(MEMORY_INTER) %{
+    base($base);
+    index($src);
+    scale(0x3);
+    disp($off);
+  %}
+%}
+
 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
 operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
 %{
@@ -5259,7 +5364,8 @@
 // case of this is memory operands.
 
 opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
-               indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset);
+               indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
+               indIndexScaleOffsetComp);
 
 //----------PIPELINE-----------------------------------------------------------
 // Rules which define the behavior of the target architectures pipeline.
@@ -5937,10 +6043,28 @@
   ins_pipe(ialu_reg_mem); // XXX
 %}
 
+// Load Compressed Pointer
+instruct loadN(rRegN dst, memory mem, rFlagsReg cr)
+%{
+   match(Set dst (LoadN mem));
+   effect(KILL cr);
+
+   ins_cost(125); // XXX
+   format %{ "movl    $dst, $mem\t# compressed ptr" %}
+   ins_encode %{
+     Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
+     Register dst = as_Register($dst$$reg);
+     __ movl(dst, addr);
+   %}
+   ins_pipe(ialu_reg_mem); // XXX
+%}
+
+
 // Load Klass Pointer
 instruct loadKlass(rRegP dst, memory mem)
 %{
   match(Set dst (LoadKlass mem));
+  predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow());
 
   ins_cost(125); // XXX
   format %{ "movq    $dst, $mem\t# class" %}
@@ -5949,6 +6073,25 @@
   ins_pipe(ialu_reg_mem); // XXX
 %}
 
+// Load Klass Pointer
+instruct loadKlassComp(rRegP dst, memory mem)
+%{
+  match(Set dst (LoadKlass mem));
+  predicate(n->in(MemNode::Address)->bottom_type()->is_narrow());
+
+  ins_cost(125); // XXX
+  format %{ "movl    $dst, $mem\t# compressed class" %}
+  ins_encode %{
+    Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
+    Register dst = as_Register($dst$$reg);
+    __ movl(dst, addr);
+    // The klass in an object header is never null, but this node is generated
+    // for all klass loads, not just the _klass field in the header.
+    __ decode_heap_oop(dst);
+  %}
+  ins_pipe(ialu_reg_mem); // XXX
+%}
+
 // Load Float
 instruct loadF(regF dst, memory mem)
 %{
@@ -6203,6 +6346,35 @@
   ins_pipe(pipe_slow);
 %}
 
+instruct loadConN0(rRegN dst, immN0 src, rFlagsReg cr) %{
+  match(Set dst src);
+  effect(KILL cr);
+  format %{ "xorq    $dst, $src\t# compressed ptr" %}
+  ins_encode %{
+    Register dst = $dst$$Register;
+    __ xorq(dst, dst);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct loadConN(rRegN dst, immN src) %{
+  match(Set dst src);
+
+  ins_cost(125);
+  format %{ "movl    $dst, $src\t# compressed ptr" %}
+  ins_encode %{
+    address con = (address)$src$$constant;
+    Register dst = $dst$$Register;
+    if (con == NULL) {
+      ShouldNotReachHere();
+    } else {
+      __ movoop(dst, (jobject)$src$$constant);
+      __ encode_heap_oop_not_null(dst);
+    }
+  %}
+  ins_pipe(ialu_reg_fat); // XXX
+%}
+
 instruct loadConF0(regF dst, immF0 src)
 %{
   match(Set dst src);
@@ -6458,6 +6630,22 @@
   ins_pipe(ialu_mem_imm);
 %}
 
+// Store Compressed Pointer
+instruct storeN(memory mem, rRegN src, rFlagsReg cr)
+%{
+  match(Set mem (StoreN mem src));
+  effect(KILL cr);
+
+  ins_cost(125); // XXX
+  format %{ "movl    $mem, $src\t# ptr" %}
+  ins_encode %{
+    Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
+    Register src = as_Register($src$$reg);
+    __ movl(addr, src);
+  %}
+  ins_pipe(ialu_mem_reg);
+%}
+
 // Store Integer Immediate
 instruct storeImmI(memory mem, immI src)
 %{
@@ -6805,6 +6993,39 @@
   ins_pipe(ialu_reg_reg); // XXX
 %}
 
+
+// Convert oop pointer into compressed form
+instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
+  match(Set dst (EncodeP src));
+  effect(KILL cr);
+  format %{ "encode_heap_oop $dst,$src" %}
+  ins_encode %{
+    Register s = $src$$Register;
+    Register d = $dst$$Register;
+    if (s != d) {
+      __ movq(d, s);
+    }
+    __ encode_heap_oop(d);
+  %}
+  ins_pipe(ialu_reg_long);
+%}
+
+instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
+  match(Set dst (DecodeN src));
+  effect(KILL cr);
+  format %{ "decode_heap_oop $dst,$src" %}
+  ins_encode %{
+    Register s = $src$$Register;
+    Register d = $dst$$Register;
+    if (s != d) {
+      __ movq(d, s);
+    }
+    __ decode_heap_oop(d);
+  %}
+  ins_pipe(ialu_reg_long);
+%}
+
+
 //----------Conditional Move---------------------------------------------------
 // Jump
 // dummy instruction for generating temp registers
@@ -7521,6 +7742,28 @@
 %}
 
 
+instruct compareAndSwapN(rRegI res,
+                          memory mem_ptr,
+                          rax_RegN oldval, rRegN newval,
+                          rFlagsReg cr) %{
+  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
+  effect(KILL cr, KILL oldval);
+
+  format %{ "cmpxchgl $mem_ptr,$newval\t# "
+            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
+            "sete    $res\n\t"
+            "movzbl  $res, $res" %}
+  opcode(0x0F, 0xB1);
+  ins_encode(lock_prefix,
+             REX_reg_mem(newval, mem_ptr),
+             OpcP, OpcS,
+             reg_mem(newval, mem_ptr),
+             REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
+             REX_reg_breg(res, res), // movzbl
+             Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
+  ins_pipe( pipe_cmpxchg );
+%}
+
 //----------Subtraction Instructions-------------------------------------------
 
 // Integer Subtraction Instructions
@@ -10771,6 +11014,14 @@
   ins_pipe(ialu_cr_reg_imm);
 %}
 
+instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{
+  match(Set cr (CmpN src zero));
+
+  format %{ "testl   $src, $src" %}
+  ins_encode %{ __ testl($src$$Register, $src$$Register); %}
+  ins_pipe(ialu_cr_reg_imm);
+%}
+
 // Yanked all unsigned pointer compare operations.
 // Pointer compares are done with CmpP which is already unsigned.
 
@@ -11018,6 +11269,7 @@
                                      rdi_RegP result)
 %{
   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
+  predicate(!UseCompressedOops); // decoding oop kills condition codes
   effect(KILL rcx, KILL result);
 
   ins_cost(1000);
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -196,7 +196,7 @@
   printf("\n");
 
   GEN_VALUE(OFFSET_HeapBlockHeader_used, offset_of(HeapBlock::Header, _used));
-  GEN_OFFS(oopDesc, _klass);
+  GEN_OFFS(oopDesc, _metadata);
   printf("\n");
 
   GEN_VALUE(AccessFlags_NATIVE, JVM_ACC_NATIVE);
--- a/hotspot/src/os/solaris/dtrace/jhelper.d	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os/solaris/dtrace/jhelper.d	Sun Apr 13 17:43:42 2008 -0400
@@ -46,6 +46,7 @@
 extern pointer __1cJCodeCacheF_heap_;
 extern pointer __1cIUniverseP_methodKlassObj_;
 extern pointer __1cIUniverseO_collectedHeap_;
+extern pointer __1cIUniverseK_heap_base_;
 
 extern pointer __1cHnmethodG__vtbl_;
 extern pointer __1cKBufferBlobG__vtbl_;
@@ -107,7 +108,7 @@
   copyin_offset(OFFSET_constantPoolOopDesc_pool_holder);
 
   copyin_offset(OFFSET_HeapBlockHeader_used);
-  copyin_offset(OFFSET_oopDesc_klass);
+  copyin_offset(OFFSET_oopDesc_metadata);
 
   copyin_offset(OFFSET_symbolOopDesc_length);
   copyin_offset(OFFSET_symbolOopDesc_body);
@@ -150,6 +151,7 @@
 
   this->Universe_methodKlassOop = copyin_ptr(&``__1cIUniverseP_methodKlassObj_);
   this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
+  this->Universe_heap_base = copyin_ptr(&``__1cIUniverseK_heap_base_);
 
   /* Reading volatile values */
   this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address + 
@@ -293,10 +295,27 @@
 
 dtrace:helper:ustack:
 /!this->done && this->vtbl == this->BufferBlob_vtbl &&
+this->Universe_heap_base == NULL &&
 this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
 {
   MARK_LINE;
-  this->klass = copyin_ptr(this->methodOopPtr + OFFSET_oopDesc_klass);
+  this->klass = copyin_ptr(this->methodOopPtr + OFFSET_oopDesc_metadata);
+  this->methodOop = this->klass == this->Universe_methodKlassOop;
+  this->done = !this->methodOop;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->vtbl == this->BufferBlob_vtbl &&
+this->Universe_heap_base != NULL &&
+this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
+{
+  MARK_LINE;
+  /*
+   * Read the compressed pointer and decode the heap oop, as in oop.inline.hpp.
+   */
+  this->cklass = copyin_uint32(this->methodOopPtr + OFFSET_oopDesc_metadata);
+  this->klass = (uint64_t)((uintptr_t)this->Universe_heap_base +
+                ((uintptr_t)this->cklass << 3));
   this->methodOop = this->klass == this->Universe_methodKlassOop;
   this->done = !this->methodOop;
 }
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c	Sun Apr 13 17:43:42 2008 -0400
@@ -148,9 +148,11 @@
 
   uint64_t Universe_methodKlassObj_address;
   uint64_t CodeCache_heap_address;
+  uint64_t Universe_heap_base_address;
 
   /* Volatiles */
   uint64_t Universe_methodKlassObj;
+  uint64_t Universe_heap_base;
   uint64_t CodeCache_low;
   uint64_t CodeCache_high;
   uint64_t CodeCache_segmap_low;
@@ -166,7 +168,6 @@
   Frame_t   curr_fr;
 };
 
-
 static int
 read_string(struct ps_prochandle *P,
         char *buf,              /* caller's buffer */
@@ -185,6 +186,14 @@
   return -1;
 }
 
+static int read_compressed_pointer(jvm_agent_t* J, uint64_t base, uint32_t *ptr) {
+  int err = -1;
+  uint32_t ptr32;
+  err = ps_pread(J->P, base, &ptr32, sizeof(uint32_t));
+  *ptr = ptr32;
+  return err;
+}
+
 static int read_pointer(jvm_agent_t* J, uint64_t base, uint64_t* ptr) {
   int err = -1;
   uint32_t ptr32;
@@ -270,6 +279,9 @@
       if (strcmp("_methodKlassObj", vmp->fieldName) == 0) {
         J->Universe_methodKlassObj_address = vmp->address;
       }
+      if (strcmp("_heap_base", vmp->fieldName) == 0) {
+        J->Universe_heap_base_address = vmp->address;
+      }
     }
     CHECK_FAIL(err);
 
@@ -292,6 +304,8 @@
 
   err = read_pointer(J, J->Universe_methodKlassObj_address, &J->Universe_methodKlassObj);
   CHECK_FAIL(err);
+  err = read_pointer(J, J->Universe_heap_base_address, &J->Universe_heap_base);
+  CHECK_FAIL(err);
   err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
                      OFFSET_VirtualSpace_low, &J->CodeCache_low);
   CHECK_FAIL(err);
@@ -444,7 +458,17 @@
 static int is_methodOop(jvm_agent_t* J, uint64_t methodOopPtr) {
   uint64_t klass;
   int err;
-  err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_klass, &klass);
+  // If heap_base is non-null, the klass field holds a compressed oop.
+  if (J->Universe_heap_base != NULL) {
+    uint32_t cklass;
+    err = read_compressed_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata,
+          &cklass);
+    // decode heap oop, same as oop.inline.hpp
+    klass = (uint64_t)((uintptr_t)J->Universe_heap_base +
+            ((uintptr_t)cklass << 3));
+  } else {
+    err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata, &klass);
+  }
   if (err != PS_OK) goto fail;
   return klass == J->Universe_methodKlassObj;
 
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -3116,7 +3116,7 @@
   // as reserve size, since on a 64-bit platform we'll run into that more
   // often than running out of virtual memory space.  We can use the
   // lower value of the two calculations as the os_thread_limit.
-  size_t max_address_space = ((size_t)1 << (BitsPerOop - 1)) - (200 * K * K);
+  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
 
   // at exit methods are called in the reverse order of their registration.
--- a/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s	Sun Apr 13 17:43:42 2008 -0400
@@ -33,7 +33,9 @@
     !! by the .il "call", in some cases optimizing the code, completely eliding it,
     !! or by moving the code from the "call site". 
         
-
+    !! Tell the assembler that we may use G6 for our own purposes
+    .register %g6, #ignore
+
     .globl  SafeFetch32
     .align  32
     .global Fetch32PFI, Fetch32Resume 
@@ -106,6 +108,7 @@
     .globl _raw_thread_id
     .align 32
  _raw_thread_id:
+    .register %g7, #scratch
         retl
         mov     %g7, %o0
  
--- a/hotspot/src/share/vm/adlc/archDesc.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/archDesc.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -867,6 +867,7 @@
   Form *form = (Form*)_globalNames[result];
   assert( form, "Result operand must be defined");
   OperandForm *oper = form->is_operand();
+  if (oper == NULL) form->dump();
   assert( oper, "Result must be an OperandForm");
   return reg_mask( *oper );
 }
@@ -908,6 +909,7 @@
   switch( last_char ) {
   case 'I':    return "TypeInt::INT";
   case 'P':    return "TypePtr::BOTTOM";
+  case 'N':    return "TypeNarrowOop::BOTTOM";
   case 'F':    return "Type::FLOAT";
   case 'D':    return "Type::DOUBLE";
   case 'L':    return "TypeLong::LONG";
@@ -944,7 +946,7 @@
   // Create InstructForm and assign type for each ideal instruction.
   for ( int j = _last_machine_leaf+1; j < _last_opcode; ++j) {
     char         *ident    = (char *)NodeClassNames[j];
-    if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") ||
+    if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") || !strcmp(ident, "ConN") ||
        !strcmp(ident, "ConF") || !strcmp(ident, "ConD") ||
        !strcmp(ident, "ConL") || !strcmp(ident, "Con" ) ||
        !strcmp(ident, "Bool") ) {
@@ -1109,6 +1111,7 @@
     if ( strcmp(idealName,"CmpI") == 0
          || strcmp(idealName,"CmpU") == 0
          || strcmp(idealName,"CmpP") == 0
+         || strcmp(idealName,"CmpN") == 0
          || strcmp(idealName,"CmpL") == 0
          || strcmp(idealName,"CmpD") == 0
          || strcmp(idealName,"CmpF") == 0
--- a/hotspot/src/share/vm/adlc/forms.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/forms.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -211,6 +211,7 @@
 
   if (strcmp(name,"ConI")==0) return Form::idealI;
   if (strcmp(name,"ConP")==0) return Form::idealP;
+  if (strcmp(name,"ConN")==0) return Form::idealN;
   if (strcmp(name,"ConL")==0) return Form::idealL;
   if (strcmp(name,"ConF")==0) return Form::idealF;
   if (strcmp(name,"ConD")==0) return Form::idealD;
@@ -256,6 +257,7 @@
   if( strcmp(opType,"LoadPLocked")==0 )  return Form::idealP;
   if( strcmp(opType,"LoadLLocked")==0 )  return Form::idealL;
   if( strcmp(opType,"LoadP")==0 )  return Form::idealP;
+  if( strcmp(opType,"LoadN")==0 )  return Form::idealN;
   if( strcmp(opType,"LoadRange")==0 )  return Form::idealI;
   if( strcmp(opType,"LoadS")==0 )  return Form::idealS;
   if( strcmp(opType,"Load16B")==0 )  return Form::idealB;
@@ -286,6 +288,7 @@
   if( strcmp(opType,"StoreI")==0)  return Form::idealI;
   if( strcmp(opType,"StoreL")==0)  return Form::idealL;
   if( strcmp(opType,"StoreP")==0)  return Form::idealP;
+  if( strcmp(opType,"StoreN")==0) return Form::idealN;
   if( strcmp(opType,"Store16B")==0)  return Form::idealB;
   if( strcmp(opType,"Store8B")==0)  return Form::idealB;
   if( strcmp(opType,"Store4B")==0)  return Form::idealB;
--- a/hotspot/src/share/vm/adlc/forms.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/forms.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -168,7 +168,8 @@
     idealD      =  5,  // Double  type
     idealB      =  6,  // Byte    type
     idealC      =  7,  // Char    type
-    idealS      =  8   // String  type
+    idealS      =  8,  // String  type
+    idealN      =  9   // Narrow oop types
   };
   // Convert ideal name to a DataType, return DataType::none if not a 'ConX'
   Form::DataType  ideal_to_const_type(const char *ideal_type_name) const;
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -726,6 +726,9 @@
   if( _matrule && _matrule->_rChild &&
        (!strcmp(_matrule->_rChild->_opType,"CastPP")     ||  // new result type
         !strcmp(_matrule->_rChild->_opType,"CastX2P")    ||  // new result type
+        !strcmp(_matrule->_rChild->_opType,"DecodeN")    ||
+        !strcmp(_matrule->_rChild->_opType,"EncodeP")    ||
+        !strcmp(_matrule->_rChild->_opType,"LoadN")      ||
         !strcmp(_matrule->_rChild->_opType,"CreateEx")   ||  // type of exception
         !strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true;
   else if ( is_ideal_load() == Form::idealP )                return true;
@@ -2101,6 +2104,7 @@
   if (strcmp(name,"RegF")==0) size =  1;
   if (strcmp(name,"RegD")==0) size =  2;
   if (strcmp(name,"RegL")==0) size =  2;
+  if (strcmp(name,"RegN")==0) size =  1;
   if (strcmp(name,"RegP")==0) size =  globalAD->get_preproc_def("_LP64") ? 2 : 1;
   if (size == 0) return false;
   return size == reg_class->size();
@@ -2365,11 +2369,12 @@
 
 void OperandForm::format_constant(FILE *fp, uint const_index, uint const_type) {
   switch(const_type) {
-  case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
-  case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n",         const_index); break;
-  case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
-  case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
-  case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
+  case Form::idealI:  fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
+  case Form::idealP:  fprintf(fp,"_c%d->dump_on(st);\n",         const_index); break;
+  case Form::idealN:  fprintf(fp,"_c%d->dump_on(st);\n",         const_index); break;
+  case Form::idealL:  fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
+  case Form::idealF:  fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
+  case Form::idealD:  fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
   default:
     assert( false, "ShouldNotReachHere()");
   }
@@ -3300,9 +3305,9 @@
 
 int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
   static const char *needs_ideal_memory_list[] = {
-    "StoreI","StoreL","StoreP","StoreD","StoreF" ,
+    "StoreI","StoreL","StoreP","StoreN","StoreD","StoreF" ,
     "StoreB","StoreC","Store" ,"StoreFP",
-    "LoadI" ,"LoadL", "LoadP" ,"LoadD" ,"LoadF"  ,
+    "LoadI" ,"LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF"  ,
     "LoadB" ,"LoadC" ,"LoadS" ,"Load"   ,
     "Store4I","Store2I","Store2L","Store2D","Store4F","Store2F","Store16B",
     "Store8B","Store4B","Store8C","Store4C","Store2C",
@@ -3311,7 +3316,7 @@
     "LoadRange", "LoadKlass", "LoadL_unaligned", "LoadD_unaligned",
     "LoadPLocked", "LoadLLocked",
     "StorePConditional", "StoreLConditional",
-    "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP",
+    "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
     "StoreCM",
     "ClearArray"
   };
@@ -3712,6 +3717,7 @@
     if( base_operand(position, globals, result, name, opType) &&
         (strcmp(opType,"RegI")==0 ||
          strcmp(opType,"RegP")==0 ||
+         strcmp(opType,"RegN")==0 ||
          strcmp(opType,"RegL")==0 ||
          strcmp(opType,"RegF")==0 ||
          strcmp(opType,"RegD")==0 ||
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1546,6 +1546,18 @@
 
     // Build a mapping from operand index to input edges
     fprintf(fp,"  unsigned idx0 = oper_input_base();\n");
+
+    // The order in which inputs are added to a node is very
+    // strange.  Store nodes get a memory input before Expand is
+    // called and all other nodes get it afterwards so
+    // oper_input_base is wrong during expansion.  This code adjusts
+    // it so that expansion will work correctly.
+    bool missing_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames) &&
+                               node->is_ideal_store() == Form::none;
+    if (missing_memory_edge) {
+      fprintf(fp,"  idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
+    }
+
     for( i = 0; i < node->num_opnds(); i++ ) {
       fprintf(fp,"  unsigned idx%d = idx%d + num%d;\n",
               i+1,i,i);
@@ -1600,8 +1612,10 @@
         int node_mem_op = node->memory_operand(_globalNames);
         assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND,
                 "expand rule member needs memory but top-level inst doesn't have any" );
-        // Copy memory edge
-        fprintf(fp,"  n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+        if (!missing_memory_edge) {
+          // Copy memory edge
+          fprintf(fp,"  n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+        }
       }
 
       // Iterate over the new instruction's operands
@@ -2363,6 +2377,8 @@
   fprintf(fp,"uint  %sNode::size(PhaseRegAlloc *ra_) const {\n",
           inst._ident);
 
+  fprintf(fp, " assert(VerifyOops || MachNode::size(ra_) <= %s, \"bad fixed size\");\n", inst._size);
+
   //(2)
   // Print the size
   fprintf(fp, " return (VerifyOops ? MachNode::size(ra_) : %s);\n", inst._size);
@@ -3426,6 +3442,8 @@
       fprintf(fp, "_leaf->get_int()");
     } else if ( (strcmp(optype,"ConP") == 0) ) {
       fprintf(fp, "_leaf->bottom_type()->is_ptr()");
+    } else if ( (strcmp(optype,"ConN") == 0) ) {
+      fprintf(fp, "_leaf->bottom_type()->is_narrowoop()");
     } else if ( (strcmp(optype,"ConF") == 0) ) {
       fprintf(fp, "_leaf->getf()");
     } else if ( (strcmp(optype,"ConD") == 0) ) {
--- a/hotspot/src/share/vm/adlc/output_h.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/adlc/output_h.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -203,6 +203,10 @@
       if (i > 0) fprintf(fp,", ");
       fprintf(fp,"  const TypePtr *_c%d;\n", i);
     }
+    else if (!strcmp(type, "ConN")) {
+      if (i > 0) fprintf(fp,", ");
+      fprintf(fp,"  const TypeNarrowOop *_c%d;\n", i);
+    }
     else if (!strcmp(type, "ConL")) {
       if (i > 0) fprintf(fp,", ");
       fprintf(fp,"  jlong          _c%d;\n", i);
@@ -235,6 +239,10 @@
         fprintf(fp,"  const TypePtr *_c%d;\n", i);
         i++;
       }
+      else if (!strcmp(comp->base_type(globals), "ConN")) {
+        fprintf(fp,"  const TypePtr *_c%d;\n", i);
+        i++;
+      }
       else if (!strcmp(comp->base_type(globals), "ConL")) {
         fprintf(fp,"  jlong            _c%d;\n", i);
         i++;
@@ -280,6 +288,7 @@
       fprintf(fp,is_ideal_bool ? "BoolTest::mask c%d" : "int32 c%d", i);
       break;
     }
+    case Form::idealN : { fprintf(fp,"const TypeNarrowOop *c%d", i); break; }
     case Form::idealP : { fprintf(fp,"const TypePtr *c%d", i); break; }
     case Form::idealL : { fprintf(fp,"jlong c%d", i);   break;        }
     case Form::idealF : { fprintf(fp,"jfloat c%d", i);  break;        }
@@ -302,6 +311,11 @@
         fprintf(fp,"const TypePtr *c%d", i);
         i++;
       }
+      else if (!strcmp(comp->base_type(globals), "ConN")) {
+        if (i > 0) fprintf(fp,", ");
+        fprintf(fp,"const TypePtr *c%d", i);
+        i++;
+      }
       else if (!strcmp(comp->base_type(globals), "ConL")) {
         if (i > 0) fprintf(fp,", ");
         fprintf(fp,"jlong c%d", i);
@@ -360,6 +374,10 @@
     fprintf(fp,"    _c%d->dump_on(st);\n", i);
     ++i;
   }
+  else if (!strcmp(ideal_type, "ConN")) {
+    fprintf(fp,"    _c%d->dump();\n", i);
+    ++i;
+  }
   else if (!strcmp(ideal_type, "ConL")) {
     fprintf(fp,"    st->print(\"#\" INT64_FORMAT, _c%d);\n", i);
     ++i;
@@ -417,8 +435,13 @@
           // Replacement variable
           const char *rep_var = oper._format->_rep_vars.iter();
           // Check that it is a local name, and an operand
-          OperandForm *op      = oper._localNames[rep_var]->is_operand();
-          assert( op, "replacement variable was not found in local names");
+          const Form* form = oper._localNames[rep_var];
+          if (form == NULL) {
+            globalAD->syntax_err(oper._linenum,
+                                 "\'%s\' not found in format for %s\n", rep_var, oper._ident);
+            assert(form, "replacement variable was not found in local names");
+          }
+          OperandForm *op      = form->is_operand();
           // Get index if register or constant
           if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
             idx  = oper.register_position( globals, rep_var);
@@ -483,9 +506,14 @@
         } else {
           // Replacement variable
           const char *rep_var = oper._format->_rep_vars.iter();
-          // Check that it is a local name, and an operand
-          OperandForm *op      = oper._localNames[rep_var]->is_operand();
-          assert( op, "replacement variable was not found in local names");
+         // Check that it is a local name, and an operand
+          const Form* form = oper._localNames[rep_var];
+          if (form == NULL) {
+            globalAD->syntax_err(oper._linenum,
+                                 "\'%s\' not found in format for %s\n", rep_var, oper._ident);
+            assert(form, "replacement variable was not found in local names");
+          }
+          OperandForm *op      = form->is_operand();
           // Get index if register or constant
           if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
             idx  = oper.register_position( globals, rep_var);
@@ -1163,7 +1191,7 @@
       if( type != NULL ) {
         Form::DataType data_type = oper->is_base_constant(_globalNames);
         // Check if we are an ideal pointer type
-        if( data_type == Form::idealP ) {
+        if( data_type == Form::idealP || data_type == Form::idealN ) {
           // Return the ideal type we already have: <TypePtr *>
           fprintf(fp," return _c0;");
         } else {
@@ -1291,6 +1319,16 @@
           fprintf(fp,   " return _c0->isa_oop_ptr();");
           fprintf(fp, " }\n");
         }
+        else if (!strcmp(oper->ideal_type(_globalNames), "ConN")) {
+          // Access the locally stored constant
+          fprintf(fp,"  virtual intptr_t       constant() const {");
+          fprintf(fp,   " return _c0->make_oopptr()->get_con();");
+          fprintf(fp, " }\n");
+          // Generate query to determine if this pointer is an oop
+          fprintf(fp,"  virtual bool           constant_is_oop() const {");
+          fprintf(fp,   " return _c0->make_oopptr()->isa_oop_ptr();");
+          fprintf(fp, " }\n");
+        }
         else if (!strcmp(oper->ideal_type(_globalNames), "ConL")) {
           fprintf(fp,"  virtual intptr_t       constant() const {");
           // We don't support addressing modes with > 4Gig offsets.
@@ -1748,6 +1786,7 @@
         fprintf(fp,"    return  TypeInt::make(opnd_array(1)->constant());\n");
         break;
       case Form::idealP:
+      case Form::idealN:
         fprintf(fp,"    return  opnd_array(1)->type();\n",result);
         break;
       case Form::idealD:
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -281,8 +281,10 @@
 
     // Need to return a pc, doesn't matter what it is since it will be
     // replaced during resolution later.
-    // (Don't return NULL or badAddress, since branches shouldn't overflow.)
-    return base;
+    // Don't return NULL or badAddress, since branches shouldn't overflow.
+    // Don't return base either because that could overflow displacements
+    // for shorter branches.  It will get checked when bound.
+    return branch_pc;
   }
 }
 
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1074,6 +1074,43 @@
 JRT_END
 
 
+// Array copy return codes.
+enum {
+  ac_failed = -1, // arraycopy failed
+  ac_ok = 0       // arraycopy succeeded
+};
+
+
+template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
+                                          oopDesc* dst, T* dst_addr,
+                                          int length) {
+
+  // For performance reasons, we assume we are using a card marking write
+  // barrier. The assert will fail if this is not the case.
+  // Note that we use the non-virtual inlineable variant of write_ref_array.
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  assert(bs->has_write_ref_array_opt(),
+         "Barrier set must have ref array opt");
+  if (src == dst) {
+    // same object, no check
+    Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
+    bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
+                                  (HeapWord*)(dst_addr + length)));
+    return ac_ok;
+  } else {
+    klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
+    klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
+    if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
+      // Elements are guaranteed to be subtypes, so no check necessary
+      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
+      bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
+                                    (HeapWord*)(dst_addr + length)));
+      return ac_ok;
+    }
+  }
+  return ac_failed;
+}
+
 // fast and direct copy of arrays; returning -1, means that an exception may be thrown
 // and we did not copy anything
 JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
@@ -1081,11 +1118,6 @@
   _generic_arraycopy_cnt++;        // Slow-path oop array copy
 #endif
 
-  enum {
-    ac_failed = -1, // arraycopy failed
-    ac_ok = 0       // arraycopy succeeded
-  };
-
   if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
   if (!dst->is_array() || !src->is_array()) return ac_failed;
   if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
@@ -1105,30 +1137,14 @@
     memmove(dst_addr, src_addr, length << l2es);
     return ac_ok;
   } else if (src->is_objArray() && dst->is_objArray()) {
-    oop* src_addr = objArrayOop(src)->obj_at_addr(src_pos);
-    oop* dst_addr = objArrayOop(dst)->obj_at_addr(dst_pos);
-    // For performance reasons, we assume we are using a card marking write
-    // barrier. The assert will fail if this is not the case.
-    // Note that we use the non-virtual inlineable variant of write_ref_array.
-    BarrierSet* bs = Universe::heap()->barrier_set();
-    assert(bs->has_write_ref_array_opt(),
-           "Barrier set must have ref array opt");
-    if (src == dst) {
-      // same object, no check
-      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-      bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                    (HeapWord*)(dst_addr + length)));
-      return ac_ok;
+    if (UseCompressedOops) {  // will need for tiered
+      narrowOop *src_addr  = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
+      narrowOop *dst_addr  = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
+      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
     } else {
-      klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
-      klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
-      if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
-        // Elements are guaranteed to be subtypes, so no check necessary
-        Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-        bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                      (HeapWord*)(dst_addr + length)));
-        return ac_ok;
-      }
+      oop *src_addr  = objArrayOop(src)->obj_at_addr<oop>(src_pos);
+      oop *dst_addr  = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
+      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
     }
   }
   return ac_failed;
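The template introduced above lets one body handle both element widths; the caller simply picks the pointer type at run time. A simplified stand-alone sketch of that dispatch pattern (type names are illustrative, not the VM's declarations):

    #include <cstdint>

    // Illustrative stand-ins for the VM's element types.
    typedef uint32_t  narrow_ref;   // compressed reference slot
    typedef uintptr_t wide_ref;     // full-width reference slot

    template <class T>
    int copy_work(const T* src, T* dst, int length) {
      // Single body instantiated for both element widths, as in
      // obj_arraycopy_work above (barrier and type checks omitted).
      for (int i = 0; i < length; i++) dst[i] = src[i];
      return 0;
    }

    int copy_elements(void* src, void* dst, int length, bool compressed) {
      // Runtime selection mirrors the UseCompressedOops branch above.
      return compressed
          ? copy_work((const narrow_ref*)src, (narrow_ref*)dst, length)
          : copy_work((const wide_ref*)src,   (wide_ref*)dst,   length);
    }
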
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -48,6 +48,7 @@
   // Next line must follow and use the result of the previous line:
   _is_linked = _is_initialized || ik->is_linked();
   _nonstatic_field_size = ik->nonstatic_field_size();
+  _has_nonstatic_fields = ik->has_nonstatic_fields();
   _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
 
   _nof_implementors = ik->nof_implementors();
@@ -93,6 +94,7 @@
   _is_initialized = false;
   _is_linked = false;
   _nonstatic_field_size = -1;
+  _has_nonstatic_fields = false;
   _nonstatic_fields = NULL;
   _nof_implementors = -1;
   _loader = loader;
@@ -201,7 +203,7 @@
   assert(offset >= 0 && offset < layout_helper(), "offset must be tame");
   #endif
 
-  if (offset < (instanceOopDesc::header_size() * wordSize)) {
+  if (offset < instanceOopDesc::base_offset_in_bytes()) {
     // All header offsets belong properly to java/lang/Object.
     return CURRENT_ENV->Object_klass();
   }
@@ -210,7 +212,8 @@
   for (;;) {
     assert(self->is_loaded(), "must be loaded to have size");
     ciInstanceKlass* super = self->super();
-    if (super == NULL || !super->contains_field_offset(offset)) {
+    if (super == NULL || super->nof_nonstatic_fields() == 0 ||
+        !super->contains_field_offset(offset)) {
       return self;
     } else {
       self = super;  // return super->get_canonical_holder(offset)
@@ -381,31 +384,28 @@
   if (_nonstatic_fields != NULL)
     return _nonstatic_fields->length();
 
-  // Size in bytes of my fields, including inherited fields.
-  // About equal to size_helper() - sizeof(oopDesc).
-  int fsize = nonstatic_field_size() * wordSize;
-  if (fsize == 0) {     // easy shortcut
+  if (!has_nonstatic_fields()) {
     Arena* arena = CURRENT_ENV->arena();
     _nonstatic_fields = new (arena) GrowableArray<ciField*>(arena, 0, 0, NULL);
     return 0;
   }
   assert(!is_java_lang_Object(), "bootstrap OK");
 
+  // Size in bytes of my fields, including inherited fields.
+  int fsize = nonstatic_field_size() * wordSize;
+
   ciInstanceKlass* super = this->super();
-  int      super_fsize = 0;
-  int      super_flen  = 0;
   GrowableArray<ciField*>* super_fields = NULL;
-  if (super != NULL) {
-    super_fsize  = super->nonstatic_field_size() * wordSize;
-    super_flen   = super->nof_nonstatic_fields();
+  if (super != NULL && super->has_nonstatic_fields()) {
+    int super_fsize  = super->nonstatic_field_size() * wordSize;
+    int super_flen   = super->nof_nonstatic_fields();
     super_fields = super->_nonstatic_fields;
     assert(super_flen == 0 || super_fields != NULL, "first get nof_fields");
-  }
-
-  // See if I am no larger than my super; if so, I can use his fields.
-  if (fsize == super_fsize) {
-    _nonstatic_fields = super_fields;
-    return super_fields->length();
+    // See if I am no larger than my super; if so, I can use his fields.
+    if (fsize == super_fsize) {
+      _nonstatic_fields = super_fields;
+      return super_fields->length();
+    }
   }
 
   GrowableArray<ciField*>* fields = NULL;
@@ -425,11 +425,11 @@
   // (In principle, they could mix with superclass fields.)
   fields->sort(sort_field_by_offset);
 #ifdef ASSERT
-  int last_offset = sizeof(oopDesc);
+  int last_offset = instanceOopDesc::base_offset_in_bytes();
   for (int i = 0; i < fields->length(); i++) {
     ciField* field = fields->at(i);
     int offset = field->offset_in_bytes();
-    int size   = (field->_type == NULL) ? oopSize : field->size_in_bytes();
+    int size   = (field->_type == NULL) ? heapOopSize : field->size_in_bytes();
     assert(last_offset <= offset, "no field overlap");
     if (last_offset > (int)sizeof(oopDesc))
       assert((offset - last_offset) < BytesPerLong, "no big holes");
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -35,15 +35,16 @@
   friend class ciBytecodeStream;
 
 private:
-  bool                   _is_shared;
-
   jobject                _loader;
   jobject                _protection_domain;
 
+  bool                   _is_shared;
   bool                   _is_initialized;
   bool                   _is_linked;
   bool                   _has_finalizer;
   bool                   _has_subklass;
+  bool                   _has_nonstatic_fields;
+
   ciFlags                _flags;
   jint                   _nonstatic_field_size;
   jint                   _nonstatic_oop_map_size;
@@ -132,6 +133,9 @@
   jint                   nonstatic_field_size()  {
     assert(is_loaded(), "must be loaded");
     return _nonstatic_field_size; }
+  jint                   has_nonstatic_fields()  {
+    assert(is_loaded(), "must be loaded");
+    return _has_nonstatic_fields; }
   jint                   nonstatic_oop_map_size()  {
     assert(is_loaded(), "must be loaded");
     return _nonstatic_oop_map_size; }
@@ -164,8 +168,7 @@
   bool has_finalizable_subclass();
 
   bool contains_field_offset(int offset) {
-      return (offset/wordSize) >= instanceOopDesc::header_size()
-             && (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size();
+    return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size());
   }
 
   // Get the instance of java.lang.Class corresponding to
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -121,7 +121,7 @@
 
   for (int i = T_BOOLEAN; i <= T_CONFLICT; i++) {
     BasicType t = (BasicType)i;
-    if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY) {
+    if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY && t != T_NARROWOOP) {
       ciType::_basic_types[t] = new (_arena) ciType(t);
       init_ident_of(ciType::_basic_types[t]);
     }
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -2341,7 +2341,7 @@
   // Incrementing next_nonstatic_oop_offset here advances the
   // location where the real java fields are placed.
   const int extra = java_lang_Class::number_of_fake_oop_fields;
-  (*next_nonstatic_oop_offset_ptr) += (extra * wordSize);
+  (*next_nonstatic_oop_offset_ptr) += (extra * heapOopSize);
 }
 
 
@@ -2647,7 +2647,7 @@
                                   align_object_offset(vtable_size) +
                                   align_object_offset(itable_size)) * wordSize;
     next_static_double_offset   = next_static_oop_offset +
-                                  (fac.static_oop_count * oopSize);
+                                  (fac.static_oop_count * heapOopSize);
     if ( fac.static_double_count &&
          (Universe::field_type_should_be_aligned(T_DOUBLE) ||
           Universe::field_type_should_be_aligned(T_LONG)) ) {
@@ -2687,6 +2687,14 @@
     int nonstatic_byte_count   = fac.nonstatic_byte_count;
     int nonstatic_oop_count    = fac.nonstatic_oop_count;
 
+    bool super_has_nonstatic_fields =
+            (super_klass() != NULL && super_klass->has_nonstatic_fields());
+    bool has_nonstatic_fields  =  super_has_nonstatic_fields ||
+            ((nonstatic_double_count + nonstatic_word_count +
+              nonstatic_short_count + nonstatic_byte_count +
+              nonstatic_oop_count) != 0);
+
+
     // Prepare list of oops for oop maps generation.
     u2* nonstatic_oop_offsets;
     u2* nonstatic_oop_length;
@@ -2703,7 +2711,7 @@
       java_lang_Class_fix_post(&next_nonstatic_field_offset);
       nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset;
       int fake_oop_count       = (( next_nonstatic_field_offset -
-                                    first_nonstatic_field_offset ) / oopSize);
+                                    first_nonstatic_field_offset ) / heapOopSize);
       nonstatic_oop_length [0] = (u2)fake_oop_count;
       nonstatic_oop_map_count  = 1;
       nonstatic_oop_count     -= fake_oop_count;
@@ -2715,7 +2723,7 @@
 #ifndef PRODUCT
     if( PrintCompactFieldsSavings ) {
       next_nonstatic_double_offset = next_nonstatic_field_offset +
-                                     (nonstatic_oop_count * oopSize);
+                                     (nonstatic_oop_count * heapOopSize);
       if ( nonstatic_double_count > 0 ) {
         next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong);
       }
@@ -2749,7 +2757,15 @@
          class_name() == vmSymbols::java_lang_ref_SoftReference() ||
          class_name() == vmSymbols::java_lang_StackTraceElement() ||
          class_name() == vmSymbols::java_lang_String() ||
-         class_name() == vmSymbols::java_lang_Throwable()) ) {
+         class_name() == vmSymbols::java_lang_Throwable() ||
+         class_name() == vmSymbols::java_lang_Boolean() ||
+         class_name() == vmSymbols::java_lang_Character() ||
+         class_name() == vmSymbols::java_lang_Float() ||
+         class_name() == vmSymbols::java_lang_Double() ||
+         class_name() == vmSymbols::java_lang_Byte() ||
+         class_name() == vmSymbols::java_lang_Short() ||
+         class_name() == vmSymbols::java_lang_Integer() ||
+         class_name() == vmSymbols::java_lang_Long())) {
       allocation_style = 0;     // Allocate oops first
       compact_fields   = false; // Don't compact fields
     }
@@ -2758,7 +2774,7 @@
       // Fields order: oops, longs/doubles, ints, shorts/chars, bytes
       next_nonstatic_oop_offset    = next_nonstatic_field_offset;
       next_nonstatic_double_offset = next_nonstatic_oop_offset +
-                                     (nonstatic_oop_count * oopSize);
+                                      (nonstatic_oop_count * heapOopSize);
     } else if( allocation_style == 1 ) {
       // Fields order: longs/doubles, ints, shorts/chars, bytes, oops
       next_nonstatic_double_offset = next_nonstatic_field_offset;
@@ -2775,8 +2791,18 @@
     int nonstatic_short_space_offset;
     int nonstatic_byte_space_offset;
 
-    if( nonstatic_double_count > 0 ) {
-      int offset = next_nonstatic_double_offset;
+    bool compact_into_header = (UseCompressedOops &&
+                                allocation_style == 1 && compact_fields &&
+                                !super_has_nonstatic_fields);
+
+    if( compact_into_header || nonstatic_double_count > 0 ) {
+      int offset;
+      // Pack something in with the header if no super klass has done so.
+      if (compact_into_header) {
+        offset = oopDesc::klass_gap_offset_in_bytes();
+      } else {
+        offset = next_nonstatic_double_offset;
+      }
       next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
       if( compact_fields && offset != next_nonstatic_double_offset ) {
         // Allocate available fields into the gap before double field.
@@ -2804,12 +2830,13 @@
         }
         // Allocate oop field in the gap if there are no other fields for that.
         nonstatic_oop_space_offset = offset;
-        if( length >= oopSize && nonstatic_oop_count > 0 &&
+        if(!compact_into_header && length >= heapOopSize &&
+            nonstatic_oop_count > 0 &&
             allocation_style != 0 ) { // when oop fields not first
           nonstatic_oop_count      -= 1;
           nonstatic_oop_space_count = 1; // Only one will fit
-          length -= oopSize;
-          offset += oopSize;
+          length -= heapOopSize;
+          offset += heapOopSize;
         }
       }
     }
@@ -2828,9 +2855,9 @@
       next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
       if( nonstatic_oop_count > 0 ) {
         notaligned_offset = next_nonstatic_oop_offset;
-        next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, oopSize);
+        next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
       }
-      notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * oopSize);
+      notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
     }
     next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
     nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
@@ -2846,7 +2873,7 @@
       switch (atype) {
         case STATIC_OOP:
           real_offset = next_static_oop_offset;
-          next_static_oop_offset += oopSize;
+          next_static_oop_offset += heapOopSize;
           break;
         case STATIC_BYTE:
           real_offset = next_static_byte_offset;
@@ -2868,16 +2895,16 @@
         case NONSTATIC_OOP:
           if( nonstatic_oop_space_count > 0 ) {
             real_offset = nonstatic_oop_space_offset;
-            nonstatic_oop_space_offset += oopSize;
+            nonstatic_oop_space_offset += heapOopSize;
             nonstatic_oop_space_count  -= 1;
           } else {
             real_offset = next_nonstatic_oop_offset;
-            next_nonstatic_oop_offset += oopSize;
+            next_nonstatic_oop_offset += heapOopSize;
           }
           // Update oop maps
           if( nonstatic_oop_map_count > 0 &&
               nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
-              (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * oopSize) ) {
+              (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * heapOopSize) ) {
             // Extend current oop map
             nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1;
           } else {
@@ -2970,6 +2997,7 @@
     //this_klass->set_super(super_klass());
     this_klass->set_class_loader(class_loader());
     this_klass->set_nonstatic_field_size(nonstatic_field_size);
+    this_klass->set_has_nonstatic_fields(has_nonstatic_fields);
     this_klass->set_static_oop_field_size(fac.static_oop_count);
     cp->set_pool_holder(this_klass());
     this_klass->set_constants(cp());
@@ -3128,7 +3156,7 @@
       OopMapBlock* first_map = super->start_of_nonstatic_oop_maps();
       OopMapBlock* last_map = first_map + map_size - 1;
 
-      int next_offset = last_map->offset() + (last_map->length() * oopSize);
+      int next_offset = last_map->offset() + (last_map->length() * heapOopSize);
       if (next_offset == first_nonstatic_oop_offset) {
        // There is no gap between superklass's last oop field and first
         // local oop field, merge maps.
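The compact_into_header case above relies on the header shape under compressed oops: an 8-byte mark word plus a 4-byte compressed klass reference leaves a 4-byte gap before the first 8-byte-aligned field slot. A hypothetical layout sketch (offsets assumed for a 64-bit VM, not taken from the VM headers):

    #include <cstdint>

    // Hypothetical 64-bit object start under compressed oops; for illustration.
    //   offset  0 : mark word                  (8 bytes)
    //   offset  8 : compressed klass reference (4 bytes)
    //   offset 12 : klass gap                  (4 bytes)  <- a small field can go here
    //   offset 16 : next 8-byte-aligned field slot
    struct example_header_layout {
      uint64_t mark;          // header word
      uint32_t narrow_klass;  // compressed klass pointer
      uint32_t packed_field;  // e.g. an int field compacted into the klass gap
    };
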
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -520,16 +520,12 @@
 
 
 JavaThread* java_lang_Thread::thread(oop java_thread) {
-  return (JavaThread*) java_thread->obj_field(_eetop_offset);
+  return (JavaThread*)java_thread->address_field(_eetop_offset);
 }
 
 
 void java_lang_Thread::set_thread(oop java_thread, JavaThread* thread) {
-  // We are storing a JavaThread* (malloc'ed data) into a long field in the thread
-  // object. The store has to be 64-bit wide so we use a pointer store, but we
-  // cannot call oopDesc::obj_field_put since it includes a write barrier!
-  oop* addr = java_thread->obj_field_addr(_eetop_offset);
-  *addr = (oop) thread;
+  java_thread->address_field_put(_eetop_offset, (address)thread);
 }
 
 
@@ -1038,8 +1034,8 @@
     if (_dirty && _methods != NULL) {
       BarrierSet* bs = Universe::heap()->barrier_set();
       assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-      bs->write_ref_array(MemRegion((HeapWord*)_methods->obj_at_addr(0),
-                                    _methods->length() * HeapWordsPerOop));
+      bs->write_ref_array(MemRegion((HeapWord*)_methods->base(),
+                                    _methods->array_size()));
       _dirty = false;
     }
   }
@@ -1083,8 +1079,9 @@
       method = mhandle();
     }
 
-    // _methods->obj_at_put(_index, method);
-    *_methods->obj_at_addr(_index) = method;
+     _methods->obj_at_put(_index, method);
+    // bad for UseCompressedOops
+    // *_methods->obj_at_addr(_index) = method;
     _bcis->ushort_at_put(_index, bci);
     _index++;
     _dirty = true;
@@ -1973,39 +1970,30 @@
 
 
 // Support for java_lang_ref_Reference
-
-void java_lang_ref_Reference::set_referent(oop ref, oop value) {
-  ref->obj_field_put(referent_offset, value);
-}
-
-oop* java_lang_ref_Reference::referent_addr(oop ref) {
-  return ref->obj_field_addr(referent_offset);
-}
-
-void java_lang_ref_Reference::set_next(oop ref, oop value) {
-  ref->obj_field_put(next_offset, value);
-}
-
-oop* java_lang_ref_Reference::next_addr(oop ref) {
-  return ref->obj_field_addr(next_offset);
+oop java_lang_ref_Reference::pending_list_lock() {
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+  char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset);
+  if (UseCompressedOops) {
+    return oopDesc::load_decode_heap_oop((narrowOop *)addr);
+  } else {
+    return oopDesc::load_decode_heap_oop((oop*)addr);
+  }
 }
 
-void java_lang_ref_Reference::set_discovered(oop ref, oop value) {
-  ref->obj_field_put(discovered_offset, value);
-}
-
-oop* java_lang_ref_Reference::discovered_addr(oop ref) {
-  return ref->obj_field_addr(discovered_offset);
+HeapWord *java_lang_ref_Reference::pending_list_addr() {
+  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+  char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset);
+  // XXX This might not be HeapWord aligned, almost rather be char *.
+  return (HeapWord*)addr;
 }
 
-oop* java_lang_ref_Reference::pending_list_lock_addr() {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
-  return (oop*)(((char *)ik->start_of_static_fields()) + static_lock_offset);
-}
-
-oop* java_lang_ref_Reference::pending_list_addr() {
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
-  return (oop *)(((char *)ik->start_of_static_fields()) + static_pending_offset);
+oop java_lang_ref_Reference::pending_list() {
+  char *addr = (char *)pending_list_addr();
+  if (UseCompressedOops) {
+    return oopDesc::load_decode_heap_oop((narrowOop *)addr);
+  } else {
+    return oopDesc::load_decode_heap_oop((oop*)addr);
+  }
 }
 
 
@@ -2291,8 +2279,11 @@
 // Invoked before SystemDictionary::initialize, so pre-loaded classes
 // are not available to determine the offset_of_static_fields.
 void JavaClasses::compute_hard_coded_offsets() {
-  const int x = wordSize;
-  const int header = instanceOopDesc::header_size_in_bytes();
+  const int x = heapOopSize;
+  // Objects don't get allocated in the gap in the header with compressed oops
+  // for these special classes because hard coded offsets can't be conditional
+  // so base_offset_in_bytes() is wrong here, allocate after the header.
+  const int header = sizeof(instanceOopDesc);
 
   // Do the String Class
   java_lang_String::value_offset  = java_lang_String::hc_value_offset  * x + header;
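The rewritten static accessors above all follow one pattern: compute the raw field address, then load it as either a 32-bit narrow oop or a full-width oop depending on UseCompressedOops. A stand-alone sketch of that load-and-decode idiom (names and the 3-bit shift are assumptions, not the VM's declarations):

    #include <cstdint>

    // Illustrative stand-ins; not the VM's declarations.
    typedef uint32_t  narrow_ref;
    typedef uintptr_t wide_ref;

    // Load a heap reference from a raw field address, decoding when the VM
    // stores compressed (narrow) references, as in pending_list_lock() above.
    inline wide_ref load_heap_ref(const char* field_addr,
                                  bool compressed, wide_ref heap_base) {
      if (compressed) {
        narrow_ref n = *(const narrow_ref*)field_addr;
        return n == 0 ? 0 : heap_base + ((wide_ref)n << 3);   // assumed shift of 3
      }
      return *(const wide_ref*)field_addr;
    }
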
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -691,24 +691,47 @@
   static int number_of_fake_oop_fields;
 
   // Accessors
-  static oop referent(oop ref)        { return *referent_addr(ref); }
-  static void set_referent(oop ref, oop value);
-  static oop* referent_addr(oop ref);
-
-  static oop next(oop ref)            { return *next_addr(ref); }
-  static void set_next(oop ref, oop value);
-  static oop* next_addr(oop ref);
+  static oop referent(oop ref) {
+    return ref->obj_field(referent_offset);
+  }
+  static void set_referent(oop ref, oop value) {
+    ref->obj_field_put(referent_offset, value);
+  }
+  static void set_referent_raw(oop ref, oop value) {
+    ref->obj_field_raw_put(referent_offset, value);
+  }
+  static HeapWord* referent_addr(oop ref) {
+    return ref->obj_field_addr<HeapWord>(referent_offset);
+  }
+  static oop next(oop ref) {
+    return ref->obj_field(next_offset);
+  }
+  static void set_next(oop ref, oop value) {
+    ref->obj_field_put(next_offset, value);
+  }
+  static void set_next_raw(oop ref, oop value) {
+    ref->obj_field_raw_put(next_offset, value);
+  }
+  static HeapWord* next_addr(oop ref) {
+    return ref->obj_field_addr<HeapWord>(next_offset);
+  }
+  static oop discovered(oop ref) {
+    return ref->obj_field(discovered_offset);
+  }
+  static void set_discovered(oop ref, oop value) {
+    ref->obj_field_put(discovered_offset, value);
+  }
+  static void set_discovered_raw(oop ref, oop value) {
+    ref->obj_field_raw_put(discovered_offset, value);
+  }
+  static HeapWord* discovered_addr(oop ref) {
+    return ref->obj_field_addr<HeapWord>(discovered_offset);
+  }
+  // Accessors for statics
+  static oop  pending_list_lock();
+  static oop  pending_list();
 
-  static oop discovered(oop ref)      { return *discovered_addr(ref); }
-  static void set_discovered(oop ref, oop value);
-  static oop* discovered_addr(oop ref);
-
-  // Accessors for statics
-  static oop  pending_list_lock()     { return *pending_list_lock_addr(); }
-  static oop  pending_list()          { return *pending_list_addr(); }
-
-  static oop* pending_list_lock_addr();
-  static oop* pending_list_addr();
+  static HeapWord*  pending_list_addr();
 };
 
 
--- a/hotspot/src/share/vm/compiler/oopMap.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/compiler/oopMap.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -169,11 +169,8 @@
 }
 
 
-void OopMap::set_dead(VMReg reg) {
-  // At this time, we only need dead entries in our OopMap when ZapDeadCompiledLocals is active.
-  if (ZapDeadCompiledLocals) {
-    set_xxx(reg, OopMapValue::dead_value, VMRegImpl::Bad());
-  }
+void OopMap::set_narrowoop(VMReg reg) {
+  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
 }
 
 
@@ -305,7 +302,9 @@
 }
 
 class DoNothingClosure: public OopClosure {
-public: void do_oop(oop* p) {}
+ public:
+  void do_oop(oop* p)       {}
+  void do_oop(narrowOop* p) {}
 };
 static DoNothingClosure do_nothing;
 
@@ -349,23 +348,21 @@
 
 void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
   // add derived oops to a table
-  all_do(fr, reg_map, f, add_derived_oop, &do_nothing, &do_nothing);
+  all_do(fr, reg_map, f, add_derived_oop, &do_nothing);
 }
 
 
 void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
                        OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
-                       OopClosure* value_fn, OopClosure* dead_fn) {
+                       OopClosure* value_fn) {
   CodeBlob* cb = fr->cb();
-  {
-    assert(cb != NULL, "no codeblob");
-  }
+  assert(cb != NULL, "no codeblob");
 
   NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)
 
   OopMapSet* maps = cb->oop_maps();
-  OopMap* map  = cb->oop_map_for_return_address(fr->pc());
-  assert(map != NULL, " no ptr map found");
+  OopMap* map = cb->oop_map_for_return_address(fr->pc());
+  assert(map != NULL, "no ptr map found");
 
   // handle derived pointers first (otherwise base pointer may be
   // changed before derived pointer offset has been collected)
@@ -393,8 +390,8 @@
     }
   }
 
-  // We want dead, value and oop oop_types
-  int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::dead_value;
+  // We want coop, value and oop oop_types
+  int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::narrowoop_value;
   {
     for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) {
       omv = oms.current();
@@ -402,11 +399,15 @@
       if ( loc != NULL ) {
         if ( omv.type() == OopMapValue::oop_value ) {
 #ifdef ASSERT
-          if (COMPILER2_PRESENT(!DoEscapeAnalysis &&) !Universe::heap()->is_in_or_null(*loc)) {
+          if (COMPILER2_PRESENT(!DoEscapeAnalysis &&)
+             (((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
+             !Universe::heap()->is_in_or_null(*loc)) {
             tty->print_cr("# Found non oop pointer.  Dumping state at failure");
             // try to dump out some helpful debugging information
             trace_codeblob_maps(fr, reg_map);
             omv.print();
+            tty->print_cr("register r");
+            omv.reg()->print();
             tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
             // do the real assert.
             assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
@@ -415,8 +416,17 @@
           oop_fn->do_oop(loc);
         } else if ( omv.type() == OopMapValue::value_value ) {
           value_fn->do_oop(loc);
-        } else if ( omv.type() == OopMapValue::dead_value ) {
-          dead_fn->do_oop(loc);
+        } else if ( omv.type() == OopMapValue::narrowoop_value ) {
+          narrowOop *nl = (narrowOop*)loc;
+#ifndef VM_LITTLE_ENDIAN
+          if (!omv.reg()->is_stack()) {
+            // compressed oops in registers only take up 4 bytes of an
+            // 8 byte register but they are in the wrong part of the
+            // word so adjust loc to point at the right place.
+            nl = (narrowOop*)((address)nl + 4);
+          }
+#endif
+          oop_fn->do_oop(nl);
         }
       }
     }
@@ -519,8 +529,8 @@
   case OopMapValue::value_value:
     st->print("Value" );
     break;
-  case OopMapValue::dead_value:
-    st->print("Dead" );
+  case OopMapValue::narrowoop_value:
+    tty->print("NarrowOop" );
     break;
   case OopMapValue::callee_saved_value:
     st->print("Callers_" );
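The endian adjustment above exists because a 32-bit narrow oop held in a 64-bit register slot occupies the low-order half of the word, and on a big-endian layout that half is the second 4 bytes of the slot. A small sketch of the address arithmetic involved (assumptions noted in the comments):

    #include <cstdint>

    typedef uint32_t narrow_ref;   // illustrative 32-bit compressed slot

    // Given the address of an 8-byte register save slot holding a narrow oop
    // in its low-order 32 bits, return the address of those 32 bits.  On a
    // big-endian layout the low half sits in the second 4 bytes of the slot,
    // which is why the hunk above bumps the pointer by 4 for register values.
    inline narrow_ref* narrow_slot_addr(void* slot, bool big_endian) {
      char* p = (char*)slot;
      return (narrow_ref*)(big_endian ? p + 4 : p);
    }
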
--- a/hotspot/src/share/vm/compiler/oopMap.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/compiler/oopMap.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -61,7 +61,7 @@
          unused_value =0,       // powers of 2, for masking OopMapStream
          oop_value = 1,
          value_value = 2,
-         dead_value = 4,
+         narrowoop_value = 4,
          callee_saved_value = 8,
          derived_oop_value= 16,
          stack_obj = 32 };
@@ -90,14 +90,14 @@
   // Querying
   bool is_oop()               { return mask_bits(value(), type_mask_in_place) == oop_value; }
   bool is_value()             { return mask_bits(value(), type_mask_in_place) == value_value; }
-  bool is_dead()              { return mask_bits(value(), type_mask_in_place) == dead_value; }
+  bool is_narrowoop()           { return mask_bits(value(), type_mask_in_place) == narrowoop_value; }
   bool is_callee_saved()      { return mask_bits(value(), type_mask_in_place) == callee_saved_value; }
   bool is_derived_oop()       { return mask_bits(value(), type_mask_in_place) == derived_oop_value; }
   bool is_stack_obj()         { return mask_bits(value(), type_mask_in_place) == stack_obj; }
 
   void set_oop()              { set_value((value() & register_mask_in_place) | oop_value); }
   void set_value()            { set_value((value() & register_mask_in_place) | value_value); }
-  void set_dead()             { set_value((value() & register_mask_in_place) | dead_value); }
+  void set_narrowoop()          { set_value((value() & register_mask_in_place) | narrowoop_value); }
   void set_callee_saved()     { set_value((value() & register_mask_in_place) | callee_saved_value); }
   void set_derived_oop()      { set_value((value() & register_mask_in_place) | derived_oop_value); }
   void set_stack_obj()        { set_value((value() & register_mask_in_place) | stack_obj); }
@@ -176,6 +176,7 @@
   // slots to hold 4-byte values like ints and floats in the LP64 build.
   void set_oop  ( VMReg local);
   void set_value( VMReg local);
+  void set_narrowoop(VMReg local);
   void set_dead ( VMReg local);
   void set_callee_saved( VMReg local, VMReg caller_machine_register );
   void set_derived_oop ( VMReg local, VMReg derived_from_local_register );
@@ -245,7 +246,7 @@
   static void all_do(const frame* fr, const RegisterMap* reg_map,
                      OopClosure* oop_fn,
                      void derived_oop_fn(oop* base, oop* derived),
-                     OopClosure* value_fn, OopClosure* dead_fn);
+                     OopClosure* value_fn);
 
   // Printing
   void print_on(outputStream* st) const;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -29,22 +29,34 @@
 class CMSBitMap;
 class CMSMarkStack;
 class CMSCollector;
-template<class E> class GenericTaskQueue;
-typedef GenericTaskQueue<oop> OopTaskQueue;
-template<class E> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
 class MarkFromRootsClosure;
 class Par_MarkFromRootsClosure;
 
+// Decode the oop and call do_oop on it.
+#define DO_OOP_WORK_DEFN \
+  void do_oop(oop obj);                                   \
+  template <class T> inline void do_oop_work(T* p) {      \
+    T heap_oop = oopDesc::load_heap_oop(p);               \
+    if (!oopDesc::is_null(heap_oop)) {                    \
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);       \
+      do_oop(obj);                                        \
+    }                                                     \
+  }
+
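The DO_OOP_WORK_DEFN macro above captures the load / null-check / decode / dispatch pattern each closure repeats for both oop* and narrowOop* slots. A simplified stand-alone sketch of the same shape (types, shift, and loads are assumptions, not the VM's):

    #include <cstdint>

    typedef uint32_t narrow_ref;   // illustrative compressed slot
    typedef uint64_t obj_ref;      // illustrative decoded reference

    struct ExampleClosure {
      obj_ref heap_base;           // assumed narrow-oop base

      void do_oop(obj_ref obj) { (void)obj; /* mark, push, etc. */ }

      // One templated body handles both slot widths, like do_oop_work above.
      template <class T> void do_oop_work(T* p) {
        T v = *p;                                  // load the slot
        if (v != 0) {                              // skip null references
          do_oop(decode(v));
        }
      }

      obj_ref decode(narrow_ref v) { return heap_base + ((obj_ref)v << 3); }
      obj_ref decode(obj_ref v)    { return v; }   // wide slots need no decode
    };
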
 class MarkRefsIntoClosure: public OopsInGenClosure {
-  const MemRegion    _span;
-  CMSBitMap*         _bitMap;
-  const bool         _should_do_nmethods;
+ private:
+  const MemRegion _span;
+  CMSBitMap*      _bitMap;
+  const bool      _should_do_nmethods;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
                       bool should_do_nmethods);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const {
     return _should_do_nmethods;
@@ -57,15 +69,20 @@
 // A variant of the above used in certain kinds of CMS
 // marking verification.
 class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
-  const MemRegion    _span;
-  CMSBitMap*         _verification_bm;
-  CMSBitMap*         _cms_bm;
-  const bool         _should_do_nmethods;
+ private:
+  const MemRegion _span;
+  CMSBitMap*      _verification_bm;
+  CMSBitMap*      _cms_bm;
+  const bool      _should_do_nmethods;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm, bool should_do_nmethods);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const {
     return _should_do_nmethods;
@@ -75,37 +92,40 @@
   }
 };
 
-
 // The non-parallel version (the parallel version appears further below).
 class PushAndMarkClosure: public OopClosure {
-  CMSCollector*    _collector;
-  MemRegion        _span;
-  CMSBitMap*       _bit_map;
-  CMSBitMap*       _mod_union_table;
-  CMSMarkStack*    _mark_stack;
-  CMSMarkStack*    _revisit_stack;
-  bool             _concurrent_precleaning;
-  bool     const   _should_remember_klasses;
+ private:
+  CMSCollector* _collector;
+  MemRegion     _span;
+  CMSBitMap*    _bit_map;
+  CMSBitMap*    _mod_union_table;
+  CMSMarkStack* _mark_stack;
+  CMSMarkStack* _revisit_stack;
+  bool          _concurrent_precleaning;
+  bool const    _should_remember_klasses;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   PushAndMarkClosure(CMSCollector* collector,
                      MemRegion span,
                      ReferenceProcessor* rp,
                      CMSBitMap* bit_map,
                      CMSBitMap* mod_union_table,
-                     CMSMarkStack*  mark_stack,
-                     CMSMarkStack*  revisit_stack,
-                     bool           concurrent_precleaning);
-
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p)  { PushAndMarkClosure::do_oop(p); }
+                     CMSMarkStack* mark_stack,
+                     CMSMarkStack* revisit_stack,
+                     bool concurrent_precleaning);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
   bool do_header() { return true; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  const bool should_remember_klasses() const {
+  virtual const bool should_remember_klasses() const {
     return _should_remember_klasses;
   }
-  void remember_klass(Klass* k);
+  virtual void remember_klass(Klass* k);
 };
 
 // In the parallel case, the revisit stack, the bit map and the
@@ -115,12 +135,15 @@
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
 class Par_PushAndMarkClosure: public OopClosure {
-  CMSCollector*    _collector;
-  MemRegion        _span;
-  CMSBitMap*       _bit_map;
-  OopTaskQueue*    _work_queue;
-  CMSMarkStack*    _revisit_stack;
-  bool     const   _should_remember_klasses;
+ private:
+  CMSCollector* _collector;
+  MemRegion     _span;
+  CMSBitMap*    _bit_map;
+  OopTaskQueue* _work_queue;
+  CMSMarkStack* _revisit_stack;
+  bool const    _should_remember_klasses;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   Par_PushAndMarkClosure(CMSCollector* collector,
                          MemRegion span,
@@ -128,43 +151,48 @@
                          CMSBitMap* bit_map,
                          OopTaskQueue* work_queue,
                          CMSMarkStack* revisit_stack);
-
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p)  { Par_PushAndMarkClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
   bool do_header() { return true; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  const bool should_remember_klasses() const {
+  virtual const bool should_remember_klasses() const {
     return _should_remember_klasses;
   }
-  void remember_klass(Klass* k);
+  virtual void remember_klass(Klass* k);
 };
 
-
 // The non-parallel version (the parallel version appears further below).
 class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
-  MemRegion                  _span;
-  CMSBitMap*                 _bit_map;
-  CMSMarkStack*              _mark_stack;
-  PushAndMarkClosure         _pushAndMarkClosure;
-  CMSCollector*              _collector;
-  bool                       _yield;
+ private:
+  MemRegion          _span;
+  CMSBitMap*         _bit_map;
+  CMSMarkStack*      _mark_stack;
+  PushAndMarkClosure _pushAndMarkClosure;
+  CMSCollector*      _collector;
+  Mutex*             _freelistLock;
+  bool               _yield;
   // Whether closure is being used for concurrent precleaning
-  bool                       _concurrent_precleaning;
-  Mutex*                     _freelistLock;
+  bool               _concurrent_precleaning;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   MarkRefsIntoAndScanClosure(MemRegion span,
                              ReferenceProcessor* rp,
                              CMSBitMap* bit_map,
                              CMSBitMap* mod_union_table,
-                             CMSMarkStack*  mark_stack,
-                             CMSMarkStack*  revisit_stack,
+                             CMSMarkStack* mark_stack,
+                             CMSMarkStack* revisit_stack,
                              CMSCollector* collector,
                              bool should_yield,
                              bool concurrent_precleaning);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const { return true; }
   Prefetch::style prefetch_style() {
@@ -185,11 +213,14 @@
 // sycnhronized. An OopTaskQueue structure, supporting efficient
 // workstealing, replaces a CMSMarkStack for storing grey objects.
 class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
-  MemRegion                      _span;
-  CMSBitMap*                     _bit_map;
-  OopTaskQueue*                  _work_queue;
-  const uint                     _low_water_mark;
-  Par_PushAndMarkClosure         _par_pushAndMarkClosure;
+ private:
+  MemRegion              _span;
+  CMSBitMap*             _bit_map;
+  OopTaskQueue*          _work_queue;
+  const uint             _low_water_mark;
+  Par_PushAndMarkClosure _par_pushAndMarkClosure;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                  MemRegion span,
@@ -197,8 +228,10 @@
                                  CMSBitMap* bit_map,
                                  OopTaskQueue* work_queue,
                                  CMSMarkStack*  revisit_stack);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const { return true; }
   Prefetch::style prefetch_style() {
@@ -211,28 +244,34 @@
 // following the first checkpoint. Its use is buried in
 // the closure MarkFromRootsClosure.
 class PushOrMarkClosure: public OopClosure {
-  CMSCollector*    _collector;
-  MemRegion        _span;
-  CMSBitMap*       _bitMap;
-  CMSMarkStack*    _markStack;
-  CMSMarkStack*    _revisitStack;
-  HeapWord* const  _finger;
-  MarkFromRootsClosure* const _parent;
-  bool                  const _should_remember_klasses;
+ private:
+  CMSCollector*   _collector;
+  MemRegion       _span;
+  CMSBitMap*      _bitMap;
+  CMSMarkStack*   _markStack;
+  CMSMarkStack*   _revisitStack;
+  HeapWord* const _finger;
+  MarkFromRootsClosure* const
+                  _parent;
+  bool const      _should_remember_klasses;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   PushOrMarkClosure(CMSCollector* cms_collector,
                     MemRegion span,
                     CMSBitMap* bitMap,
-                    CMSMarkStack*  markStack,
-                    CMSMarkStack*  revisitStack,
-                    HeapWord*      finger,
+                    CMSMarkStack* markStack,
+                    CMSMarkStack* revisitStack,
+                    HeapWord* finger,
                     MarkFromRootsClosure* parent);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p)  { PushOrMarkClosure::do_oop(p); }
-  const bool should_remember_klasses() const {
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
+  virtual const bool should_remember_klasses() const {
     return _should_remember_klasses;
   }
-  void remember_klass(Klass* k);
+  virtual void remember_klass(Klass* k);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
@@ -244,6 +283,7 @@
 // following the first checkpoint. Its use is buried in
 // the closure Par_MarkFromRootsClosure.
 class Par_PushOrMarkClosure: public OopClosure {
+ private:
   CMSCollector*    _collector;
   MemRegion        _whole_span;
   MemRegion        _span;        // local chunk
@@ -253,24 +293,29 @@
   CMSMarkStack*    _revisit_stack;
   HeapWord*  const _finger;
   HeapWord** const _global_finger_addr;
-  Par_MarkFromRootsClosure* const _parent;
-  bool       const _should_remember_klasses;
+  Par_MarkFromRootsClosure* const
+                   _parent;
+  bool const       _should_remember_klasses;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   Par_PushOrMarkClosure(CMSCollector* cms_collector,
-                    MemRegion span,
-                    CMSBitMap* bit_map,
-                    OopTaskQueue* work_queue,
-                    CMSMarkStack*  mark_stack,
-                    CMSMarkStack*  revisit_stack,
-                    HeapWord*      finger,
-                    HeapWord**     global_finger_addr,
-                    Par_MarkFromRootsClosure* parent);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p)  { Par_PushOrMarkClosure::do_oop(p); }
-  const bool should_remember_klasses() const {
+                        MemRegion span,
+                        CMSBitMap* bit_map,
+                        OopTaskQueue* work_queue,
+                        CMSMarkStack* mark_stack,
+                        CMSMarkStack* revisit_stack,
+                        HeapWord* finger,
+                        HeapWord** global_finger_addr,
+                        Par_MarkFromRootsClosure* parent);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+  virtual const bool should_remember_klasses() const {
     return _should_remember_klasses;
   }
-  void remember_klass(Klass* k);
+  virtual void remember_klass(Klass* k);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
@@ -282,10 +327,13 @@
 // This is currently used during the (weak) reference object
 // processing phase of the CMS final checkpoint step.
 class CMSKeepAliveClosure: public OopClosure {
+ private:
   CMSCollector* _collector;
   MemRegion     _span;
   CMSMarkStack* _mark_stack;
   CMSBitMap*    _bit_map;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map, CMSMarkStack* mark_stack):
@@ -293,16 +341,20 @@
     _span(span),
     _bit_map(bit_map),
     _mark_stack(mark_stack) { }
-
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
 };
 
 class CMSInnerParMarkAndPushClosure: public OopClosure {
+ private:
   CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
@@ -311,24 +363,32 @@
     _span(span),
     _bit_map(bit_map),
     _work_queue(work_queue) { }
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
 };
 
 // A parallel (MT) version of the above, used when
 // reference processing is parallel; the only difference
 // is in the do_oop method.
 class CMSParKeepAliveClosure: public OopClosure {
+ private:
   CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
-  CMSInnerParMarkAndPushClosure _mark_and_push;
+  CMSInnerParMarkAndPushClosure
+                _mark_and_push;
   const uint    _low_water_mark;
   void trim_queue(uint max);
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                          CMSBitMap* bit_map, OopTaskQueue* work_queue);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
 };
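
The closure rework above follows one pattern throughout: each virtual do_oop entry point now comes as an oop*/narrowOop* pair, and both forward to a single templated do_oop_work (DO_OOP_WORK_DEFN presumably expands to such a helper plus scratch fields; its definition is not part of this hunk). A minimal standalone sketch of that dispatch, with placeholder types standing in for the real HotSpot oop and narrowOop:

    #include <cstdint>
    #include <iostream>

    typedef uintptr_t oop_t;        // placeholder for a full-width oop
    typedef uint32_t  narrowOop_t;  // placeholder for a compressed oop

    struct SketchClosure {
      // One templated worker handles both field widths.
      template <class T> void do_oop_work(T* p) {
        std::cout << "scanned a " << sizeof(T) << "-byte reference field\n";
      }
      // The virtual entry points only dispatch; a do_oop_nv variant would call
      // the worker directly to avoid the virtual call on hot paths.
      virtual void do_oop(oop_t* p)       { do_oop_work(p); }
      virtual void do_oop(narrowOop_t* p) { do_oop_work(p); }
    };

    int main() {
      SketchClosure cl;
      oop_t wide = 0; narrowOop_t narrow = 0;
      cl.do_oop(&wide);    // 8-byte field on LP64
      cl.do_oop(&narrow);  // 4-byte field
      return 0;
    }
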
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -177,7 +177,7 @@
     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   }
 
-  debug_only(MarkSweep::register_live_oop(q, adjusted_size));
+  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   compact_top += adjusted_size;
 
   // we need to update the offset table so that the beginnings of objects can be
@@ -1211,7 +1211,7 @@
   return fc;
 }
 
-oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
+oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   assert_locked();
 
@@ -2116,7 +2116,6 @@
   splitBirth(to2);
 }
 
-
 void CompactibleFreeListSpace::print() const {
   tty->print(" CompactibleFreeListSpace");
   Space::print();
@@ -2130,6 +2129,7 @@
 }
 
 class VerifyAllBlksClosure: public BlkClosure {
+ private:
   const CompactibleFreeListSpace* _sp;
   const MemRegion                 _span;
 
@@ -2137,7 +2137,7 @@
   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
     MemRegion span) :  _sp(sp), _span(span) { }
 
-  size_t do_blk(HeapWord* addr) {
+  virtual size_t do_blk(HeapWord* addr) {
     size_t res;
     if (_sp->block_is_obj(addr)) {
       oop p = oop(addr);
@@ -2160,12 +2160,54 @@
 };
 
 class VerifyAllOopsClosure: public OopClosure {
+ private:
   const CMSCollector*             _collector;
   const CompactibleFreeListSpace* _sp;
   const MemRegion                 _span;
   const bool                      _past_remark;
   const CMSBitMap*                _bit_map;
 
+ protected:
+  void do_oop(void* p, oop obj) {
+    if (_span.contains(obj)) { // the interior oop points into CMS heap
+      if (!_span.contains(p)) { // reference from outside CMS heap
+        // Should be a valid object; the first disjunct below allows
+        // us to sidestep an assertion in block_is_obj() that insists
+        // that p be in _sp. Note that several generations (and spaces)
+        // are spanned by _span (CMS heap) above.
+        guarantee(!_sp->is_in_reserved(obj) ||
+                  _sp->block_is_obj((HeapWord*)obj),
+                  "Should be an object");
+        guarantee(obj->is_oop(), "Should be an oop");
+        obj->verify();
+        if (_past_remark) {
+          // Remark has been completed, the object should be marked
+          _bit_map->isMarked((HeapWord*)obj);
+        }
+      } else { // reference within CMS heap
+        if (_past_remark) {
+          // Remark has been completed -- so the referent should have
+          // been marked, if referring object is.
+          if (_bit_map->isMarked(_collector->block_start(p))) {
+            guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
+          }
+        }
+      }
+    } else if (_sp->is_in_reserved(p)) {
+      // the reference is from FLS, and points out of FLS
+      guarantee(obj->is_oop(), "Should be an oop");
+      obj->verify();
+    }
+  }
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      do_oop(p, obj);
+    }
+  }
+
  public:
   VerifyAllOopsClosure(const CMSCollector* collector,
     const CompactibleFreeListSpace* sp, MemRegion span,
@@ -2173,40 +2215,8 @@
     OopClosure(), _collector(collector), _sp(sp), _span(span),
     _past_remark(past_remark), _bit_map(bit_map) { }
 
-  void do_oop(oop* ptr) {
-    oop p = *ptr;
-    if (p != NULL) {
-      if (_span.contains(p)) { // the interior oop points into CMS heap
-        if (!_span.contains(ptr)) { // reference from outside CMS heap
-          // Should be a valid object; the first disjunct below allows
-          // us to sidestep an assertion in block_is_obj() that insists
-          // that p be in _sp. Note that several generations (and spaces)
-          // are spanned by _span (CMS heap) above.
-          guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
-                    "Should be an object");
-          guarantee(p->is_oop(), "Should be an oop");
-          p->verify();
-          if (_past_remark) {
-            // Remark has been completed, the object should be marked
-            _bit_map->isMarked((HeapWord*)p);
-          }
-        }
-        else { // reference within CMS heap
-          if (_past_remark) {
-            // Remark has been completed -- so the referent should have
-            // been marked, if referring object is.
-            if (_bit_map->isMarked(_collector->block_start(ptr))) {
-              guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
-            }
-          }
-        }
-      } else if (_sp->is_in_reserved(ptr)) {
-        // the reference is from FLS, and points out of FLS
-        guarantee(p->is_oop(), "Should be an oop");
-        p->verify();
-      }
-    }
-  }
+  virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
 };
 
 void CompactibleFreeListSpace::verify(bool ignored) const {
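
VerifyAllOopsClosure above is representative of how the old "oop p = *ptr; if (p != NULL)" shape is rewritten everywhere: the raw, possibly compressed field is loaded first, tested for null without decoding, and only then decoded to a full oop. A standalone sketch of that control flow, with placeholder helpers standing in for oopDesc::load_heap_oop and decode_heap_oop_not_null (not the real implementations):

    #include <cstdint>
    #include <iostream>

    typedef uint32_t narrowOop_t;                       // placeholder compressed field
    static const uintptr_t kHeapBase = 0x100000000ULL;  // assumed heap base

    static inline bool is_null(narrowOop_t v) { return v == 0; }
    static inline uintptr_t decode_not_null(narrowOop_t v) {
      return kHeapBase + ((uintptr_t)v << 3);           // base + scaled offset
    }

    // Mirrors the shape of the new do_oop_work: test the undecoded bits,
    // decode only for non-null fields, then hand the object to the checks.
    static void do_oop_work(narrowOop_t* p) {
      narrowOop_t heap_oop = *p;                        // "load_heap_oop"
      if (!is_null(heap_oop)) {
        uintptr_t obj = decode_not_null(heap_oop);      // "decode_heap_oop_not_null"
        std::cout << "would verify object at 0x" << std::hex << obj << "\n";
      }
    }

    int main() {
      narrowOop_t empty = 0, set = 0x10;
      do_oop_work(&empty);  // null field: skipped without decoding
      do_oop_work(&set);    // non-null: decoded and verified
      return 0;
    }
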
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -540,7 +540,7 @@
   HeapWord* allocate(size_t size);
   HeapWord* par_allocate(size_t size);
 
-  oop       promote(oop obj, size_t obj_size, oop* ref);
+  oop       promote(oop obj, size_t obj_size);
   void      gc_prologue();
   void      gc_epilogue();
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1226,7 +1226,7 @@
   return NULL;
 }
 
-oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
+oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   // allocate, copy and if necessary update promoinfo --
   // delegate to underlying space.
@@ -1238,7 +1238,7 @@
   }
 #endif  // #ifndef PRODUCT
 
-  oop res = _cmsSpace->promote(obj, obj_size, ref);
+  oop res = _cmsSpace->promote(obj, obj_size);
   if (res == NULL) {
     // expand and retry
     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
@@ -1249,7 +1249,7 @@
     assert(next_gen() == NULL, "assumption, based upon which no attempt "
                                "is made to pass on a possibly failing "
                                "promotion to next generation");
-    res = _cmsSpace->promote(obj, obj_size, ref);
+    res = _cmsSpace->promote(obj, obj_size);
   }
   if (res != NULL) {
     // See comment in allocate() about when objects should
@@ -3922,13 +3922,15 @@
 }
 
 class Par_ConcMarkingClosure: public OopClosure {
+ private:
   CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSMarkStack* _overflow_stack;
   CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
   OopTaskQueue* _work_queue;
-
+ protected:
+  DO_OOP_WORK_DEFN
  public:
   Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
@@ -3937,8 +3939,8 @@
     _work_queue(work_queue),
     _bit_map(bit_map),
     _overflow_stack(overflow_stack) { }   // need to initialize revisit stack etc.
-
-  void do_oop(oop* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
   void trim_queue(size_t max);
   void handle_stack_overflow(HeapWord* lost);
 };
@@ -3947,11 +3949,9 @@
 // the salient assumption here is that stolen oops must
 // always be initialized, so we do not need to check for
 // uninitialized objects before scanning here.
-void Par_ConcMarkingClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
-  assert(this_oop->is_oop_or_null(),
-         "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+void Par_ConcMarkingClosure::do_oop(oop obj) {
+  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -3970,7 +3970,7 @@
         }
       )
       if (simulate_overflow ||
-          !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
         // stack overflow
         if (PrintCMSStatistics != 0) {
           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
@@ -3987,6 +3987,9 @@
   }
 }
 
+void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
+void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+
 void Par_ConcMarkingClosure::trim_queue(size_t max) {
   while (_work_queue->size() > max) {
     oop new_oop;
@@ -4086,8 +4089,8 @@
   //
   // Tony 2006.06.29
   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
+                   ConcurrentMarkSweepThread::should_yield() &&
+                   !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
     ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
@@ -6048,8 +6051,8 @@
 
         // See the comment in coordinator_yield()
         for (unsigned i = 0; i < CMSYieldSleepCount &&
-                        ConcurrentMarkSweepThread::should_yield() &&
-                        !CMSCollector::foregroundGCIsActive(); ++i) {
+                         ConcurrentMarkSweepThread::should_yield() &&
+                         !CMSCollector::foregroundGCIsActive(); ++i) {
           os::sleep(Thread::current(), 1, false);
           ConcurrentMarkSweepThread::acknowledge_yield_request();
         }
@@ -6362,18 +6365,18 @@
     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
 }
 
-void MarkRefsIntoClosure::do_oop(oop* p) {
+void MarkRefsIntoClosure::do_oop(oop obj) {
   // if p points into _span, then mark corresponding bit in _markBitMap
-  oop thisOop = *p;
-  if (thisOop != NULL) {
-    assert(thisOop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)thisOop;
-    if (_span.contains(addr)) {
-      // this should be made more efficient
-      _bitMap->mark(addr);
-    }
-  }
-}
+  assert(obj->is_oop(), "expected an oop");
+  HeapWord* addr = (HeapWord*)obj;
+  if (_span.contains(addr)) {
+    // this should be made more efficient
+    _bitMap->mark(addr);
+  }
+}
+
+void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
+void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
 
 // A variant of the above, used for CMS marking verification.
 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
@@ -6387,22 +6390,22 @@
     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
 }
 
-void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
+void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
   // if p points into _span, then mark corresponding bit in _markBitMap
-  oop this_oop = *p;
-  if (this_oop != NULL) {
-    assert(this_oop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)this_oop;
-    if (_span.contains(addr)) {
-      _verification_bm->mark(addr);
-      if (!_cms_bm->isMarked(addr)) {
-        oop(addr)->print();
-        gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
-        fatal("... aborting");
-      }
-    }
-  }
-}
+  assert(obj->is_oop(), "expected an oop");
+  HeapWord* addr = (HeapWord*)obj;
+  if (_span.contains(addr)) {
+    _verification_bm->mark(addr);
+    if (!_cms_bm->isMarked(addr)) {
+      oop(addr)->print();
+      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
+      fatal("... aborting");
+    }
+  }
+}
+
+void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
 
 //////////////////////////////////////////////////
 // MarkRefsIntoAndScanClosure
@@ -6438,13 +6441,13 @@
 // The marks are made in the marking bit map and the marking stack is
 // used for keeping the (newly) grey objects during the scan.
 // The parallel version (Par_...) appears further below.
-void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  if (this_oop != NULL) {
-    assert(this_oop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)this_oop;
-    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
-    assert(_collector->overflow_list_is_empty(), "should be empty");
+void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+  if (obj != NULL) {
+    assert(obj->is_oop(), "expected an oop");
+    HeapWord* addr = (HeapWord*)obj;
+    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
+    assert(_collector->overflow_list_is_empty(),
+           "overflow list should be empty");
     if (_span.contains(addr) &&
         !_bit_map->isMarked(addr)) {
       // mark bit map (object is now grey)
@@ -6452,7 +6455,7 @@
       // push on marking stack (stack should be empty), and drain the
       // stack by applying this closure to the oops in the oops popped
       // from the stack (i.e. blacken the grey objects)
-      bool res = _mark_stack->push(this_oop);
+      bool res = _mark_stack->push(obj);
       assert(res, "Should have space to push on empty stack");
       do {
         oop new_oop = _mark_stack->pop();
@@ -6488,6 +6491,9 @@
   }
 }
 
+void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
 void MarkRefsIntoAndScanClosure::do_yield_work() {
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
@@ -6506,9 +6512,11 @@
   _collector->icms_wait();
 
   // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
+  for (unsigned i = 0;
+       i < CMSYieldSleepCount &&
+       ConcurrentMarkSweepThread::should_yield() &&
+       !CMSCollector::foregroundGCIsActive();
+       ++i) {
     os::sleep(Thread::current(), 1, false);
     ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
@@ -6545,13 +6553,12 @@
 // the scan phase whence they are also available for stealing by parallel
 // threads. Since the marking bit map is shared, updates are
 // synchronized (via CAS).
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  if (this_oop != NULL) {
+void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+  if (obj != NULL) {
     // Ignore mark word because this could be an already marked oop
     // that may be chained at the end of the overflow list.
-    assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop");
-    HeapWord* addr = (HeapWord*)this_oop;
+    assert(obj->is_oop(), "expected an oop");
+    HeapWord* addr = (HeapWord*)obj;
     if (_span.contains(addr) &&
         !_bit_map->isMarked(addr)) {
       // mark bit map (object will become grey):
@@ -6565,7 +6572,7 @@
         // queue to an appropriate length by applying this closure to
         // the oops in the oops popped from the stack (i.e. blacken the
         // grey objects)
-        bool res = _work_queue->push(this_oop);
+        bool res = _work_queue->push(obj);
         assert(res, "Low water mark should be less than capacity?");
         trim_queue(_low_water_mark);
       } // Else, another thread claimed the object
@@ -6573,6 +6580,9 @@
   }
 }
 
+void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
 // This closure is used to rescan the marked objects on the dirty cards
 // in the mod union table and the card table proper.
 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
@@ -6675,8 +6685,8 @@
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
+                   ConcurrentMarkSweepThread::should_yield() &&
+                   !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
     ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
@@ -6928,13 +6938,13 @@
   assert(_markStack->isEmpty(),
          "should drain stack to limit stack usage");
   // convert ptr to an oop preparatory to scanning
-  oop this_oop = oop(ptr);
+  oop obj = oop(ptr);
   // Ignore mark word in verification below, since we
   // may be running concurrent with mutators.
-  assert(this_oop->is_oop(true), "should be an oop");
+  assert(obj->is_oop(true), "should be an oop");
   assert(_finger <= ptr, "_finger runneth ahead");
   // advance the finger to right end of this object
-  _finger = ptr + this_oop->size();
+  _finger = ptr + obj->size();
   assert(_finger > ptr, "we just incremented it above");
   // On large heaps, it may take us some time to get through
   // the marking phase (especially if running iCMS). During
@@ -6980,7 +6990,7 @@
                                       _span, _bitMap, _markStack,
                                       _revisitStack,
                                       _finger, this);
-  bool res = _markStack->push(this_oop);
+  bool res = _markStack->push(obj);
   assert(res, "Empty non-zero size stack should have space for single push");
   while (!_markStack->isEmpty()) {
     oop new_oop = _markStack->pop();
@@ -7052,13 +7062,13 @@
   assert(_work_queue->size() == 0,
          "should drain stack to limit stack usage");
   // convert ptr to an oop preparatory to scanning
-  oop this_oop = oop(ptr);
+  oop obj = oop(ptr);
   // Ignore mark word in verification below, since we
   // may be running concurrent with mutators.
-  assert(this_oop->is_oop(true), "should be an oop");
+  assert(obj->is_oop(true), "should be an oop");
   assert(_finger <= ptr, "_finger runneth ahead");
   // advance the finger to right end of this object
-  _finger = ptr + this_oop->size();
+  _finger = ptr + obj->size();
   assert(_finger > ptr, "we just incremented it above");
   // On large heaps, it may take us some time to get through
   // the marking phase (especially if running iCMS). During
@@ -7106,7 +7116,7 @@
                                       _revisit_stack,
                                       _finger,
                                       gfa, this);
-  bool res = _work_queue->push(this_oop);   // overflow could occur here
+  bool res = _work_queue->push(obj);   // overflow could occur here
   assert(res, "Will hold once we use workqueues");
   while (true) {
     oop new_oop;
@@ -7176,15 +7186,15 @@
   assert(_mark_stack->isEmpty(),
          "should drain stack to limit stack usage");
   // convert addr to an oop preparatory to scanning
-  oop this_oop = oop(addr);
-  assert(this_oop->is_oop(), "should be an oop");
+  oop obj = oop(addr);
+  assert(obj->is_oop(), "should be an oop");
   assert(_finger <= addr, "_finger runneth ahead");
   // advance the finger to right end of this object
-  _finger = addr + this_oop->size();
+  _finger = addr + obj->size();
   assert(_finger > addr, "we just incremented it above");
   // Note: the finger doesn't advance while we drain
   // the stack below.
-  bool res = _mark_stack->push(this_oop);
+  bool res = _mark_stack->push(obj);
   assert(res, "Empty non-zero size stack should have space for single push");
   while (!_mark_stack->isEmpty()) {
     oop new_oop = _mark_stack->pop();
@@ -7207,6 +7217,8 @@
   _mark_stack(mark_stack)
 { }
 
+void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
+void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
 
 // Upon stack overflow, we discard (part of) the stack,
 // remembering the least address amongst those discarded
@@ -7219,20 +7231,20 @@
   _mark_stack->expand(); // expand the stack if possible
 }
 
-void PushAndMarkVerifyClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
-  assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+void PushAndMarkVerifyClosure::do_oop(oop obj) {
+  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
     _verification_bm->mark(addr);            // now grey
     if (!_cms_bm->isMarked(addr)) {
       oop(addr)->print();
-      gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
+      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
+                             addr);
       fatal("... aborting");
     }
 
-    if (!_mark_stack->push(this_oop)) { // stack overflow
+    if (!_mark_stack->push(obj)) { // stack overflow
       if (PrintCMSStatistics != 0) {
         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                SIZE_FORMAT, _mark_stack->capacity());
@@ -7285,7 +7297,6 @@
   _should_remember_klasses(collector->should_unload_classes())
 { }
 
-
 void CMSCollector::lower_restart_addr(HeapWord* low) {
   assert(_span.contains(low), "Out of bounds addr");
   if (_restart_addr == NULL) {
@@ -7321,12 +7332,10 @@
   _overflow_stack->expand(); // expand the stack if possible
 }
 
-
-void PushOrMarkClosure::do_oop(oop* p) {
-  oop    thisOop = *p;
+void PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)thisOop;
+  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
     _bitMap->mark(addr);            // now grey
@@ -7342,7 +7351,7 @@
           simulate_overflow = true;
         }
       )
-      if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
+      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
         if (PrintCMSStatistics != 0) {
           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                  SIZE_FORMAT, _markStack->capacity());
@@ -7358,11 +7367,13 @@
   }
 }
 
-void Par_PushOrMarkClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
+void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
+void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
+
+void Par_PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)obj;
   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
     // We read the global_finger (volatile read) strictly after marking oop
@@ -7391,7 +7402,7 @@
       }
     )
     if (simulate_overflow ||
-        !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
       // stack overflow
       if (PrintCMSStatistics != 0) {
         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
@@ -7408,6 +7419,8 @@
   }
 }
 
+void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
+void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
 
 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                        MemRegion span,
@@ -7432,16 +7445,11 @@
 
 // Grey object rescan during pre-cleaning and second checkpoint phases --
 // the non-parallel version (the parallel version appears further below.)
-void PushAndMarkClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
-  // Ignore mark word verification. If during concurrent precleaning
-  // the object monitor may be locked. If during the checkpoint
-  // phases, the object may already have been reached by a  different
-  // path and may be at the end of the global overflow list (so
-  // the mark word may be NULL).
-  assert(this_oop->is_oop_or_null(true/* ignore mark word */),
+void PushAndMarkClosure::do_oop(oop obj) {
+  // If _concurrent_precleaning, ignore mark word verification
+  assert(obj->is_oop_or_null(_concurrent_precleaning),
          "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+  HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -7456,7 +7464,7 @@
         simulate_overflow = true;
       }
     )
-    if (simulate_overflow || !_mark_stack->push(this_oop)) {
+    if (simulate_overflow || !_mark_stack->push(obj)) {
       if (_concurrent_precleaning) {
          // During precleaning we can just dirty the appropriate card
          // in the mod union table, thus ensuring that the object remains
@@ -7468,7 +7476,7 @@
       } else {
          // During the remark phase, we need to remember this oop
          // in the overflow list.
-         _collector->push_on_overflow_list(this_oop);
+         _collector->push_on_overflow_list(obj);
          _collector->_ser_pmc_remark_ovflw++;
       }
     }
@@ -7492,10 +7500,12 @@
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
 
+void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
+void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
+
 // Grey object rescan during second checkpoint phase --
 // the parallel version.
-void Par_PushAndMarkClosure::do_oop(oop* p) {
-  oop    this_oop = *p;
+void Par_PushAndMarkClosure::do_oop(oop obj) {
   // In the assert below, we ignore the mark word because
   // this oop may point to an already visited object that is
   // on the overflow stack (in which case the mark word has
@@ -7507,9 +7517,9 @@
   // value, by the time we get to examine this failing assert in
   // the debugger, is_oop_or_null(false) may subsequently start
   // to hold.
-  assert(this_oop->is_oop_or_null(true),
+  assert(obj->is_oop_or_null(true),
          "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+  HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -7527,14 +7537,17 @@
           simulate_overflow = true;
         }
       )
-      if (simulate_overflow || !_work_queue->push(this_oop)) {
-        _collector->par_push_on_overflow_list(this_oop);
+      if (simulate_overflow || !_work_queue->push(obj)) {
+        _collector->par_push_on_overflow_list(obj);
         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
       }
     } // Else, some other thread got there first
   }
 }
 
+void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
+void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+
 void PushAndMarkClosure::remember_klass(Klass* k) {
   if (!_revisit_stack->push(oop(k))) {
     fatal("Revisit stack overflowed in PushAndMarkClosure");
@@ -8228,9 +8241,8 @@
 }
 
 // CMSKeepAliveClosure: the serial version
-void CMSKeepAliveClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  HeapWord* addr = (HeapWord*)this_oop;
+void CMSKeepAliveClosure::do_oop(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) &&
       !_bit_map->isMarked(addr)) {
     _bit_map->mark(addr);
@@ -8242,26 +8254,28 @@
         simulate_overflow = true;
       }
     )
-    if (simulate_overflow || !_mark_stack->push(this_oop)) {
-      _collector->push_on_overflow_list(this_oop);
+    if (simulate_overflow || !_mark_stack->push(obj)) {
+      _collector->push_on_overflow_list(obj);
       _collector->_ser_kac_ovflw++;
     }
   }
 }
 
+void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
+void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+
 // CMSParKeepAliveClosure: a parallel version of the above.
 // The work queues are private to each closure (thread),
 // but (may be) available for stealing by other threads.
-void CMSParKeepAliveClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  HeapWord* addr = (HeapWord*)this_oop;
+void CMSParKeepAliveClosure::do_oop(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) &&
       !_bit_map->isMarked(addr)) {
     // In general, during recursive tracing, several threads
     // may be concurrently getting here; the first one to
     // "tag" it, claims it.
     if (_bit_map->par_mark(addr)) {
-      bool res = _work_queue->push(this_oop);
+      bool res = _work_queue->push(obj);
       assert(res, "Low water mark should be much less than capacity");
       // Do a recursive trim in the hope that this will keep
       // stack usage lower, but leave some oops for potential stealers
@@ -8270,6 +8284,9 @@
   }
 }
 
+void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
+void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+
 void CMSParKeepAliveClosure::trim_queue(uint max) {
   while (_work_queue->size() > max) {
     oop new_oop;
@@ -8285,9 +8302,8 @@
   }
 }
 
-void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  HeapWord* addr = (HeapWord*)this_oop;
+void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) &&
       !_bit_map->isMarked(addr)) {
     if (_bit_map->par_mark(addr)) {
@@ -8299,14 +8315,17 @@
           simulate_overflow = true;
         }
       )
-      if (simulate_overflow || !_work_queue->push(this_oop)) {
-        _collector->par_push_on_overflow_list(this_oop);
+      if (simulate_overflow || !_work_queue->push(obj)) {
+        _collector->par_push_on_overflow_list(obj);
         _collector->_par_kac_ovflw++;
       }
     } // Else another thread got there already
   }
 }
 
+void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+
 //////////////////////////////////////////////////////////////////
 //  CMSExpansionCause                /////////////////////////////
 //////////////////////////////////////////////////////////////////
@@ -8337,12 +8356,12 @@
   while (!_mark_stack->isEmpty() ||
          // if stack is empty, check the overflow list
          _collector->take_from_overflow_list(num, _mark_stack)) {
-    oop this_oop = _mark_stack->pop();
-    HeapWord* addr = (HeapWord*)this_oop;
+    oop obj = _mark_stack->pop();
+    HeapWord* addr = (HeapWord*)obj;
     assert(_span.contains(addr), "Should be within span");
     assert(_bit_map->isMarked(addr), "Should be marked");
-    assert(this_oop->is_oop(), "Should be an oop");
-    this_oop->oop_iterate(_keep_alive);
+    assert(obj->is_oop(), "Should be an oop");
+    obj->oop_iterate(_keep_alive);
   }
 }
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1138,7 +1138,7 @@
   // Allocation support
   HeapWord* allocate(size_t size, bool tlab);
   HeapWord* have_lock_and_allocate(size_t size, bool tlab);
-  oop       promote(oop obj, size_t obj_size, oop* ref);
+  oop       promote(oop obj, size_t obj_size);
   HeapWord* par_allocate(size_t size, bool tlab) {
     return allocate(size, tlab);
   }
@@ -1301,9 +1301,8 @@
 // This closure is used to check that a certain set of oops is empty.
 class FalseClosure: public OopClosure {
  public:
-  void do_oop(oop* p) {
-    guarantee(false, "Should be an empty set");
-  }
+  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
+  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
 };
 
 // This closure is used to do concurrent marking from the roots
@@ -1380,6 +1379,12 @@
   CMSBitMap*       _verification_bm;
   CMSBitMap*       _cms_bm;
   CMSMarkStack*    _mark_stack;
+ protected:
+  void do_oop(oop p);
+  template <class T> inline void do_oop_work(T *p) {
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    do_oop(obj);
+  }
  public:
   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                            MemRegion span,
@@ -1387,6 +1392,7 @@
                            CMSBitMap* cms_bm,
                            CMSMarkStack*  mark_stack);
   void do_oop(oop* p);
+  void do_oop(narrowOop* p);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
 };
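
The decode/encode helpers used above ultimately reduce to base-plus-scaled-offset arithmetic, which is also where the 32 GB figure in the synopsis comes from: a 32-bit offset scaled by the 8-byte object alignment spans 2^35 bytes. A standalone sketch with assumed base and shift values (the real VM chooses these at startup):

    #include <cassert>
    #include <cstdint>

    static const uintptr_t kHeapBase = 0x100000000ULL;  // assumed, picked at startup
    static const int       kShift    = 3;               // log2 of 8-byte alignment

    static inline uint32_t encode(uintptr_t addr) {      // ~ encode_store_heap_oop_not_null
      assert(addr >= kHeapBase);
      return (uint32_t)((addr - kHeapBase) >> kShift);
    }
    static inline uintptr_t decode(uint32_t narrow) {     // ~ load_decode_heap_oop_not_null
      return kHeapBase + ((uintptr_t)narrow << kShift);
    }

    int main() {
      uintptr_t obj = kHeapBase + 0x7000;        // some 8-byte-aligned heap address
      uint32_t field = encode(obj);              // 32 bits stored in the object field
      assert(decode(field) == obj);              // round-trips exactly
      // Maximum addressable span: 2^32 offsets * 8 bytes = 32 GB.
      assert(((uint64_t)UINT32_MAX + 1) * (1ULL << kShift) == 32ULL * 1024 * 1024 * 1024);
      return 0;
    }
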
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew	Sun Apr 13 17:43:42 2008 -0400
@@ -19,7 +19,7 @@
 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 // CA 95054 USA or visit www.sun.com if you need additional information or
 // have any questions.
-//  
+//
 //
 
 asParNewGeneration.hpp			adaptiveSizePolicy.hpp
@@ -66,8 +66,8 @@
 parNewGeneration.cpp                    handles.inline.hpp
 parNewGeneration.cpp                    java.hpp
 parNewGeneration.cpp                    objArrayOop.hpp
+parNewGeneration.cpp                    oop.inline.hpp
 parNewGeneration.cpp                    oop.pcgc.inline.hpp
-parNewGeneration.cpp                    oop.inline.hpp
 parNewGeneration.cpp                    parGCAllocBuffer.hpp
 parNewGeneration.cpp                    parNewGeneration.hpp
 parNewGeneration.cpp                    parOopClosures.inline.hpp
@@ -80,3 +80,8 @@
 parNewGeneration.hpp                    defNewGeneration.hpp
 parNewGeneration.hpp                    parGCAllocBuffer.hpp
 parNewGeneration.hpp                    taskqueue.hpp
+
+parOopClosures.hpp                      genOopClosures.hpp
+
+parOopClosures.inline.hpp               parNewGeneration.hpp
+parOopClosures.inline.hpp               parOopClosures.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Sun Apr 13 17:43:42 2008 -0400
@@ -19,7 +19,7 @@
 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 // CA 95054 USA or visit www.sun.com if you need additional information or
 // have any questions.
-//  
+//
 //
 
 // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
@@ -279,6 +279,7 @@
 psParallelCompact.hpp                   objectStartArray.hpp
 psParallelCompact.hpp			oop.hpp
 psParallelCompact.hpp			parMarkBitMap.hpp
+psParallelCompact.hpp			psCompactionManager.hpp
 psParallelCompact.hpp			sharedHeap.hpp
 
 psOldGen.cpp                            psAdaptiveSizePolicy.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -32,18 +32,19 @@
   _allocated(0), _wasted(0)
 {
   assert (min_size() > AlignmentReserve, "Inconsistency!");
+  // arrayOopDesc::header_size depends on command line initialization.
+  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
+  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
 }
 
-const size_t ParGCAllocBuffer::FillerHeaderSize =
-             align_object_size(arrayOopDesc::header_size(T_INT));
+size_t ParGCAllocBuffer::FillerHeaderSize;
 
 // If the minimum object size is greater than MinObjAlignment, we can
 // end up with a shard at the end of the buffer that's smaller than
 // the smallest object.  We can't allow that because the buffer must
 // look like it's full of objects when we retire it, so we make
 // sure we have enough space for a filler int array object.
-const size_t ParGCAllocBuffer::AlignmentReserve =
-             oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
+size_t ParGCAllocBuffer::AlignmentReserve;
 
 void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
   assert(!retain || end_of_gc, "Can only retain at GC end.");
--- a/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -41,8 +41,8 @@
   size_t    _allocated;     // in HeapWord units
   size_t    _wasted;        // in HeapWord units
   char tail[32];
-  static const size_t FillerHeaderSize;
-  static const size_t AlignmentReserve;
+  static size_t FillerHeaderSize;
+  static size_t AlignmentReserve;
 
 public:
   // Initializes the buffer to be empty, but with the given "word_sz".
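
The ParGCAllocBuffer change above exists because arrayOopDesc::header_size(T_INT) now depends on whether compressed oops are enabled, which is only known once the command line has been parsed; a namespace-scope const initializer runs too early to see that. A standalone sketch of the same move, deferring the value into the constructor (names and numbers are illustrative, not the HotSpot layout):

    #include <cstddef>
    #include <iostream>

    // Stand-in for a layout value that changes once flags are parsed
    // (e.g. a smaller array header under compressed oops).
    static size_t header_words = 3;

    struct Buffer {
      static size_t FillerHeaderSize;                // was: static const, fixed too early
      Buffer() { FillerHeaderSize = header_words; }  // now set after flag parsing
    };
    size_t Buffer::FillerHeaderSize;

    int main(int argc, char**) {
      if (argc > 1) header_words = 2;                // pretend -XX:+UseCompressedOops was given
      Buffer b;
      std::cout << Buffer::FillerHeaderSize << " words\n";  // reflects the "flag"
      return 0;
    }
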
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -104,16 +104,15 @@
     // must be removed.
     arrayOop(old)->set_length(end);
   }
+
   // process our set of indices (include header in first chunk)
-  oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr(start);
-  oop* end_addr   = obj->base() + end; // obj_at_addr(end) asserts end < length
-  MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
+  // should make sure end is even (aligned to HeapWord in case of compressed oops)
   if ((HeapWord *)obj < young_old_boundary()) {
     // object is in to_space
-    obj->oop_iterate(&_to_space_closure, mr);
+    obj->oop_iterate_range(&_to_space_closure, start, end);
   } else {
     // object is in old generation
-    obj->oop_iterate(&_old_gen_closure, mr);
+    obj->oop_iterate_range(&_old_gen_closure, start, end);
   }
 }
 
@@ -319,7 +318,6 @@
   }
 }
 
-
 ParScanClosure::ParScanClosure(ParNewGeneration* g,
                                ParScanThreadState* par_scan_state) :
   OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
@@ -328,11 +326,25 @@
   _boundary = _g->reserved().end();
 }
 
+void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
+void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
+
+void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
+void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
+
+void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
+void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
+
+void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
+void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
+
 ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                              ParScanThreadState* par_scan_state)
   : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
-{
-}
+{}
+
+void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
+void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
 
 #ifdef WIN32
 #pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
@@ -475,51 +487,66 @@
 ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
   DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
 
-void
-// ParNewGeneration::
-ParKeepAliveClosure::do_oop(oop* p) {
-  // We never expect to see a null reference being processed
-  // as a weak reference.
-  assert (*p != NULL, "expected non-null ref");
-  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+template <class T>
+void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+  {
+    assert(!oopDesc::is_null(*p), "expected non-null ref");
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    // We never expect to see a null reference being processed
+    // as a weak reference.
+    assert(obj->is_oop(), "expected an oop while scanning weak refs");
+  }
+#endif // ASSERT
 
   _par_cl->do_oop_nv(p);
 
   if (Universe::heap()->is_in_reserved(p)) {
-    _rs->write_ref_field_gc_par(p, *p);
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    _rs->write_ref_field_gc_par(p, obj);
   }
 }
 
+void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
+void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }
+
 // ParNewGeneration::
 KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
   DefNewGeneration::KeepAliveClosure(cl) {}
 
-void
-// ParNewGeneration::
-KeepAliveClosure::do_oop(oop* p) {
-  // We never expect to see a null reference being processed
-  // as a weak reference.
-  assert (*p != NULL, "expected non-null ref");
-  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+template <class T>
+void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+  {
+    assert(!oopDesc::is_null(*p), "expected non-null ref");
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    // We never expect to see a null reference being processed
+    // as a weak reference.
+    assert(obj->is_oop(), "expected an oop while scanning weak refs");
+  }
+#endif // ASSERT
 
   _cl->do_oop_nv(p);
 
   if (Universe::heap()->is_in_reserved(p)) {
-    _rs->write_ref_field_gc_par(p, *p);
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    _rs->write_ref_field_gc_par(p, obj);
   }
 }
 
-void ScanClosureWithParBarrier::do_oop(oop* p) {
-  oop obj = *p;
-  // Should we copy the obj?
-  if (obj != NULL) {
+void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
+void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
+
+template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {
-        *p = _g->DefNewGeneration::copy_to_survivor_space(obj, p);
-      }
+      oop new_obj = obj->is_forwarded()
+                      ? obj->forwardee()
+                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
+      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
     }
     if (_gc_barrier) {
       // If p points to a younger generation, mark the card.
@@ -530,6 +557,9 @@
   }
 }
 
+void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
+void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
+
 class ParNewRefProcTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 public:
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -33,7 +33,6 @@
 // but they must be here to allow ParScanClosure::do_oop_work to be defined
 // in genOopClosures.inline.hpp.
 
-
 typedef OopTaskQueue    ObjToScanQueue;
 typedef OopTaskQueueSet ObjToScanQueueSet;
 
@@ -41,15 +40,20 @@
 const int PAR_STATS_ENABLED = 0;
 
 class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
+ private:
   ParScanWeakRefClosure* _par_cl;
+ protected:
+  template <class T> void do_oop_work(T* p);
  public:
   ParKeepAliveClosure(ParScanWeakRefClosure* cl);
-  void do_oop(oop* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 };
 
 // The state needed by thread performing parallel young-gen collection.
 class ParScanThreadState {
   friend class ParScanThreadStateSet;
+ private:
   ObjToScanQueue *_work_queue;
 
   ParGCAllocBuffer _to_space_alloc_buffer;
@@ -111,7 +115,7 @@
                      ObjToScanQueueSet* work_queue_set_, size_t desired_plab_sz_,
                      ParallelTaskTerminator& term_);
 
-public:
+ public:
   ageTable* age_table() {return &_ageTable;}
 
   ObjToScanQueue* work_queue() { return _work_queue; }
@@ -195,13 +199,13 @@
   double elapsed() {
     return os::elapsedTime() - _start;
   }
-
 };
 
 class ParNewGenTask: public AbstractGangTask {
-  ParNewGeneration* _gen;
-  Generation* _next_gen;
-  HeapWord* _young_old_boundary;
+ private:
+  ParNewGeneration*            _gen;
+  Generation*                  _next_gen;
+  HeapWord*                    _young_old_boundary;
   class ParScanThreadStateSet* _state_set;
 
 public:
@@ -216,35 +220,44 @@
 };
 
 class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
+ protected:
+  template <class T> void do_oop_work(T* p);
  public:
   KeepAliveClosure(ScanWeakRefClosure* cl);
-  void do_oop(oop* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 };
 
 class EvacuateFollowersClosureGeneral: public VoidClosure {
-    GenCollectedHeap* _gch;
-    int _level;
-    OopsInGenClosure* _scan_cur_or_nonheap;
-    OopsInGenClosure* _scan_older;
-  public:
-    EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
-                                    OopsInGenClosure* cur,
-                                    OopsInGenClosure* older);
-    void do_void();
+ private:
+  GenCollectedHeap* _gch;
+  int               _level;
+  OopsInGenClosure* _scan_cur_or_nonheap;
+  OopsInGenClosure* _scan_older;
+ public:
+  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+                                  OopsInGenClosure* cur,
+                                  OopsInGenClosure* older);
+  virtual void do_void();
 };
 
 // Closure for scanning ParNewGeneration.
 // Same as ScanClosure, except does parallel GC barrier.
 class ScanClosureWithParBarrier: public ScanClosure {
-public:
+ protected:
+  template <class T> void do_oop_work(T* p);
+ public:
   ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
-  void do_oop(oop* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 };
 
 // Implements AbstractRefProcTaskExecutor for ParNew.
 class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
-public:
-
+ private:
+  ParNewGeneration&      _generation;
+  ParScanThreadStateSet& _state_set;
+ public:
   ParNewRefProcTaskExecutor(ParNewGeneration& generation,
                             ParScanThreadStateSet& state_set)
     : _generation(generation), _state_set(state_set)
@@ -255,9 +268,6 @@
   virtual void execute(EnqueueTask& task);
   // Switch to single threaded mode.
   virtual void set_single_threaded_mode();
-private:
-  ParNewGeneration&      _generation;
-  ParScanThreadStateSet& _state_set;
 };
 
 
@@ -269,6 +279,7 @@
   friend class ParNewRefProcTaskExecutor;
   friend class ParScanThreadStateSet;
 
+ private:
   // XXX use a global constant instead of 64!
   struct ObjToScanQueuePadded {
         ObjToScanQueue work_queue;
@@ -314,7 +325,7 @@
   // the details of the policy.
   virtual void adjust_desired_tenuring_threshold();
 
-public:
+ public:
   ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
 
   ~ParNewGeneration() {
--- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -26,70 +26,77 @@
 
 class ParScanThreadState;
 class ParNewGeneration;
-template<class E> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<oop> ObjToScanQueueSet;
+typedef OopTaskQueueSet ObjToScanQueueSet;
 class ParallelTaskTerminator;
 
 class ParScanClosure: public OopsInGenClosure {
-protected:
+ protected:
   ParScanThreadState* _par_scan_state;
-  ParNewGeneration* _g;
-  HeapWord* _boundary;
-  void do_oop_work(oop* p,
-                          bool gc_barrier,
-                          bool root_scan);
-
-  void par_do_barrier(oop* p);
-
-public:
+  ParNewGeneration*   _g;
+  HeapWord*           _boundary;
+  template <class T> void inline par_do_barrier(T* p);
+  template <class T> void inline do_oop_work(T* p,
+                                             bool gc_barrier,
+                                             bool root_scan);
+ public:
   ParScanClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state);
 };
 
 class ParScanWithBarrierClosure: public ParScanClosure {
-public:
-  void do_oop(oop* p)    { do_oop_work(p, true, false); }
-  void do_oop_nv(oop* p) { do_oop_work(p, true, false); }
+ public:
   ParScanWithBarrierClosure(ParNewGeneration* g,
                             ParScanThreadState* par_scan_state) :
     ParScanClosure(g, par_scan_state) {}
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p);
+  inline void do_oop_nv(narrowOop* p);
 };
 
 class ParScanWithoutBarrierClosure: public ParScanClosure {
-public:
+ public:
   ParScanWithoutBarrierClosure(ParNewGeneration* g,
                                ParScanThreadState* par_scan_state) :
     ParScanClosure(g, par_scan_state) {}
-  void do_oop(oop* p)    { do_oop_work(p, false, false); }
-  void do_oop_nv(oop* p) { do_oop_work(p, false, false); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p);
+  inline void do_oop_nv(narrowOop* p);
 };
 
 class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure {
-public:
+ public:
   ParRootScanWithBarrierTwoGensClosure(ParNewGeneration* g,
                                        ParScanThreadState* par_scan_state) :
     ParScanClosure(g, par_scan_state) {}
-  void do_oop(oop* p) { do_oop_work(p, true, true); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 };
 
 class ParRootScanWithoutBarrierClosure: public ParScanClosure {
-public:
+ public:
   ParRootScanWithoutBarrierClosure(ParNewGeneration* g,
                                    ParScanThreadState* par_scan_state) :
     ParScanClosure(g, par_scan_state) {}
-  void do_oop(oop* p) { do_oop_work(p, false, true); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 };
 
 class ParScanWeakRefClosure: public ScanWeakRefClosure {
-protected:
+ protected:
   ParScanThreadState* _par_scan_state;
-public:
+  template <class T> inline void do_oop_work(T* p);
+ public:
   ParScanWeakRefClosure(ParNewGeneration* g,
                         ParScanThreadState* par_scan_state);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p);
+  inline void do_oop_nv(narrowOop* p);
 };
 
 class ParEvacuateFollowersClosure: public VoidClosure {
+ private:
   ParScanThreadState* _par_scan_state;
   ParScanThreadState* par_scan_state() { return _par_scan_state; }
 
@@ -121,8 +128,7 @@
 
   ParallelTaskTerminator* _terminator;
   ParallelTaskTerminator* terminator() { return _terminator; }
-
-public:
+ public:
   ParEvacuateFollowersClosure(
     ParScanThreadState* par_scan_state_,
     ParScanWithoutBarrierClosure* to_space_closure_,
@@ -132,5 +138,5 @@
     ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
     ObjToScanQueueSet* task_queues_,
     ParallelTaskTerminator* terminator_);
-  void do_void();
+  virtual void do_void();
 };
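
The recurring shape in these closure headers is a protected template <class T> do_oop_work(T* p) plus virtual do_oop(oop*) and do_oop(narrowOop*) overrides that forward to it, so one body serves both reference widths. A minimal standalone sketch of that shape; WideRef, NarrowRef, and RefClosure are illustrative stand-ins, not HotSpot types, and a 64-bit build is assumed:

#include <cstdint>
#include <iostream>

// Illustrative stand-ins for the two reference widths; not the real
// HotSpot oop / narrowOop types.
typedef uint64_t WideRef;
typedef uint32_t NarrowRef;

class RefClosure {
 protected:
  // One templated body shared by both slot widths, in the spirit of
  // the do_oop_work(T*) members declared above.
  template <class T> void do_ref_work(T* p) {
    std::cout << "visited a " << sizeof(T) * 8 << "-bit slot\n";
  }
 public:
  virtual ~RefClosure() {}
  virtual void do_ref(WideRef* p)   { do_ref_work(p); }
  virtual void do_ref(NarrowRef* p) { do_ref_work(p); }
};

int main() {
  WideRef   wide   = 0;
  NarrowRef narrow = 0;
  RefClosure c;
  c.do_ref(&wide);    // picks the 64-bit instantiation
  c.do_ref(&narrow);  // picks the 32-bit instantiation
  return 0;
}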
--- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -22,10 +22,9 @@
  *
  */
 
-inline void ParScanWeakRefClosure::do_oop(oop* p)
-{
-  oop obj = *p;
-  assert (obj != NULL, "null weak reference?");
+template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
+  assert (!oopDesc::is_null(*p), "null weak reference?");
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
@@ -33,41 +32,43 @@
     // ParScanClosure::do_oop_work).
     klassOop objK = obj->klass();
     markOop m = obj->mark();
+    oop new_obj;
     if (m->is_marked()) { // Contains forwarding pointer.
-      *p = ParNewGeneration::real_forwardee(obj);
+      new_obj = ParNewGeneration::real_forwardee(obj);
     } else {
       size_t obj_sz = obj->size_given_klass(objK->klass_part());
-      *p = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
-                                                           obj, obj_sz, m);
+      new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
+                                                                obj, obj_sz, m);
     }
+    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
   }
 }
 
-inline void ParScanWeakRefClosure::do_oop_nv(oop* p)
-{
-  ParScanWeakRefClosure::do_oop(p);
-}
+inline void ParScanWeakRefClosure::do_oop_nv(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
+inline void ParScanWeakRefClosure::do_oop_nv(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
 
-inline void ParScanClosure::par_do_barrier(oop* p) {
+template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  oop obj = *p;
-  assert(obj != NULL, "expected non-null object");
+  assert(!oopDesc::is_null(*p), "expected non-null object");
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
   }
 }
 
-inline void ParScanClosure::do_oop_work(oop* p,
+template <class T>
+inline void ParScanClosure::do_oop_work(T* p,
                                         bool gc_barrier,
                                         bool root_scan) {
-  oop obj = *p;
   assert((!Universe::heap()->is_in_reserved(p) ||
           generation()->is_in_reserved(p))
          && (generation()->level() == 0 || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
-  if (obj != NULL) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       // OK, we need to ensure that it is copied.
@@ -78,11 +79,14 @@
       // forwarded.
       klassOop objK = obj->klass();
       markOop m = obj->mark();
+      oop new_obj;
       if (m->is_marked()) { // Contains forwarding pointer.
-        *p = ParNewGeneration::real_forwardee(obj);
+        new_obj = ParNewGeneration::real_forwardee(obj);
+        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
       } else {
         size_t obj_sz = obj->size_given_klass(objK->klass_part());
-        *p = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
+        new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
+        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
         if (root_scan) {
           // This may have pushed an object.  If we have a root
           // category with a lot of roots, can't let the queue get too
@@ -97,3 +101,9 @@
     }
   }
 }
+
+inline void ParScanWithBarrierClosure::do_oop_nv(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
+inline void ParScanWithBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
+
+inline void ParScanWithoutBarrierClosure::do_oop_nv(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
+inline void ParScanWithoutBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
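
These inline bodies replace raw *p loads and stores with oopDesc::load_heap_oop / decode_heap_oop_not_null / encode_store_heap_oop_not_null, so the same code works whether a slot holds a full pointer or a 32-bit compressed value. A toy model of base-plus-shift compression under assumed constants; kHeapBase, kShift, encode, and decode are illustrative only, while the real helpers are oopDesc members whose parameters depend on heap placement and object alignment:

#include <cassert>
#include <cstdint>
#include <iostream>

// Toy model of compressed references: a 32-bit value is the object's offset
// from an assumed heap base, scaled by an assumed 8-byte alignment.  The
// constants and helper names are illustrative, not the VM's.
static const uint64_t kHeapBase = 0x100000000ULL;
static const int      kShift    = 3;

static uint32_t encode(uint64_t addr) {
  assert(addr >= kHeapBase && ((addr - kHeapBase) & ((1u << kShift) - 1)) == 0);
  return (uint32_t)((addr - kHeapBase) >> kShift);
}

static uint64_t decode(uint32_t narrow) {
  return kHeapBase + ((uint64_t)narrow << kShift);
}

int main() {
  // An address near the top of a 32 GB heap still fits in 32 bits once scaled.
  uint64_t obj = kHeapBase + 0x7FFFFFF8ULL * 0x100ULL / 0x100ULL + 0x700000000ULL;
  obj = kHeapBase + 0x7FFFFFFF8ULL;
  uint32_t n = encode(obj);
  assert(decode(n) == obj);
  std::cout << std::hex << "narrow=" << n << " decodes to " << decode(n) << "\n";
  return 0;
}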
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -28,17 +28,16 @@
 // Checks an individual oop for missing precise marks. Mark
 // may be either dirty or newgen.
 class CheckForUnmarkedOops : public OopClosure {
-  PSYoungGen* _young_gen;
+ private:
+  PSYoungGen*         _young_gen;
   CardTableExtension* _card_table;
-  HeapWord* _unmarked_addr;
-  jbyte* _unmarked_card;
+  HeapWord*           _unmarked_addr;
+  jbyte*              _unmarked_card;
 
- public:
-  CheckForUnmarkedOops( PSYoungGen* young_gen, CardTableExtension* card_table ) :
-    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
-
-  virtual void do_oop(oop* p) {
-    if (_young_gen->is_in_reserved(*p) &&
+ protected:
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    if (_young_gen->is_in_reserved(obj) &&
         !_card_table->addr_is_marked_imprecise(p)) {
       // Don't overwrite the first missing card mark
       if (_unmarked_addr == NULL) {
@@ -48,6 +47,13 @@
     }
   }
 
+ public:
+  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
+    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
+
+  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }
+
   bool has_unmarked_oop() {
     return _unmarked_addr != NULL;
   }
@@ -56,7 +62,8 @@
 // Checks all objects for the existence of some type of mark,
 // precise or imprecise, dirty or newgen.
 class CheckForUnmarkedObjects : public ObjectClosure {
-  PSYoungGen* _young_gen;
+ private:
+  PSYoungGen*         _young_gen;
   CardTableExtension* _card_table;
 
  public:
@@ -75,7 +82,7 @@
   // we test for missing precise marks first. If any are found, we don't
   // fail unless the object head is also unmarked.
   virtual void do_object(oop obj) {
-    CheckForUnmarkedOops object_check( _young_gen, _card_table );
+    CheckForUnmarkedOops object_check(_young_gen, _card_table);
     obj->oop_iterate(&object_check);
     if (object_check.has_unmarked_oop()) {
       assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
@@ -85,19 +92,25 @@
 
 // Checks for precise marking of oops as newgen.
 class CheckForPreciseMarks : public OopClosure {
-  PSYoungGen* _young_gen;
+ private:
+  PSYoungGen*         _young_gen;
   CardTableExtension* _card_table;
 
+ protected:
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    if (_young_gen->is_in_reserved(obj)) {
+      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
+      _card_table->set_card_newgen(p);
+    }
+  }
+
  public:
   CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
     _young_gen(young_gen), _card_table(card_table) { }
 
-  virtual void do_oop(oop* p) {
-    if (_young_gen->is_in_reserved(*p)) {
-      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
-      _card_table->set_card_newgen(p);
-    }
-  }
+  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
 };
 
 // We get passed the space_top value to prevent us from traversing into
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -80,7 +80,7 @@
   static bool card_is_verify(int value)     { return value == verify_card; }
 
   // Card marking
-  void inline_write_ref_field_gc(oop* field, oop new_val) {
+  void inline_write_ref_field_gc(void* field, oop new_val) {
     jbyte* byte = byte_for(field);
     *byte = youngergen_card;
   }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -146,7 +146,7 @@
 {
   ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
-  TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
+  ChunkTaskQueueSet* qset = ParCompactionManager::chunk_array();
   ParallelTaskTerminator terminator(parallel_gc_threads, qset);
   GCTaskQueue* q = GCTaskQueue::create();
   for(uint i=0; i<parallel_gc_threads; i++) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -33,8 +33,8 @@
 
 class PrefetchQueue : public CHeapObj {
  private:
-  oop*                         _prefetch_queue[PREFETCH_QUEUE_SIZE];
-  unsigned int                 _prefetch_index;
+  void* _prefetch_queue[PREFETCH_QUEUE_SIZE];
+  uint  _prefetch_index;
 
  public:
   int length() { return PREFETCH_QUEUE_SIZE; }
@@ -46,20 +46,21 @@
     _prefetch_index = 0;
   }
 
-  inline oop* push_and_pop(oop* p) {
-    Prefetch::write((*p)->mark_addr(), 0);
+  template <class T> inline void* push_and_pop(T* p) {
+    oop o = oopDesc::load_decode_heap_oop_not_null(p);
+    Prefetch::write(o->mark_addr(), 0);
     // This prefetch is intended to make sure the size field of array
     // oops is in cache. It assumes that the object layout is
     // mark -> klass -> size, and that mark and klass are heapword
     // sized. If this should change, this prefetch will need updating!
-    Prefetch::write((*p)->mark_addr() + (HeapWordSize*2), 0);
+    Prefetch::write(o->mark_addr() + (HeapWordSize*2), 0);
     _prefetch_queue[_prefetch_index++] = p;
     _prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
     return _prefetch_queue[_prefetch_index];
   }
 
   // Stores a NULL pointer in the pop'd location.
-  inline oop* pop() {
+  inline void* pop() {
     _prefetch_queue[_prefetch_index++] = NULL;
     _prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
     return _prefetch_queue[_prefetch_index];
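
push_and_pop stores the incoming slot and hands back the entry that has aged out, keeping PREFETCH_QUEUE_SIZE a power of two so the index wraps with a mask rather than a modulo. A small self-contained ring with the same wrap trick; kRingSize, Ring, and the int payload are illustrative, not the VM's queue:

#include <cstddef>
#include <iostream>

// Toy fixed-size ring in the spirit of PrefetchQueue: push_and_pop stores the
// new entry and returns the one pushed kRingSize-1 calls earlier (NULL while
// the ring is still warming up).  Size and payload type are illustrative.
static const size_t kRingSize = 8;   // must stay a power of two

class Ring {
 private:
  const int* _slots[kRingSize];
  size_t     _index;
 public:
  Ring() : _index(0) {
    for (size_t i = 0; i < kRingSize; ++i) _slots[i] = NULL;
  }
  const int* push_and_pop(const int* p) {
    _slots[_index++] = p;
    _index &= (kRingSize - 1);   // wrap with a mask instead of a modulo
    return _slots[_index];
  }
};

int main() {
  static const int values[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
  Ring ring;
  for (int i = 0; i < 10; ++i) {
    const int* out = ring.push_and_pop(&values[i]);
    if (out != NULL) std::cout << *out << " ";   // prints 0 1 2
  }
  std::cout << "\n";
  return 0;
}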
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -168,7 +168,7 @@
           start_array->allocate_block(compact_top);
       }
 
-      debug_only(MarkSweep::register_live_oop(oop(q), size));
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
       compact_top += size;
       assert(compact_top <= dest->space()->end(),
         "Exceeding space in destination");
@@ -234,7 +234,7 @@
               start_array->allocate_block(compact_top);
           }
 
-          debug_only(MarkSweep::register_live_oop(oop(q), sz));
+          VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
           compact_top += sz;
           assert(compact_top <= dest->space()->end(),
             "Exceeding space in destination");
@@ -326,15 +326,11 @@
     HeapWord* end = _first_dead;
 
     while (q < end) {
-      debug_only(MarkSweep::track_interior_pointers(oop(q)));
-
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
       // point all the oops to the new location
       size_t size = oop(q)->adjust_pointers();
-
-      debug_only(MarkSweep::check_interior_pointers());
-
-      debug_only(MarkSweep::validate_live_oop(oop(q), size));
-
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
       q += size;
     }
 
@@ -354,11 +350,11 @@
     Prefetch::write(q, interval);
     if (oop(q)->is_gc_marked()) {
       // q is alive
-      debug_only(MarkSweep::track_interior_pointers(oop(q)));
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
       // point all the oops to the new location
       size_t size = oop(q)->adjust_pointers();
-      debug_only(MarkSweep::check_interior_pointers());
-      debug_only(MarkSweep::validate_live_oop(oop(q), size));
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
       debug_only(prev_q = q);
       q += size;
     } else {
@@ -392,7 +388,7 @@
     while (q < end) {
       size_t size = oop(q)->size();
       assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
-      debug_only(MarkSweep::live_oop_moved_to(q, size, q));
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));
       debug_only(prev_q = q);
       q += size;
     }
@@ -427,7 +423,7 @@
       Prefetch::write(compaction_top, copy_interval);
 
       // copy object and reinit its mark
-      debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top));
       assert(q != compaction_top, "everything in this pass should be moving");
       Copy::aligned_conjoint_words(q, compaction_top, size);
       oop(compaction_top)->init_mark();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -81,14 +81,14 @@
 #endif  // #ifdef ASSERT
 
 #ifdef VALIDATE_MARK_SWEEP
-GrowableArray<oop*>*    PSParallelCompact::_root_refs_stack = NULL;
+GrowableArray<void*>*   PSParallelCompact::_root_refs_stack = NULL;
 GrowableArray<oop> *    PSParallelCompact::_live_oops = NULL;
 GrowableArray<oop> *    PSParallelCompact::_live_oops_moved_to = NULL;
 GrowableArray<size_t>*  PSParallelCompact::_live_oops_size = NULL;
 size_t                  PSParallelCompact::_live_oops_index = 0;
 size_t                  PSParallelCompact::_live_oops_index_at_perm = 0;
-GrowableArray<oop*>*    PSParallelCompact::_other_refs_stack = NULL;
-GrowableArray<oop*>*    PSParallelCompact::_adjusted_pointers = NULL;
+GrowableArray<void*>*   PSParallelCompact::_other_refs_stack = NULL;
+GrowableArray<void*>*   PSParallelCompact::_adjusted_pointers = NULL;
 bool                    PSParallelCompact::_pointer_tracking = false;
 bool                    PSParallelCompact::_root_tracking = true;
 
@@ -811,46 +811,23 @@
 ParallelCompactData PSParallelCompact::_summary_data;
 
 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
+
+void PSParallelCompact::IsAliveClosure::do_object(oop p)   { ShouldNotReachHere(); }
+bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
+
+void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
+void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
+
 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
 
-void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) {
-#ifdef VALIDATE_MARK_SWEEP
-  if (ValidateMarkSweep) {
-    if (!Universe::heap()->is_in_reserved(p)) {
-      _root_refs_stack->push(p);
-    } else {
-      _other_refs_stack->push(p);
-    }
-  }
-#endif
-  mark_and_push(_compaction_manager, p);
-}
-
-void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
-                                        oop* p) {
-  assert(Universe::heap()->is_in_reserved(p),
-         "we should only be traversing objects here");
-  oop m = *p;
-  if (m != NULL && mark_bitmap()->is_unmarked(m)) {
-    if (mark_obj(m)) {
-      m->follow_contents(cm);  // Follow contents of the marked object
-    }
-  }
-}
-
-// Anything associated with this variable is temporary.
-
-void PSParallelCompact::mark_and_push_internal(ParCompactionManager* cm,
-                                               oop* p) {
-  // Push marked object, contents will be followed later
-  oop m = *p;
-  if (mark_obj(m)) {
-    // This thread marked the object and
-    // owns the subsequent processing of it.
-    cm->save_for_scanning(m);
-  }
-}
+void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
+void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+
+void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
+
+void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
+void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
 
 void PSParallelCompact::post_initialize() {
   ParallelScavengeHeap* heap = gc_heap();
@@ -2751,23 +2728,6 @@
   young_gen->move_and_update(cm);
 }
 
-void PSParallelCompact::follow_root(ParCompactionManager* cm, oop* p) {
-  assert(!Universe::heap()->is_in_reserved(p),
-         "roots shouldn't be things within the heap");
-#ifdef VALIDATE_MARK_SWEEP
-  if (ValidateMarkSweep) {
-    guarantee(!_root_refs_stack->contains(p), "should only be in here once");
-    _root_refs_stack->push(p);
-  }
-#endif
-  oop m = *p;
-  if (m != NULL && mark_bitmap()->is_unmarked(m)) {
-    if (mark_obj(m)) {
-      m->follow_contents(cm);  // Follow contents of the marked object
-    }
-  }
-  follow_stack(cm);
-}
 
 void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
   while(!cm->overflow_stack()->is_empty()) {
@@ -2807,7 +2767,7 @@
 
 #ifdef VALIDATE_MARK_SWEEP
 
-void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
+void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
   if (!ValidateMarkSweep)
     return;
 
@@ -2821,7 +2781,7 @@
     if (index != -1) {
       int l = _root_refs_stack->length();
       if (l > 0 && l - 1 != index) {
-        oop* last = _root_refs_stack->pop();
+        void* last = _root_refs_stack->pop();
         assert(last != p, "should be different");
         _root_refs_stack->at_put(index, last);
       } else {
@@ -2832,7 +2792,7 @@
 }
 
 
-void PSParallelCompact::check_adjust_pointer(oop* p) {
+void PSParallelCompact::check_adjust_pointer(void* p) {
   _adjusted_pointers->push(p);
 }
 
@@ -2840,7 +2800,8 @@
 class AdjusterTracker: public OopClosure {
  public:
   AdjusterTracker() {};
-  void do_oop(oop* o)   { PSParallelCompact::check_adjust_pointer(o); }
+  void do_oop(oop* o)         { PSParallelCompact::check_adjust_pointer(o); }
+  void do_oop(narrowOop* o)   { PSParallelCompact::check_adjust_pointer(o); }
 };
 
 
@@ -2948,25 +2909,6 @@
 }
 #endif //VALIDATE_MARK_SWEEP
 
-void PSParallelCompact::adjust_pointer(oop* p, bool isroot) {
-  oop obj = *p;
-  VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
-  if (obj != NULL) {
-    oop new_pointer = (oop) summary_data().calc_new_pointer(obj);
-    assert(new_pointer != NULL ||                     // is forwarding ptr?
-           obj->is_shared(),                          // never forwarded?
-           "should have a new location");
-    // Just always do the update unconditionally?
-    if (new_pointer != NULL) {
-      *p = new_pointer;
-      assert(Universe::heap()->is_in_reserved(new_pointer),
-             "should be in object space");
-      VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
-    }
-  }
-  VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
-}
-
 // Update interior oops in the ranges of chunks [beg_chunk, end_chunk).
 void
 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -80,11 +80,11 @@
   static const size_t ChunkSize;
   static const size_t ChunkSizeBytes;
 
- // Mask for the bits in a size_t to get an offset within a chunk.
+  // Mask for the bits in a size_t to get an offset within a chunk.
   static const size_t ChunkSizeOffsetMask;
- // Mask for the bits in a pointer to get an offset within a chunk.
+  // Mask for the bits in a pointer to get an offset within a chunk.
   static const size_t ChunkAddrOffsetMask;
- // Mask for the bits in a pointer to get the address of the start of a chunk.
+  // Mask for the bits in a pointer to get the address of the start of a chunk.
   static const size_t ChunkAddrMask;
 
   static const size_t Log2BlockSize;
@@ -229,7 +229,7 @@
   // 1 bit marks the end of an object.
   class BlockData
   {
-  public:
+   public:
     typedef short int blk_ofs_t;
 
     blk_ofs_t offset() const { return _offset >= 0 ? _offset : -_offset; }
@@ -269,7 +269,7 @@
       return !_first_is_start_bit;
     }
 
-  private:
+   private:
     blk_ofs_t _offset;
     // This is temporary until the mark_bitmap is separated into
     // a start bit array and an end bit array.
@@ -277,7 +277,7 @@
 #ifdef ASSERT
     short     _set_phase;
     static short _cur_phase;
-  public:
+   public:
     static void set_cur_phase(short v) { _cur_phase = v; }
 #endif
   };
@@ -729,48 +729,51 @@
   } SpaceId;
 
  public:
-  // In line closure decls
+  // Inline closure decls
   //
-
   class IsAliveClosure: public BoolObjectClosure {
    public:
-    void do_object(oop p) { assert(false, "don't call"); }
-    bool do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
+    virtual void do_object(oop p);
+    virtual bool do_object_b(oop p);
   };
 
   class KeepAliveClosure: public OopClosure {
+   private:
     ParCompactionManager* _compaction_manager;
+   protected:
+    template <class T> inline void do_oop_work(T* p);
    public:
-    KeepAliveClosure(ParCompactionManager* cm) {
-      _compaction_manager = cm;
-    }
-    void do_oop(oop* p);
+    KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
   };
 
-  class FollowRootClosure: public OopsInGenClosure{
+  // Currently unused
+  class FollowRootClosure: public OopsInGenClosure {
+   private:
     ParCompactionManager* _compaction_manager;
    public:
-    FollowRootClosure(ParCompactionManager* cm) {
-      _compaction_manager = cm;
-    }
-    void do_oop(oop* p) { follow_root(_compaction_manager, p); }
+    FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
     virtual const bool do_nmethods() const { return true; }
   };
 
   class FollowStackClosure: public VoidClosure {
+   private:
     ParCompactionManager* _compaction_manager;
    public:
-    FollowStackClosure(ParCompactionManager* cm) {
-      _compaction_manager = cm;
-    }
-    void do_void() { follow_stack(_compaction_manager); }
+    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+    virtual void do_void();
   };
 
   class AdjustPointerClosure: public OopsInGenClosure {
+   private:
     bool _is_root;
    public:
-    AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
-    void do_oop(oop* p) { adjust_pointer(p, _is_root); }
+    AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
   };
 
   // Closure for verifying update of pointers.  Does not
@@ -805,8 +808,6 @@
   friend class instanceKlassKlass;
   friend class RefProcTaskProxy;
 
-  static void mark_and_push_internal(ParCompactionManager* cm, oop* p);
-
  private:
   static elapsedTimer         _accumulated_time;
   static unsigned int         _total_invocations;
@@ -838,9 +839,9 @@
 
  private:
   // Closure accessors
-  static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
+  static OopClosure* adjust_pointer_closure()      { return (OopClosure*)&_adjust_pointer_closure; }
   static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
-  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
+  static BoolObjectClosure* is_alive_closure()     { return (BoolObjectClosure*)&_is_alive_closure; }
 
   static void initialize_space_info();
 
@@ -859,10 +860,11 @@
   static void follow_stack(ParCompactionManager* cm);
   static void follow_weak_klass_links(ParCompactionManager* cm);
 
-  static void adjust_pointer(oop* p, bool is_root);
+  template <class T> static inline void adjust_pointer(T* p, bool is_root);
   static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
 
-  static void follow_root(ParCompactionManager* cm, oop* p);
+  template <class T>
+  static inline void follow_root(ParCompactionManager* cm, T* p);
 
   // Compute the dense prefix for the designated space.  This is an experimental
   // implementation currently not used in production.
@@ -971,14 +973,14 @@
 
  protected:
 #ifdef VALIDATE_MARK_SWEEP
-  static GrowableArray<oop*>*            _root_refs_stack;
+  static GrowableArray<void*>*           _root_refs_stack;
   static GrowableArray<oop> *            _live_oops;
   static GrowableArray<oop> *            _live_oops_moved_to;
   static GrowableArray<size_t>*          _live_oops_size;
   static size_t                          _live_oops_index;
   static size_t                          _live_oops_index_at_perm;
-  static GrowableArray<oop*>*            _other_refs_stack;
-  static GrowableArray<oop*>*            _adjusted_pointers;
+  static GrowableArray<void*>*           _other_refs_stack;
+  static GrowableArray<void*>*           _adjusted_pointers;
   static bool                            _pointer_tracking;
   static bool                            _root_tracking;
 
@@ -999,12 +1001,12 @@
 
  public:
   class MarkAndPushClosure: public OopClosure {
+   private:
     ParCompactionManager* _compaction_manager;
    public:
-    MarkAndPushClosure(ParCompactionManager* cm) {
-      _compaction_manager = cm;
-    }
-    void do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
+    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
     virtual const bool do_nmethods() const { return true; }
   };
 
@@ -1038,21 +1040,9 @@
 
   // Marking support
   static inline bool mark_obj(oop obj);
-  static bool mark_obj(oop* p)  {
-    if (*p != NULL) {
-      return mark_obj(*p);
-    } else {
-      return false;
-    }
-  }
-  static void mark_and_push(ParCompactionManager* cm, oop* p) {
-                                          // Check mark and maybe push on
-                                          // marking stack
-    oop m = *p;
-    if (m != NULL && mark_bitmap()->is_unmarked(m)) {
-      mark_and_push_internal(cm, p);
-    }
-  }
+  // Check mark and maybe push on marking stack
+  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
+                                                      T* p);
 
   // Compaction support.
   // Return true if p is in the range [beg_addr, end_addr).
@@ -1127,13 +1117,17 @@
   static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
 
   // Mark pointer and follow contents.
-  static void mark_and_follow(ParCompactionManager* cm, oop* p);
+  template <class T>
+  static inline void mark_and_follow(ParCompactionManager* cm, T* p);
 
   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
   static ParallelCompactData& summary_data() { return _summary_data; }
 
-  static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
-  static inline void adjust_pointer(oop* p,
+  static inline void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
+  static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
+
+  template <class T>
+  static inline void adjust_pointer(T* p,
                                     HeapWord* beg_addr,
                                     HeapWord* end_addr);
 
@@ -1147,8 +1141,8 @@
   static jlong millis_since_last_gc();
 
 #ifdef VALIDATE_MARK_SWEEP
-  static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
-  static void check_adjust_pointer(oop* p);     // Adjust this pointer
+  static void track_adjusted_pointer(void* p, bool isroot);
+  static void check_adjust_pointer(void* p);
   static void track_interior_pointers(oop obj);
   static void check_interior_pointers();
 
@@ -1185,7 +1179,7 @@
 #endif  // #ifdef ASSERT
 };
 
-bool PSParallelCompact::mark_obj(oop obj) {
+inline bool PSParallelCompact::mark_obj(oop obj) {
   const int obj_size = obj->size();
   if (mark_bitmap()->mark_obj(obj, obj_size)) {
     _summary_data.add_obj(obj, obj_size);
@@ -1195,13 +1189,94 @@
   }
 }
 
-inline bool PSParallelCompact::print_phases()
-{
+template <class T>
+inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
+  assert(!Universe::heap()->is_in_reserved(p),
+         "roots shouldn't be things within the heap");
+#ifdef VALIDATE_MARK_SWEEP
+  if (ValidateMarkSweep) {
+    guarantee(!_root_refs_stack->contains(p), "should only be in here once");
+    _root_refs_stack->push(p);
+  }
+#endif
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (mark_bitmap()->is_unmarked(obj)) {
+      if (mark_obj(obj)) {
+        obj->follow_contents(cm);
+      }
+    }
+  }
+  follow_stack(cm);
+}
+
+template <class T>
+inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
+                                               T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (mark_bitmap()->is_unmarked(obj)) {
+      if (mark_obj(obj)) {
+        obj->follow_contents(cm);
+      }
+    }
+  }
+}
+
+template <class T>
+inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (mark_bitmap()->is_unmarked(obj)) {
+      if (mark_obj(obj)) {
+        // This thread marked the object and owns the subsequent processing of it.
+        cm->save_for_scanning(obj);
+      }
+    }
+  }
+}
+
+template <class T>
+inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
+    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
+    assert(new_obj != NULL ||                     // is forwarding ptr?
+           obj->is_shared(),                      // never forwarded?
+           "should be forwarded");
+    // Just always do the update unconditionally?
+    if (new_obj != NULL) {
+      assert(Universe::heap()->is_in_reserved(new_obj),
+             "should be in object space");
+      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    }
+  }
+  VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
+}
+
+template <class T>
+inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
+#ifdef VALIDATE_MARK_SWEEP
+  if (ValidateMarkSweep) {
+    if (!Universe::heap()->is_in_reserved(p)) {
+      _root_refs_stack->push(p);
+    } else {
+      _other_refs_stack->push(p);
+    }
+  }
+#endif
+  mark_and_push(_compaction_manager, p);
+}
+
+inline bool PSParallelCompact::print_phases() {
   return _print_phases;
 }
 
-inline double PSParallelCompact::normal_distribution(double density)
-{
+inline double PSParallelCompact::normal_distribution(double density) {
   assert(_dwl_initialized, "uninitialized");
   const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
   return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
@@ -1257,10 +1332,11 @@
   return ((HeapWord*) k) >= dense_prefix(perm_space_id);
 }
 
-inline void PSParallelCompact::adjust_pointer(oop* p,
+template <class T>
+inline void PSParallelCompact::adjust_pointer(T* p,
                                               HeapWord* beg_addr,
                                               HeapWord* end_addr) {
-  if (is_in(p, beg_addr, end_addr)) {
+  if (is_in((HeapWord*)p, beg_addr, end_addr)) {
     adjust_pointer(p);
   }
 }
@@ -1332,18 +1408,18 @@
   inline void do_addr(HeapWord* addr);
 };
 
-inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
+inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
+{
   _start_array->allocate_block(addr);
   oop(addr)->update_contents(compaction_manager());
 }
 
 class FillClosure: public ParMarkBitMapClosure {
-public:
-  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id):
+ public:
+  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
     ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
     _space_id(space_id),
-    _start_array(PSParallelCompact::start_array(space_id))
-  {
+    _start_array(PSParallelCompact::start_array(space_id)) {
     assert(_space_id == PSParallelCompact::perm_space_id ||
            _space_id == PSParallelCompact::old_space_id,
            "cannot use FillClosure in the young gen");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -25,7 +25,7 @@
 #include "incls/_precompiled.incl"
 #include "incls/_psPromotionLAB.cpp.incl"
 
-const size_t PSPromotionLAB::filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));
+size_t PSPromotionLAB::filler_header_size;
 
 // This is the shared initialization code. It sets up the basic pointers,
 // and allows enough extra space for a filler object. We call a virtual
@@ -41,6 +41,10 @@
   set_end(end);
   set_top(bottom);
 
+  // Initialize after VM starts up because header_size depends on compressed
+  // oops.
+  filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));
+
   // We can be initialized to a zero size!
   if (free() > 0) {
     if (ZapUnusedHeapArea) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -32,7 +32,7 @@
 
 class PSPromotionLAB : public CHeapObj {
  protected:
-  static const size_t filler_header_size;
+  static size_t filler_header_size;
 
   enum LabState {
     needs_flush,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -182,7 +182,7 @@
     claimed_stack_depth()->initialize();
     queue_size = claimed_stack_depth()->max_elems();
     // We want the overflow stack to be permanent
-    _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<oop*>(10, true);
+    _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
     _overflow_stack_breadth = NULL;
   } else {
     claimed_stack_breadth()->initialize();
@@ -240,6 +240,7 @@
 #endif // PS_PM_STATS
 }
 
+
 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
   assert(depth_first(), "invariant");
   assert(overflow_stack_depth() != NULL, "invariant");
@@ -254,13 +255,15 @@
 #endif /* ASSERT */
 
   do {
-    oop* p;
+    StarTask p;
 
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
     while(!overflow_stack_depth()->is_empty()) {
-      p = overflow_stack_depth()->pop();
-      process_popped_location_depth(p);
+      // The Linux compiler wants a different overloaded operator= in taskqueue
+      // for assigning to p, one that the other compilers don't like, so assign
+      // to a fresh local StarTask instead.
+      StarTask ptr = overflow_stack_depth()->pop();
+      process_popped_location_depth(ptr);
     }
 
     if (totally_drain) {
@@ -365,7 +368,7 @@
 //
 
 oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
-  assert(PSScavenge::should_scavenge(o), "Sanity");
+  assert(PSScavenge::should_scavenge(&o), "Sanity");
 
   oop new_obj = NULL;
 
@@ -530,16 +533,30 @@
   // This code must come after the CAS test, or it will print incorrect
   // information.
   if (TraceScavenge) {
-    gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}",
-       PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring",
+    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
+       PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
        new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
-
   }
 #endif
 
   return new_obj;
 }
 
+template <class T> void PSPromotionManager::process_array_chunk_work(
+                                                 oop obj,
+                                                 int start, int end) {
+  assert(start < end, "invariant");
+  T* const base      = (T*)objArrayOop(obj)->base();
+  T* p               = base + start;
+  T* const chunk_end = base + end;
+  while (p < chunk_end) {
+    if (PSScavenge::should_scavenge(p)) {
+      claim_or_forward_depth(p);
+    }
+    ++p;
+  }
+}
+
 void PSPromotionManager::process_array_chunk(oop old) {
   assert(PSChunkLargeArrays, "invariant");
   assert(old->is_objArray(), "invariant");
@@ -569,15 +586,10 @@
     arrayOop(old)->set_length(actual_length);
   }
 
-  assert(start < end, "invariant");
-  oop* const base      = objArrayOop(obj)->base();
-  oop* p               = base + start;
-  oop* const chunk_end = base + end;
-  while (p < chunk_end) {
-    if (PSScavenge::should_scavenge(*p)) {
-      claim_or_forward_depth(p);
-    }
-    ++p;
+  if (UseCompressedOops) {
+    process_array_chunk_work<narrowOop>(obj, start, end);
+  } else {
+    process_array_chunk_work<oop>(obj, start, end);
   }
 }
 
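The UseCompressedOops branch above is how a runtime flag selects which instantiation of the templated chunk-scanning body runs; the loop itself is written once. A hedged standalone sketch of that pattern; use_narrow_slots, scan_range, and the slot types are stand-ins, not VM code:

#include <cstdint>
#include <iostream>

// Sketch of picking a template instantiation from a runtime flag, in the
// spirit of the UseCompressedOops branch above.  The flag, slot types, and
// scan_range helper are stand-ins, not VM code.
static bool use_narrow_slots = true;

template <class T>
static void scan_range(const T* base, int start, int end) {
  // Walk the half-open slot range [start, end) with one shared body.
  unsigned long long sum = 0;
  for (const T* p = base + start; p < base + end; ++p) sum += *p;
  std::cout << sizeof(T) * 8 << "-bit slots, partial sum = " << sum << "\n";
}

int main() {
  uint32_t narrow[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  uint64_t wide[8]   = {1, 2, 3, 4, 5, 6, 7, 8};
  // One branch on the flag chooses the instantiation; the loop is written once.
  if (use_narrow_slots) {
    scan_range<uint32_t>(narrow, 2, 6);   // sums slots 2..5: 3+4+5+6 = 18
  } else {
    scan_range<uint64_t>(wide, 2, 6);
  }
  return 0;
}
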
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -42,8 +42,6 @@
 class PSOldGen;
 class ParCompactionManager;
 
-#define PS_CHUNKED_ARRAY_OOP_MASK  1
-
 #define PS_PM_STATS         0
 
 class PSPromotionManager : public CHeapObj {
@@ -80,7 +78,7 @@
   PrefetchQueue                       _prefetch_queue;
 
   OopStarTaskQueue                    _claimed_stack_depth;
-  GrowableArray<oop*>*                _overflow_stack_depth;
+  GrowableArray<StarTask>*            _overflow_stack_depth;
   OopTaskQueue                        _claimed_stack_breadth;
   GrowableArray<oop>*                 _overflow_stack_breadth;
 
@@ -92,13 +90,15 @@
   uint                                _min_array_size_for_chunking;
 
   // Accessors
-  static PSOldGen* old_gen()              { return _old_gen; }
-  static MutableSpace* young_space()      { return _young_space; }
+  static PSOldGen* old_gen()         { return _old_gen; }
+  static MutableSpace* young_space() { return _young_space; }
 
   inline static PSPromotionManager* manager_array(int index);
+  template <class T> inline void claim_or_forward_internal_depth(T* p);
+  template <class T> inline void claim_or_forward_internal_breadth(T* p);
 
-  GrowableArray<oop*>* overflow_stack_depth()  { return _overflow_stack_depth; }
-  GrowableArray<oop>* overflow_stack_breadth()   { return _overflow_stack_breadth; }
+  GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
+  GrowableArray<oop>*  overflow_stack_breadth()   { return _overflow_stack_breadth; }
 
   // On the task queues we push reference locations as well as
   // partially-scanned arrays (in the latter case, we push an oop to
@@ -116,27 +116,37 @@
   // (oop). We do all the necessary casting in the mask / unmask
   // methods to avoid sprinkling the rest of the code with more casts.
 
-  bool is_oop_masked(oop* p) {
-    return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
+  // These entries get pushed onto the taskqueue, so PS_CHUNKED_ARRAY_OOP_MASK
+  // (or any future masks) must be kept distinct from COMPRESSED_OOP_MASK
+#define PS_CHUNKED_ARRAY_OOP_MASK  0x2
+
+  bool is_oop_masked(StarTask p) {
+    // If something is marked chunked it's always treated like wide oop*
+    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
+                                  PS_CHUNKED_ARRAY_OOP_MASK;
   }
 
   oop* mask_chunked_array_oop(oop obj) {
     assert(!is_oop_masked((oop*) obj), "invariant");
-    oop* ret = (oop*) ((intptr_t) obj  | PS_CHUNKED_ARRAY_OOP_MASK);
+    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
     assert(is_oop_masked(ret), "invariant");
     return ret;
   }
 
-  oop unmask_chunked_array_oop(oop* p) {
+  oop unmask_chunked_array_oop(StarTask p) {
     assert(is_oop_masked(p), "invariant");
-    oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
+    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
+    oop *chunk = (oop*)p;  // cast p to oop (uses conversion operator)
+    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
     assert(!is_oop_masked((oop*) ret), "invariant");
     return ret;
   }
 
+  template <class T> void  process_array_chunk_work(oop obj,
+                                                    int start, int end);
   void process_array_chunk(oop old);
 
-  void push_depth(oop* p) {
+  template <class T> void push_depth(T* p) {
     assert(depth_first(), "pre-condition");
 
 #if PS_PM_STATS
@@ -175,7 +185,7 @@
   }
 
  protected:
-  static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
+  static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
   static OopTaskQueueSet*     stack_array_breadth() { return _stack_array_breadth; }
 
  public:
@@ -227,6 +237,7 @@
       drain_stacks_breadth(totally_drain);
     }
   }
+ public:
   void drain_stacks_cond_depth() {
     if (claimed_stack_depth()->size() > _target_stack_size) {
       drain_stacks_depth(false);
@@ -256,15 +267,11 @@
     return _depth_first;
   }
 
-  inline void process_popped_location_depth(oop* p);
+  inline void process_popped_location_depth(StarTask p);
 
   inline void flush_prefetch_queue();
-
-  inline void claim_or_forward_depth(oop* p);
-  inline void claim_or_forward_internal_depth(oop* p);
-
-  inline void claim_or_forward_breadth(oop* p);
-  inline void claim_or_forward_internal_breadth(oop* p);
+  template <class T> inline void claim_or_forward_depth(T* p);
+  template <class T> inline void claim_or_forward_breadth(T* p);
 
 #if PS_PM_STATS
   void increment_steals(oop* p = NULL) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -28,64 +28,68 @@
   return _manager_array[index];
 }
 
-inline void PSPromotionManager::claim_or_forward_internal_depth(oop* p) {
-  if (p != NULL) {
-    oop o = *p;
+template <class T>
+inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
+  if (p != NULL) { // XXX: error if p != NULL here
+    oop o = oopDesc::load_decode_heap_oop_not_null(p);
     if (o->is_forwarded()) {
       o = o->forwardee();
-
       // Card mark
       if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
         PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
       }
-      *p = o;
+      oopDesc::encode_store_heap_oop_not_null(p, o);
     } else {
       push_depth(p);
     }
   }
 }
 
-inline void PSPromotionManager::claim_or_forward_internal_breadth(oop* p) {
-  if (p != NULL) {
-    oop o = *p;
+template <class T>
+inline void PSPromotionManager::claim_or_forward_internal_breadth(T* p) {
+  if (p != NULL) { // XXX: error if p != NULL here
+    oop o = oopDesc::load_decode_heap_oop_not_null(p);
     if (o->is_forwarded()) {
       o = o->forwardee();
     } else {
       o = copy_to_survivor_space(o, false);
     }
-
     // Card mark
     if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
       PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
     }
-    *p = o;
+    oopDesc::encode_store_heap_oop_not_null(p, o);
   }
 }
 
 inline void PSPromotionManager::flush_prefetch_queue() {
   assert(!depth_first(), "invariant");
-  for (int i=0; i<_prefetch_queue.length(); i++) {
-    claim_or_forward_internal_breadth(_prefetch_queue.pop());
+  for (int i = 0; i < _prefetch_queue.length(); i++) {
+    claim_or_forward_internal_breadth((oop*)_prefetch_queue.pop());
   }
 }
 
-inline void PSPromotionManager::claim_or_forward_depth(oop* p) {
+template <class T>
+inline void PSPromotionManager::claim_or_forward_depth(T* p) {
   assert(depth_first(), "invariant");
-  assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
-  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
+  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
+         "Sanity");
   assert(Universe::heap()->is_in(p), "pointer outside heap");
 
   claim_or_forward_internal_depth(p);
 }
 
-inline void PSPromotionManager::claim_or_forward_breadth(oop* p) {
+template <class T>
+inline void PSPromotionManager::claim_or_forward_breadth(T* p) {
   assert(!depth_first(), "invariant");
-  assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
-  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
+  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
+         "Sanity");
   assert(Universe::heap()->is_in(p), "pointer outside heap");
 
   if (UsePrefetchQueue) {
-    claim_or_forward_internal_breadth(_prefetch_queue.push_and_pop(p));
+    claim_or_forward_internal_breadth((T*)_prefetch_queue.push_and_pop(p));
   } else {
     // This option is used for testing.  The use of the prefetch
     // queue can delay the processing of the objects and thus
@@ -106,12 +110,16 @@
   }
 }
 
-inline void PSPromotionManager::process_popped_location_depth(oop* p) {
+inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
   if (is_oop_masked(p)) {
     assert(PSChunkLargeArrays, "invariant");
     oop const old = unmask_chunked_array_oop(p);
     process_array_chunk(old);
   } else {
-    PSScavenge::copy_and_push_safe_barrier(this, p);
+    if (p.is_narrow()) {
+      PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
+    } else {
+      PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
+    }
   }
 }
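
process_popped_location_depth relies on the low bits of an aligned pointer being free for tags: PS_CHUNKED_ARRAY_OOP_MASK (0x2 in the header above) marks a partially-scanned array and is kept distinct from the compressed-oop tag used by StarTask. A toy tagging round-trip under assumed tag values; kNarrowTag, kChunkedTag, tag, and untag are illustrative, and the real StarTask encoding in taskqueue.hpp may differ:

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iostream>

// Toy tagging of queue entries: bit 0 stands in for a "narrow" tag and bit 1
// for "chunked array", mirroring the requirement that the two masks not
// overlap.  The values and helpers are assumptions, not the VM's encoding.
static const uintptr_t kNarrowTag  = 0x1;
static const uintptr_t kChunkedTag = 0x2;

static void* tag(void* p, uintptr_t t)     { return (void*)((uintptr_t)p | t); }
static void* untag(void* p)                { return (void*)((uintptr_t)p & ~(kNarrowTag | kChunkedTag)); }
static bool  has_tag(void* p, uintptr_t t) { return ((uintptr_t)p & t) != 0; }

int main() {
  void* slot = std::malloc(16);   // malloc storage is aligned, so the low bits are free
  void* task = tag(slot, kChunkedTag);
  assert(has_tag(task, kChunkedTag) && !has_tag(task, kNarrowTag));
  assert(untag(task) == slot);    // the original pointer round-trips
  std::cout << "chunked tag round-trips\n";
  std::free(slot);
  return 0;
}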
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -65,16 +65,18 @@
     assert(_promotion_manager != NULL, "Sanity");
   }
 
-  void do_oop(oop* p) {
-    assert (*p != NULL, "expected non-null ref");
-    assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+  template <class T> void do_oop_work(T* p) {
+    assert (!oopDesc::is_null(*p), "expected non-null ref");
+    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
+            "expected an oop while scanning weak refs");
 
-    oop obj = oop(*p);
     // Weak refs may be visited more than once.
-    if (PSScavenge::should_scavenge(obj, _to_space)) {
+    if (PSScavenge::should_scavenge(p, _to_space)) {
       PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
     }
   }
+  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
 };
 
 class PSEvacuateFollowersClosure: public VoidClosure {
@@ -83,7 +85,7 @@
  public:
   PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
 
-  void do_void() {
+  virtual void do_void() {
     assert(_promotion_manager != NULL, "Sanity");
     _promotion_manager->drain_stacks(true);
     guarantee(_promotion_manager->stacks_empty(),
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -116,16 +116,16 @@
   // If an attempt to promote fails, this method is invoked
   static void oop_promotion_failed(oop obj, markOop obj_mark);
 
-  static inline bool should_scavenge(oop p);
+  template <class T> static inline bool should_scavenge(T* p);
 
   // These call should_scavenge() above and, if it returns true, also check that
   // the object was not newly copied into to_space.  The version with the bool
   // argument is a convenience wrapper that fetches the to_space pointer from
   // the heap and calls the other version (if the arg is true).
-  static inline bool should_scavenge(oop p, MutableSpace* to_space);
-  static inline bool should_scavenge(oop p, bool check_to_space);
+  template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
+  template <class T> static inline bool should_scavenge(T* p, bool check_to_space);
 
-  inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, oop* p);
+  template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
 
   // Is an object in the young generation
   // This assumes that the HeapWord argument is in the heap,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -22,28 +22,33 @@
  *
  */
 
-
 inline void PSScavenge::save_to_space_top_before_gc() {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   _to_space_top_before_gc = heap->young_gen()->to_space()->top();
 }
 
-inline bool PSScavenge::should_scavenge(oop p) {
-  return p == NULL ? false : PSScavenge::is_obj_in_young((HeapWord*) p);
+template <class T> inline bool PSScavenge::should_scavenge(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (oopDesc::is_null(heap_oop)) return false;
+  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  return PSScavenge::is_obj_in_young((HeapWord*)obj);
 }
 
-inline bool PSScavenge::should_scavenge(oop p, MutableSpace* to_space) {
+template <class T>
+inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
   if (should_scavenge(p)) {
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     // Skip objects copied to to_space since the scavenge started.
-    HeapWord* const addr = (HeapWord*) p;
+    HeapWord* const addr = (HeapWord*)obj;
     return addr < to_space_top_before_gc() || addr >= to_space->end();
   }
   return false;
 }
 
-inline bool PSScavenge::should_scavenge(oop p, bool check_to_space) {
+template <class T>
+inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
   if (check_to_space) {
-    ParallelScavengeHeap* heap = (ParallelScavengeHeap*) Universe::heap();
+    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
     return should_scavenge(p, heap->young_gen()->to_space());
   }
   return should_scavenge(p);
@@ -52,24 +57,23 @@
 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 // This version tests the oop* to make sure it is within the heap before
 // attempting marking.
+template <class T>
 inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
-                                                   oop*                p) {
-  assert(should_scavenge(*p, true), "revisiting object?");
+                                                   T*                  p) {
+  assert(should_scavenge(p, true), "revisiting object?");
 
-  oop o = *p;
-  if (o->is_forwarded()) {
-    *p = o->forwardee();
-  } else {
-    *p = pm->copy_to_survivor_space(o, pm->depth_first());
-  }
+  oop o = oopDesc::load_decode_heap_oop_not_null(p);
+  oop new_obj = o->is_forwarded()
+        ? o->forwardee()
+        : pm->copy_to_survivor_space(o, pm->depth_first());
+  oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 
   // We cannot mark without test, as some code passes us pointers
   // that are outside the heap.
-  if ((!PSScavenge::is_obj_in_young((HeapWord*) p)) &&
+  if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
       Universe::heap()->is_in_reserved(p)) {
-    o = *p;
-    if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
-      card_table()->inline_write_ref_field_gc(p, o);
+    if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
+      card_table()->inline_write_ref_field_gc(p, new_obj);
     }
   }
 }
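
Note: copy_and_push_safe_barrier now reads, decodes, updates and re-encodes through the same slot. Below is a minimal sketch of that update step; the object, forwarding and copy logic are simplified stand-ins and do not claim to match the HotSpot implementation.

// Sketch of the slot update: read the old reference, take the forwardee if
// the object was already copied (otherwise copy it), then write the result
// back through the same slot in its own width.
#include <cstdint>

typedef uint32_t narrowOop;

struct Object {
  Object* forwardee;               // set once the object has been copied
};

static uintptr_t heap_base  = 0;   // assumed encoding base
static const int heap_shift = 3;   // assumed encoding shift

inline Object* load_decode(Object** p)   { return *p; }
inline Object* load_decode(narrowOop* p) {
  return (Object*)(heap_base + ((uintptr_t)*p << heap_shift));
}
inline void encode_store(Object** p, Object* o)   { *p = o; }
inline void encode_store(narrowOop* p, Object* o) {
  *p = (narrowOop)(((uintptr_t)o - heap_base) >> heap_shift);
}

// Stand-in: a real scavenger would copy o into the survivor space and install
// the forwardee; here the object simply stays where it is.
static Object* copy_to_survivor_space(Object* o) {
  o->forwardee = o;
  return o;
}

template <class T> void update_slot(T* p) {
  Object* o = load_decode(p);                      // assumed non-null here
  Object* new_obj = (o->forwardee != nullptr) ? o->forwardee
                                              : copy_to_survivor_space(o);
  encode_store(p, new_obj);                        // store in the slot's width
}
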
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -34,15 +34,17 @@
  private:
   PSPromotionManager* _promotion_manager;
 
- public:
-  PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
-
-  virtual void do_oop(oop* p) {
-    if (PSScavenge::should_scavenge(*p)) {
+ protected:
+  template <class T> void do_oop_work(T *p) {
+    if (PSScavenge::should_scavenge(p)) {
       // We never card mark roots, maybe call a func without test?
       PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
     }
   }
+ public:
+  PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
+  void do_oop(oop* p)       { PSScavengeRootsClosure::do_oop_work(p); }
+  void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
 };
 
 void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
@@ -135,7 +137,7 @@
   int random_seed = 17;
   if (pm->depth_first()) {
     while(true) {
-      oop* p;
+      StarTask p;
       if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
 #if PS_PM_STATS
         pm->increment_steals(p);
@@ -164,8 +166,7 @@
       }
     }
   }
-  guarantee(pm->stacks_empty(),
-            "stacks should be empty at this point");
+  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
 }
 
 //
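
Note: the steal loop above now drains StarTask entries instead of bare oop*, i.e. one task value that can name either an oop* or a narrowOop* slot. The sketch below shows one way to represent that with a low tag bit; it only illustrates the idea and does not claim to match the HotSpot representation.

// Sketch of a task slot that can hold either an oop* or a narrowOop*.
#include <cassert>
#include <cstdint>

typedef uintptr_t oop;
typedef uint32_t  narrowOop;

class StarTaskSketch {
  void* _holder;                       // low bit set => narrowOop*
  enum { NARROW_BIT = 1 };
 public:
  StarTaskSketch() : _holder(nullptr) {}
  StarTaskSketch(oop* p) : _holder(p) {
    assert(((uintptr_t)p & NARROW_BIT) == 0 && "slots are assumed aligned");
  }
  StarTaskSketch(narrowOop* p)
    : _holder((void*)((uintptr_t)p | NARROW_BIT)) {}

  bool is_narrow() const { return ((uintptr_t)_holder & NARROW_BIT) != 0; }
  oop*       as_oop_ptr() const    { return (oop*)_holder; }
  narrowOop* as_narrow_ptr() const {
    return (narrowOop*)((uintptr_t)_holder & ~(uintptr_t)NARROW_BIT);
  }
};

// A consumer dispatches on the stored width before touching the slot.
template <class Closure> void apply(StarTaskSketch t, Closure& cl) {
  if (t.is_narrow()) cl.do_oop(t.as_narrow_ptr());
  else               cl.do_oop(t.as_oop_ptr());
}
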
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -36,16 +36,16 @@
 ReferenceProcessor*     MarkSweep::_ref_processor   = NULL;
 
 #ifdef VALIDATE_MARK_SWEEP
-GrowableArray<oop*>*    MarkSweep::_root_refs_stack = NULL;
+GrowableArray<void*>*   MarkSweep::_root_refs_stack = NULL;
 GrowableArray<oop> *    MarkSweep::_live_oops = NULL;
 GrowableArray<oop> *    MarkSweep::_live_oops_moved_to = NULL;
 GrowableArray<size_t>*  MarkSweep::_live_oops_size = NULL;
 size_t                  MarkSweep::_live_oops_index = 0;
 size_t                  MarkSweep::_live_oops_index_at_perm = 0;
-GrowableArray<oop*>*    MarkSweep::_other_refs_stack = NULL;
-GrowableArray<oop*>*    MarkSweep::_adjusted_pointers = NULL;
-bool                    MarkSweep::_pointer_tracking = false;
-bool                    MarkSweep::_root_tracking = true;
+GrowableArray<void*>*   MarkSweep::_other_refs_stack = NULL;
+GrowableArray<void*>*   MarkSweep::_adjusted_pointers = NULL;
+bool                         MarkSweep::_pointer_tracking = false;
+bool                         MarkSweep::_root_tracking = true;
 
 GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops = NULL;
 GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops_moved_to = NULL;
@@ -59,7 +59,6 @@
   _revisit_klass_stack->push(k);
 }
 
-
 void MarkSweep::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
   // Update and follow all subklass, sibling and implementor links.
@@ -69,44 +68,15 @@
   follow_stack();
 }
 
+MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
 
-void MarkSweep::mark_and_follow(oop* p) {
-  assert(Universe::heap()->is_in_reserved(p),
-         "we should only be traversing objects here");
-  oop m = *p;
-  if (m != NULL && !m->mark()->is_marked()) {
-    mark_object(m);
-    m->follow_contents();  // Follow contents of the marked object
-  }
-}
-
-void MarkSweep::_mark_and_push(oop* p) {
-  // Push marked object, contents will be followed later
-  oop m = *p;
-  mark_object(m);
-  _marking_stack->push(m);
-}
+void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
+void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
 
 MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
 
-void MarkSweep::follow_root(oop* p) {
-  assert(!Universe::heap()->is_in_reserved(p),
-         "roots shouldn't be things within the heap");
-#ifdef VALIDATE_MARK_SWEEP
-  if (ValidateMarkSweep) {
-    guarantee(!_root_refs_stack->contains(p), "should only be in here once");
-    _root_refs_stack->push(p);
-  }
-#endif
-  oop m = *p;
-  if (m != NULL && !m->mark()->is_marked()) {
-    mark_object(m);
-    m->follow_contents();  // Follow contents of the marked object
-  }
-  follow_stack();
-}
-
-MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
+void MarkSweep::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(p); }
+void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
 
 void MarkSweep::follow_stack() {
   while (!_marking_stack->is_empty()) {
@@ -118,6 +88,7 @@
 
 MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;
 
+void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
 
 // We preserve the mark which should be replaced at the end and the location that it
 // will go.  Note that the object that this markOop belongs to isn't currently at that
@@ -142,6 +113,9 @@
 MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true);
 MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false);
 
+void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
+void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+
 void MarkSweep::adjust_marks() {
   assert(_preserved_oop_stack == NULL ||
          _preserved_oop_stack->length() == _preserved_mark_stack->length(),
@@ -187,7 +161,7 @@
 
 #ifdef VALIDATE_MARK_SWEEP
 
-void MarkSweep::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
+void MarkSweep::track_adjusted_pointer(void* p, bool isroot) {
   if (!ValidateMarkSweep)
     return;
 
@@ -201,7 +175,7 @@
     if (index != -1) {
       int l = _root_refs_stack->length();
       if (l > 0 && l - 1 != index) {
-        oop* last = _root_refs_stack->pop();
+        void* last = _root_refs_stack->pop();
         assert(last != p, "should be different");
         _root_refs_stack->at_put(index, last);
       } else {
@@ -211,19 +185,17 @@
   }
 }
 
-
-void MarkSweep::check_adjust_pointer(oop* p) {
+void MarkSweep::check_adjust_pointer(void* p) {
   _adjusted_pointers->push(p);
 }
 
-
 class AdjusterTracker: public OopClosure {
  public:
-  AdjusterTracker() {};
-  void do_oop(oop* o)   { MarkSweep::check_adjust_pointer(o); }
+  AdjusterTracker() {}
+  void do_oop(oop* o)       { MarkSweep::check_adjust_pointer(o); }
+  void do_oop(narrowOop* o) { MarkSweep::check_adjust_pointer(o); }
 };
 
-
 void MarkSweep::track_interior_pointers(oop obj) {
   if (ValidateMarkSweep) {
     _adjusted_pointers->clear();
@@ -234,7 +206,6 @@
   }
 }
 
-
 void MarkSweep::check_interior_pointers() {
   if (ValidateMarkSweep) {
     _pointer_tracking = false;
@@ -242,7 +213,6 @@
   }
 }
 
-
 void MarkSweep::reset_live_oop_tracking(bool at_perm) {
   if (ValidateMarkSweep) {
     guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
@@ -250,7 +220,6 @@
   }
 }
 
-
 void MarkSweep::register_live_oop(oop p, size_t size) {
   if (ValidateMarkSweep) {
     _live_oops->push(p);
@@ -283,7 +252,6 @@
   }
 }
 
-
 void MarkSweep::compaction_complete() {
   if (RecordMarkSweepCompaction) {
     GrowableArray<HeapWord*>* _tmp_live_oops          = _cur_gc_live_oops;
@@ -299,7 +267,6 @@
   }
 }
 
-
 void MarkSweep::print_new_location_of_heap_address(HeapWord* q) {
   if (!RecordMarkSweepCompaction) {
     tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
@@ -318,7 +285,7 @@
       HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
       size_t offset = (q - old_oop);
       tty->print_cr("Address " PTR_FORMAT, q);
-      tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
+      tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset);
       tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
       return;
     }
@@ -328,23 +295,16 @@
 }
 #endif //VALIDATE_MARK_SWEEP
 
-MarkSweep::IsAliveClosure MarkSweep::is_alive;
+MarkSweep::IsAliveClosure   MarkSweep::is_alive;
 
-void MarkSweep::KeepAliveClosure::do_oop(oop* p) {
-#ifdef VALIDATE_MARK_SWEEP
-  if (ValidateMarkSweep) {
-    if (!Universe::heap()->is_in_reserved(p)) {
-      _root_refs_stack->push(p);
-    } else {
-      _other_refs_stack->push(p);
-    }
-  }
-#endif
-  mark_and_push(p);
-}
+void MarkSweep::IsAliveClosure::do_object(oop p)   { ShouldNotReachHere(); }
+bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
 
 MarkSweep::KeepAliveClosure MarkSweep::keep_alive;
 
+void MarkSweep::KeepAliveClosure::do_oop(oop* p)       { MarkSweep::KeepAliveClosure::do_oop_work(p); }
+void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
+
 void marksweep_init() { /* empty */ }
 
 #ifndef PRODUCT
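
Note: the out-of-line do_oop definitions above all funnel into mark_and_push and follow_stack. A simplified sketch of that marking worklist follows; the object, slot and stack types are stand-ins, not the HotSpot data structures.

// Sketch of the mark_and_push / follow_stack pair: mark an object the first
// time a reference to it is seen, push it, then drain the stack following
// each object's references.
#include <cstdint>
#include <vector>

typedef uint32_t narrowOop;

struct Object {
  bool marked;
  std::vector<Object*> fields;     // outgoing references (stand-in)
};

static uintptr_t heap_base  = 0;   // assumed encoding base
static const int heap_shift = 3;   // assumed encoding shift

inline Object* load_decode(Object** p)   { return *p; }
inline Object* load_decode(narrowOop* p) {
  return (*p == 0) ? nullptr
                   : (Object*)(heap_base + ((uintptr_t)*p << heap_shift));
}

static std::vector<Object*> marking_stack;

template <class T> void mark_and_push(T* p) {
  Object* obj = load_decode(p);
  if (obj != nullptr && !obj->marked) {   // check the mark before pushing
    obj->marked = true;                   // "mark_object"
    marking_stack.push_back(obj);
  }
}

static void follow_stack() {
  while (!marking_stack.empty()) {        // empty the marking stack
    Object* obj = marking_stack.back();
    marking_stack.pop_back();
    for (Object*& f : obj->fields) {      // follow the object's contents
      mark_and_push(&f);
    }
  }
}
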
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -46,55 +46,59 @@
 #define VALIDATE_MARK_SWEEP_ONLY(code)
 #endif
 
-
 // declared at end
 class PreservedMark;
 
 class MarkSweep : AllStatic {
   //
-  // In line closure decls
+  // Inline closure decls
   //
-
-  class FollowRootClosure: public OopsInGenClosure{
+  class FollowRootClosure: public OopsInGenClosure {
    public:
-    void do_oop(oop* p) { follow_root(p); }
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
     virtual const bool do_nmethods() const { return true; }
   };
 
   class MarkAndPushClosure: public OopClosure {
    public:
-    void do_oop(oop* p) { mark_and_push(p); }
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
     virtual const bool do_nmethods() const { return true; }
   };
 
   class FollowStackClosure: public VoidClosure {
    public:
-    void do_void() { follow_stack(); }
+    virtual void do_void();
   };
 
   class AdjustPointerClosure: public OopsInGenClosure {
+   private:
     bool _is_root;
    public:
     AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
-    void do_oop(oop* p) { _adjust_pointer(p, _is_root); }
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
   };
 
   // Used for java/lang/ref handling
   class IsAliveClosure: public BoolObjectClosure {
    public:
-    void do_object(oop p) { assert(false, "don't call"); }
-    bool do_object_b(oop p) { return p->is_gc_marked(); }
+    virtual void do_object(oop p);
+    virtual bool do_object_b(oop p);
   };
 
   class KeepAliveClosure: public OopClosure {
+   protected:
+    template <class T> void do_oop_work(T* p);
    public:
-    void do_oop(oop* p);
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
   };
 
   //
   // Friend decls
   //
-
   friend class AdjustPointerClosure;
   friend class KeepAliveClosure;
   friend class VM_MarkSweep;
@@ -120,14 +124,14 @@
   static ReferenceProcessor*             _ref_processor;
 
 #ifdef VALIDATE_MARK_SWEEP
-  static GrowableArray<oop*>*            _root_refs_stack;
+  static GrowableArray<void*>*           _root_refs_stack;
   static GrowableArray<oop> *            _live_oops;
   static GrowableArray<oop> *            _live_oops_moved_to;
   static GrowableArray<size_t>*          _live_oops_size;
   static size_t                          _live_oops_index;
   static size_t                          _live_oops_index_at_perm;
-  static GrowableArray<oop*>*            _other_refs_stack;
-  static GrowableArray<oop*>*            _adjusted_pointers;
+  static GrowableArray<void*>*           _other_refs_stack;
+  static GrowableArray<void*>*           _adjusted_pointers;
   static bool                            _pointer_tracking;
   static bool                            _root_tracking;
 
@@ -146,9 +150,8 @@
   static GrowableArray<size_t>*          _last_gc_live_oops_size;
 #endif
 
-
   // Non public closures
-  static IsAliveClosure is_alive;
+  static IsAliveClosure   is_alive;
   static KeepAliveClosure keep_alive;
 
   // Class unloading. Update subklass/sibling/implementor links at end of marking phase.
@@ -159,9 +162,9 @@
 
  public:
   // Public closures
-  static FollowRootClosure follow_root_closure;
-  static MarkAndPushClosure mark_and_push_closure;
-  static FollowStackClosure follow_stack_closure;
+  static FollowRootClosure    follow_root_closure;
+  static MarkAndPushClosure   mark_and_push_closure;
+  static FollowStackClosure   follow_stack_closure;
   static AdjustPointerClosure adjust_root_pointer_closure;
   static AdjustPointerClosure adjust_pointer_closure;
 
@@ -170,39 +173,29 @@
 
   // Call backs for marking
   static void mark_object(oop obj);
-  static void follow_root(oop* p);        // Mark pointer and follow contents. Empty marking
-
-                                          // stack afterwards.
+  // Mark pointer and follow contents.  Empty marking stack afterwards.
+  template <class T> static inline void follow_root(T* p);
+  // Mark pointer and follow contents.
+  template <class T> static inline void mark_and_follow(T* p);
+  // Check mark and maybe push on marking stack
+  template <class T> static inline void mark_and_push(T* p);
 
-  static void mark_and_follow(oop* p);    // Mark pointer and follow contents.
-  static void _mark_and_push(oop* p);     // Mark pointer and push obj on
-                                          // marking stack.
-
+  static void follow_stack();   // Empty marking stack.
 
-  static void mark_and_push(oop* p) {     // Check mark and maybe push on
-                                          // marking stack
-    // assert(Universe::is_reserved_heap((oop)p), "we should only be traversing objects here");
-    oop m = *p;
-    if (m != NULL && !m->mark()->is_marked()) {
-      _mark_and_push(p);
-    }
-  }
+  static void preserve_mark(oop p, markOop mark);
+                                // Save the mark word so it can be restored later
+  static void adjust_marks();   // Adjust the pointers in the preserved marks table
+  static void restore_marks();  // Restore the marks that we saved in preserve_mark
 
-  static void follow_stack();             // Empty marking stack.
-
+  template <class T> static inline void adjust_pointer(T* p, bool isroot);
 
-  static void preserve_mark(oop p, markOop mark);       // Save the mark word so it can be restored later
-  static void adjust_marks();             // Adjust the pointers in the preserved marks table
-  static void restore_marks();            // Restore the marks that we saved in preserve_mark
-
-  static void _adjust_pointer(oop* p, bool isroot);
-
-  static void adjust_root_pointer(oop* p) { _adjust_pointer(p, true); }
-  static void adjust_pointer(oop* p)      { _adjust_pointer(p, false); }
+  static void adjust_root_pointer(oop* p)  { adjust_pointer(p, true); }
+  static void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
+  static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
 
 #ifdef VALIDATE_MARK_SWEEP
-  static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
-  static void check_adjust_pointer(oop* p);     // Adjust this pointer
+  static void track_adjusted_pointer(void* p, bool isroot);
+  static void check_adjust_pointer(void* p);
   static void track_interior_pointers(oop obj);
   static void check_interior_pointers();
 
@@ -223,7 +216,6 @@
   static void revisit_weak_klass_link(Klass* k);  // Update subklass/sibling/implementor links at end of marking.
 };
 
-
 class PreservedMark VALUE_OBJ_CLASS_SPEC {
 private:
   oop _obj;
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -22,32 +22,11 @@
  *
  */
 
-inline void MarkSweep::_adjust_pointer(oop* p, bool isroot) {
-  oop obj = *p;
-  VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
-  if (obj != NULL) {
-    oop new_pointer = oop(obj->mark()->decode_pointer());
-    assert(new_pointer != NULL ||                     // is forwarding ptr?
-           obj->mark() == markOopDesc::prototype() || // not gc marked?
-           (UseBiasedLocking && obj->mark()->has_bias_pattern()) || // not gc marked?
-           obj->is_shared(),                          // never forwarded?
-           "should contain a forwarding pointer");
-    if (new_pointer != NULL) {
-      *p = new_pointer;
-      assert(Universe::heap()->is_in_reserved(new_pointer),
-             "should be in object space");
-      VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
-    }
-  }
-  VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
-}
-
 inline void MarkSweep::mark_object(oop obj) {
-
 #ifndef SERIALGC
   if (UseParallelOldGC && VerifyParallelOldWithMarkSweep) {
     assert(PSParallelCompact::mark_bitmap()->is_marked(obj),
-      "Should be marked in the marking bitmap");
+           "Should be marked in the marking bitmap");
   }
 #endif // SERIALGC
 
@@ -60,3 +39,80 @@
     preserve_mark(obj, mark);
   }
 }
+
+template <class T> inline void MarkSweep::follow_root(T* p) {
+  assert(!Universe::heap()->is_in_reserved(p),
+         "roots shouldn't be things within the heap");
+#ifdef VALIDATE_MARK_SWEEP
+  if (ValidateMarkSweep) {
+    guarantee(!_root_refs_stack->contains(p), "should only be in here once");
+    _root_refs_stack->push(p);
+  }
+#endif
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked()) {
+      mark_object(obj);
+      obj->follow_contents();
+    }
+  }
+  follow_stack();
+}
+
+template <class T> inline void MarkSweep::mark_and_follow(T* p) {
+//  assert(Universe::heap()->is_in_reserved(p), "should be in object space");
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked()) {
+      mark_object(obj);
+      obj->follow_contents();
+    }
+  }
+}
+
+template <class T> inline void MarkSweep::mark_and_push(T* p) {
+//  assert(Universe::heap()->is_in_reserved(p), "should be in object space");
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked()) {
+      mark_object(obj);
+      _marking_stack->push(obj);
+    }
+  }
+}
+
+template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
+    oop new_obj = oop(obj->mark()->decode_pointer());
+    assert(new_obj != NULL ||                         // is forwarding ptr?
+           obj->mark() == markOopDesc::prototype() || // not gc marked?
+           (UseBiasedLocking && obj->mark()->has_bias_pattern()) ||
+                                                      // not gc marked?
+           obj->is_shared(),                          // never forwarded?
+           "should be forwarded");
+    if (new_obj != NULL) {
+      assert(Universe::heap()->is_in_reserved(new_obj),
+             "should be in object space");
+      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    }
+  }
+  VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
+}
+
+template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
+#ifdef VALIDATE_MARK_SWEEP
+  if (ValidateMarkSweep) {
+    if (!Universe::heap()->is_in_reserved(p)) {
+      _root_refs_stack->push(p);
+    } else {
+      _other_refs_stack->push(p);
+    }
+  }
+#endif
+  mark_and_push(p);
+}
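
Note: adjust_pointer decodes a non-null slot, reads the new location recorded during marking, and stores it back in the slot's own width. A minimal sketch under stand-in types follows; the forwarding information is modeled as a plain field rather than the mark word.

// Sketch of the compaction pointer adjustment: decode the slot, look up the
// object's new address, and rewrite the slot in its own width.
#include <cstdint>

typedef uint32_t narrowOop;

struct Object {
  Object* forwarding;                  // new address; null if it does not move
};

static uintptr_t heap_base  = 0;       // assumed encoding base
static const int heap_shift = 3;       // assumed encoding shift

inline Object* load_decode(Object** p)   { return *p; }
inline Object* load_decode(narrowOop* p) {
  return (*p == 0) ? nullptr
                   : (Object*)(heap_base + ((uintptr_t)*p << heap_shift));
}
inline void encode_store(Object** p, Object* o)   { *p = o; }
inline void encode_store(narrowOop* p, Object* o) {
  *p = (narrowOop)(((uintptr_t)o - heap_base) >> heap_shift);
}

template <class T> inline void adjust_slot(T* p) {
  Object* obj = load_decode(p);
  if (obj == nullptr) return;          // nothing to adjust for a null slot
  Object* new_obj = obj->forwarding;   // where compaction will move the object
  if (new_obj != nullptr) {
    encode_store(p, new_obj);          // rewrite the slot in its own width
  }
}
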
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -35,7 +35,6 @@
 CollectedHeap::CollectedHeap() :
   _reserved(), _barrier_set(NULL), _is_gc_active(false),
   _total_collections(0), _total_full_collections(0),
-  _max_heap_capacity(0),
   _gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -53,7 +53,6 @@
   bool _is_gc_active;
   unsigned int _total_collections;          // ... started
   unsigned int _total_full_collections;     // ... started
-  size_t _max_heap_capacity;
   NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
   NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
 
@@ -149,10 +148,7 @@
   virtual void post_initialize() = 0;
 
   MemRegion reserved_region() const { return _reserved; }
-
-  // Return the number of bytes currently reserved, committed, and used,
-  // respectively, for holding objects.
-  size_t reserved_obj_bytes() const { return _reserved.byte_size(); }
+  address base() const { return (address)reserved_region().start(); }
 
   // Future cleanup here. The following functions should specify bytes or
   // heapwords as part of their signature.
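
Note: the new base() accessor exposes the start of the reserved heap, which is what compressed oops are encoded against. The sketch below shows only the arithmetic: a 32-bit, 8-byte-scaled offset from the base reaches up to 32 GB. Names and constants are illustrative, not the HotSpot code.

// Sketch of the compressed-oop arithmetic: store a 32-bit, 8-byte-scaled
// offset from the heap base; decode by shifting and adding the base back.
#include <cassert>
#include <cstdint>

typedef uint32_t narrowOop;

static const int log_min_alignment = 3;           // 8-byte aligned objects

struct HeapSketch {
  uintptr_t base;                                 // start of the reserved range

  narrowOop encode(uintptr_t obj) const {
    uintptr_t offset = (obj - base) >> log_min_alignment;
    assert(offset <= UINT32_MAX);                 // fits while the heap <= 32 GB
    return (narrowOop)offset;
  }
  uintptr_t decode(narrowOop v) const {
    return base + ((uintptr_t)v << log_min_alignment);
  }
};

int main() {
  HeapSketch heap = { 0x0000100000000000ULL };    // arbitrary reserved base
  uintptr_t obj = heap.base + 0x7FFFFFF8ULL * 8;  // near the 32 GB limit
  narrowOop n = heap.encode(obj);
  assert(heap.decode(n) == obj);                  // round-trips exactly
  return 0;
}
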
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -61,7 +61,10 @@
   obj->set_klass(klass());
   assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
          "missing blueprint");
+}
 
+// Support for jvmti and dtrace
+inline void post_allocation_notify(KlassHandle klass, oop obj) {
   // support for JVMTI VMObjectAlloc event (no-op if not enabled)
   JvmtiExport::vm_object_alloc_event_collector(obj);
 
@@ -79,18 +82,22 @@
   post_allocation_setup_common(klass, obj, size);
   assert(Universe::is_bootstrapping() ||
          !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
+  // notify jvmti and dtrace
+  post_allocation_notify(klass, (oop)obj);
 }
 
 void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj,
                                                 size_t size,
                                                 int length) {
-  // Set array length before posting jvmti object alloc event
-  // in post_allocation_setup_common()
   assert(length >= 0, "length should be non-negative");
+  post_allocation_setup_common(klass, obj, size);
+  // Must set length after installing klass as set_klass zeros the length
+  // field in UseCompressedOops
   ((arrayOop)obj)->set_length(length);
-  post_allocation_setup_common(klass, obj, size);
   assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
+  // notify jvmti and dtrace (must be after length is set for dtrace)
+  post_allocation_notify(klass, (oop)obj);
 }
 
 HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {
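
Note: the array path above now installs the klass before the length because, with compressed oops, set_klass clears the remainder of the header word that holds the length. The sketch below illustrates only the ordering issue; the header layout, offsets and types are made up and are not the actual HotSpot layout.

// Sketch of why the array length must be written after the klass when the
// compressed class pointer and the length share one header word.
#include <cassert>
#include <cstdint>

struct ArrayHeaderSketch {
  uint64_t mark;        // mark word
  uint32_t klass;       // compressed class pointer
  uint32_t length;      // array length, packed into the same 8-byte slot

  void set_klass(uint32_t k) {
    klass  = k;
    length = 0;         // installing the narrow klass wipes the rest of the word
  }
  void set_length(int len) { length = (uint32_t)len; }
};

int main() {
  ArrayHeaderSketch h = {};
  // Wrong order: a length written first is lost when the klass is installed.
  h.set_length(10);
  h.set_klass(42);
  assert(h.length == 0);
  // Order used by the change above: klass first, then length.
  h.set_klass(42);
  h.set_length(10);
  assert(h.length == 10 && h.klass == 42);
  return 0;
}
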
--- a/hotspot/src/share/vm/includeDB_core	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/includeDB_core	Sun Apr 13 17:43:42 2008 -0400
@@ -191,7 +191,6 @@
 arrayKlass.cpp                          arrayKlass.hpp
 arrayKlass.cpp                          arrayKlassKlass.hpp
 arrayKlass.cpp                          arrayOop.hpp
-arrayKlass.cpp                          collectedHeap.hpp
 arrayKlass.cpp                          collectedHeap.inline.hpp
 arrayKlass.cpp                          gcLocker.hpp
 arrayKlass.cpp                          instanceKlass.hpp
@@ -211,6 +210,7 @@
 arrayKlassKlass.cpp                     arrayKlassKlass.hpp
 arrayKlassKlass.cpp                     handles.inline.hpp
 arrayKlassKlass.cpp                     javaClasses.hpp
+arrayKlassKlass.cpp                     markSweep.inline.hpp
 arrayKlassKlass.cpp                     oop.inline.hpp
 
 arrayKlassKlass.hpp                     arrayKlass.hpp
@@ -250,7 +250,7 @@
 assembler_<arch_model>.cpp              assembler_<arch_model>.inline.hpp
 assembler_<arch_model>.cpp              biasedLocking.hpp
 assembler_<arch_model>.cpp              cardTableModRefBS.hpp
-assembler_<arch_model>.cpp              collectedHeap.hpp
+assembler_<arch_model>.cpp              collectedHeap.inline.hpp
 assembler_<arch_model>.cpp              interfaceSupport.hpp
 assembler_<arch_model>.cpp              interpreter.hpp
 assembler_<arch_model>.cpp              objectMonitor.hpp
@@ -331,9 +331,8 @@
 bitMap.inline.hpp                       atomic.hpp
 bitMap.inline.hpp                       bitMap.hpp
 
-blockOffsetTable.cpp                    blockOffsetTable.hpp
 blockOffsetTable.cpp                    blockOffsetTable.inline.hpp
-blockOffsetTable.cpp                    collectedHeap.hpp
+blockOffsetTable.cpp                    collectedHeap.inline.hpp
 blockOffsetTable.cpp                    iterator.hpp
 blockOffsetTable.cpp                    java.hpp
 blockOffsetTable.cpp                    oop.inline.hpp
@@ -990,6 +989,7 @@
 codeCache.cpp                           mutexLocker.hpp
 codeCache.cpp                           nmethod.hpp
 codeCache.cpp                           objArrayOop.hpp
+codeCache.cpp                           oop.inline.hpp
 codeCache.cpp                           pcDesc.hpp
 codeCache.cpp                           resourceArea.hpp
 
@@ -1124,7 +1124,7 @@
 compiledICHolderKlass.cpp               compiledICHolderKlass.hpp
 compiledICHolderKlass.cpp               handles.inline.hpp
 compiledICHolderKlass.cpp               javaClasses.hpp
-compiledICHolderKlass.cpp               markSweep.hpp
+compiledICHolderKlass.cpp               markSweep.inline.hpp
 compiledICHolderKlass.cpp               oop.inline.hpp
 compiledICHolderKlass.cpp               oop.inline2.hpp
 compiledICHolderKlass.cpp               permGen.hpp
@@ -1192,6 +1192,7 @@
 constMethodKlass.cpp                    gcLocker.hpp
 constMethodKlass.cpp                    handles.inline.hpp
 constMethodKlass.cpp                    interpreter.hpp
+constMethodKlass.cpp                    markSweep.inline.hpp
 constMethodKlass.cpp                    oop.inline.hpp
 constMethodKlass.cpp                    oop.inline2.hpp
 constMethodKlass.cpp                    resourceArea.hpp
@@ -1210,6 +1211,8 @@
 constantPoolKlass.cpp                   constantPoolKlass.hpp
 constantPoolKlass.cpp                   constantPoolOop.hpp
 constantPoolKlass.cpp                   handles.inline.hpp
+constantPoolKlass.cpp                   javaClasses.hpp
+constantPoolKlass.cpp                   markSweep.inline.hpp
 constantPoolKlass.cpp                   oop.inline.hpp
 constantPoolKlass.cpp                   oop.inline2.hpp
 constantPoolKlass.cpp                   oopFactory.hpp
@@ -1261,7 +1264,8 @@
 cpCacheKlass.cpp                        constantPoolOop.hpp
 cpCacheKlass.cpp                        cpCacheKlass.hpp
 cpCacheKlass.cpp                        handles.inline.hpp
-cpCacheKlass.cpp                        markSweep.hpp
+cpCacheKlass.cpp                        javaClasses.hpp
+cpCacheKlass.cpp                        markSweep.inline.hpp
 cpCacheKlass.cpp                        oop.inline.hpp
 cpCacheKlass.cpp                        permGen.hpp
 
@@ -1273,7 +1277,6 @@
 cpCacheOop.cpp                          handles.inline.hpp
 cpCacheOop.cpp                          interpreter.hpp
 cpCacheOop.cpp                          jvmtiRedefineClassesTrace.hpp
-cpCacheOop.cpp                          markSweep.hpp
 cpCacheOop.cpp                          markSweep.inline.hpp
 cpCacheOop.cpp                          objArrayOop.hpp
 cpCacheOop.cpp                          oop.inline.hpp
@@ -1385,7 +1388,6 @@
 
 defNewGeneration.cpp                    collectorCounters.hpp
 defNewGeneration.cpp                    copy.hpp
-defNewGeneration.cpp                    defNewGeneration.hpp
 defNewGeneration.cpp                    defNewGeneration.inline.hpp
 defNewGeneration.cpp                    gcLocker.inline.hpp
 defNewGeneration.cpp                    gcPolicyCounters.hpp
@@ -1397,7 +1399,6 @@
 defNewGeneration.cpp                    java.hpp
 defNewGeneration.cpp                    oop.inline.hpp
 defNewGeneration.cpp                    referencePolicy.hpp
-defNewGeneration.cpp                    space.hpp
 defNewGeneration.cpp                    space.inline.hpp
 defNewGeneration.cpp                    thread_<os_family>.inline.hpp
 
@@ -1406,6 +1407,7 @@
 defNewGeneration.hpp                    generation.inline.hpp
 defNewGeneration.hpp                    generationCounters.hpp
 
+defNewGeneration.inline.hpp             cardTableRS.hpp
 defNewGeneration.inline.hpp             defNewGeneration.hpp
 defNewGeneration.inline.hpp             space.hpp
 
@@ -1956,6 +1958,7 @@
 instanceKlass.cpp                       jvmti.h
 instanceKlass.cpp                       jvmtiExport.hpp
 instanceKlass.cpp                       jvmtiRedefineClassesTrace.hpp
+instanceKlass.cpp                       markSweep.inline.hpp
 instanceKlass.cpp                       methodOop.hpp
 instanceKlass.cpp                       mutexLocker.hpp
 instanceKlass.cpp                       objArrayKlassKlass.hpp
@@ -1991,6 +1994,7 @@
 instanceKlassKlass.cpp                  instanceRefKlass.hpp
 instanceKlassKlass.cpp                  javaClasses.hpp
 instanceKlassKlass.cpp                  jvmtiExport.hpp
+instanceKlassKlass.cpp                  markSweep.inline.hpp
 instanceKlassKlass.cpp                  objArrayKlassKlass.hpp
 instanceKlassKlass.cpp                  objArrayOop.hpp
 instanceKlassKlass.cpp                  oop.inline.hpp
@@ -2012,7 +2016,7 @@
 instanceRefKlass.cpp                    genOopClosures.inline.hpp
 instanceRefKlass.cpp                    instanceRefKlass.hpp
 instanceRefKlass.cpp                    javaClasses.hpp
-instanceRefKlass.cpp                    markSweep.hpp
+instanceRefKlass.cpp                    markSweep.inline.hpp
 instanceRefKlass.cpp                    oop.inline.hpp
 instanceRefKlass.cpp                    preserveException.hpp
 instanceRefKlass.cpp                    systemDictionary.hpp
@@ -2492,7 +2496,7 @@
 klassKlass.cpp                          instanceOop.hpp
 klassKlass.cpp                          klassKlass.hpp
 klassKlass.cpp                          klassOop.hpp
-klassKlass.cpp                          markSweep.hpp
+klassKlass.cpp                          markSweep.inline.hpp
 klassKlass.cpp                          methodKlass.hpp
 klassKlass.cpp                          objArrayKlass.hpp
 klassKlass.cpp                          oop.inline.hpp
@@ -2519,7 +2523,7 @@
 klassVtable.cpp                         jvmtiRedefineClassesTrace.hpp
 klassVtable.cpp                         klassOop.hpp
 klassVtable.cpp                         klassVtable.hpp
-klassVtable.cpp                         markSweep.hpp
+klassVtable.cpp                         markSweep.inline.hpp
 klassVtable.cpp                         methodOop.hpp
 klassVtable.cpp                         objArrayOop.hpp
 klassVtable.cpp                         oop.inline.hpp
@@ -2632,6 +2636,9 @@
 markOop.inline.hpp                      markOop.hpp
 
 markSweep.cpp                           compileBroker.hpp
+
+markSweep.hpp                           collectedHeap.hpp
+
 memRegion.cpp                           globals.hpp
 memRegion.cpp                           memRegion.hpp
 
@@ -2731,7 +2738,7 @@
 methodDataKlass.cpp                     gcLocker.hpp
 methodDataKlass.cpp                     handles.inline.hpp
 methodDataKlass.cpp                     klassOop.hpp
-methodDataKlass.cpp                     markSweep.hpp
+methodDataKlass.cpp                     markSweep.inline.hpp
 methodDataKlass.cpp                     methodDataKlass.hpp
 methodDataKlass.cpp                     methodDataOop.hpp
 methodDataKlass.cpp                     oop.inline.hpp
@@ -2746,7 +2753,6 @@
 methodDataOop.cpp                       deoptimization.hpp
 methodDataOop.cpp                       handles.inline.hpp
 methodDataOop.cpp                       linkResolver.hpp
-methodDataOop.cpp                       markSweep.hpp
 methodDataOop.cpp                       markSweep.inline.hpp
 methodDataOop.cpp                       methodDataOop.hpp
 methodDataOop.cpp                       oop.inline.hpp
@@ -2764,7 +2770,7 @@
 methodKlass.cpp                         interpreter.hpp
 methodKlass.cpp                         javaClasses.hpp
 methodKlass.cpp                         klassOop.hpp
-methodKlass.cpp                         markSweep.hpp
+methodKlass.cpp                         markSweep.inline.hpp
 methodKlass.cpp                         methodDataOop.hpp
 methodKlass.cpp                         methodKlass.hpp
 methodKlass.cpp                         oop.inline.hpp
@@ -2941,6 +2947,7 @@
 objArrayKlass.cpp                       universe.inline.hpp
 objArrayKlass.cpp                       vmSymbols.hpp
 
+
 objArrayKlass.hpp                       arrayKlass.hpp
 objArrayKlass.hpp                       instanceKlass.hpp
 objArrayKlass.hpp                       specialized_oop_closures.hpp
@@ -2948,6 +2955,7 @@
 objArrayKlassKlass.cpp                  collectedHeap.inline.hpp
 objArrayKlassKlass.cpp                  instanceKlass.hpp
 objArrayKlassKlass.cpp                  javaClasses.hpp
+objArrayKlassKlass.cpp                  markSweep.inline.hpp
 objArrayKlassKlass.cpp                  objArrayKlassKlass.hpp
 objArrayKlassKlass.cpp                  oop.inline.hpp
 objArrayKlassKlass.cpp                  oop.inline2.hpp
@@ -2956,6 +2964,7 @@
 objArrayKlassKlass.hpp                  arrayKlassKlass.hpp
 objArrayKlassKlass.hpp                  objArrayKlass.hpp
 
+objArrayOop.cpp                         objArrayKlass.hpp
 objArrayOop.cpp                         objArrayOop.hpp
 objArrayOop.cpp                         oop.inline.hpp
 
@@ -3005,7 +3014,6 @@
 oop.inline.hpp                          klass.hpp
 oop.inline.hpp                          klassOop.hpp
 oop.inline.hpp                          markOop.inline.hpp
-oop.inline.hpp                          markSweep.hpp
 oop.inline.hpp                          markSweep.inline.hpp
 oop.inline.hpp                          oop.hpp
 oop.inline.hpp                          os.hpp
@@ -4536,6 +4544,7 @@
 vtableStubs.cpp                         instanceKlass.hpp
 vtableStubs.cpp                         jvmtiExport.hpp
 vtableStubs.cpp                         klassVtable.hpp
+vtableStubs.cpp                         oop.inline.hpp
 vtableStubs.cpp                         mutexLocker.hpp
 vtableStubs.cpp                         resourceArea.hpp
 vtableStubs.cpp                         sharedRuntime.hpp
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -35,7 +35,10 @@
   static methodOop method(JavaThread *thread)        { return last_frame(thread).interpreter_frame_method(); }
   static address   bcp(JavaThread *thread)           { return last_frame(thread).interpreter_frame_bcp(); }
   static void      set_bcp_and_mdp(address bcp, JavaThread*thread);
-  static Bytecodes::Code code(JavaThread *thread)       { return Bytecodes::code_at(bcp(thread)); }
+  static Bytecodes::Code code(JavaThread *thread)    {
+    // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
+    return Bytecodes::code_at(bcp(thread), method(thread));
+  }
   static bool      already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
   static int       one_byte_index(JavaThread *thread)   { return bcp(thread)[1]; }
   static int       two_byte_index(JavaThread *thread)   { return Bytes::get_Java_u2(bcp(thread) + 1); }
--- a/hotspot/src/share/vm/memory/barrierSet.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/barrierSet.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -54,9 +54,9 @@
 
   // These functions indicate whether a particular access of the given
   // kinds requires a barrier.
-  virtual bool read_ref_needs_barrier(oop* field) = 0;
+  virtual bool read_ref_needs_barrier(void* field) = 0;
   virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
-  virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0;
+  virtual bool write_ref_needs_barrier(void* field, oop new_val) = 0;
   virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes, juint val1, juint val2) = 0;
 
   // The first four operations provide a direct implementation of the
@@ -64,7 +64,7 @@
   // directly, as appropriate.
 
   // Invoke the barrier, if any, necessary when reading the given ref field.
-  virtual void read_ref_field(oop* field) = 0;
+  virtual void read_ref_field(void* field) = 0;
 
   // Invoke the barrier, if any, necessary when reading the given primitive
   // "field" of "bytes" bytes in "obj".
@@ -75,9 +75,9 @@
   // (For efficiency reasons, this operation is specialized for certain
   // barrier types.  Semantically, it should be thought of as a call to the
   // virtual "_work" function below, which must implement the barrier.)
-  inline void write_ref_field(oop* field, oop new_val);
+  inline void write_ref_field(void* field, oop new_val);
 protected:
-  virtual void write_ref_field_work(oop* field, oop new_val) = 0;
+  virtual void write_ref_field_work(void* field, oop new_val) = 0;
 public:
 
   // Invoke the barrier, if any, necessary when writing the "bytes"-byte

--- a/hotspot/src/share/vm/memory/barrierSet.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/barrierSet.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -26,7 +26,7 @@
 // performance-critical calls when the barrier is the most common
 // card-table kind.
 
-void BarrierSet::write_ref_field(oop* field, oop new_val) {
+void BarrierSet::write_ref_field(void* field, oop new_val) {
   if (kind() == CardTableModRef) {
     ((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val);
   } else {
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -294,7 +294,7 @@
 // Note that these versions are precise!  The scanning code has to handle the
 // fact that the write barrier may be either precise or imprecise.
 
-void CardTableModRefBS::write_ref_field_work(oop* field, oop newVal) {
+void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
   inline_write_ref_field(field, newVal);
 }
 
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -273,7 +273,7 @@
 
   // *** Barrier set functions.
 
-  inline bool write_ref_needs_barrier(oop* field, oop new_val) {
+  inline bool write_ref_needs_barrier(void* field, oop new_val) {
     // Note that this assumes the perm gen is the highest generation
     // in the address space
     return new_val != NULL && !new_val->is_perm();
@@ -285,7 +285,7 @@
   // these functions here for performance.
 protected:
   void write_ref_field_work(oop obj, size_t offset, oop newVal);
-  void write_ref_field_work(oop* field, oop newVal);
+  void write_ref_field_work(void* field, oop newVal);
 public:
 
   bool has_write_ref_array_opt() { return true; }
@@ -315,7 +315,7 @@
 
   // *** Card-table-barrier-specific things.
 
-  inline void inline_write_ref_field(oop* field, oop newVal) {
+  inline void inline_write_ref_field(void* field, oop newVal) {
     jbyte* byte = byte_for(field);
     *byte = dirty_card;
   }
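
Note: the barrier now takes the field as a void* because the card is derived from the slot's address alone, so a single entry point covers both oop* and narrowOop* fields. A minimal sketch follows, with an assumed card size, map layout and dirty value.

// Sketch of the card-marking barrier: find the card byte for the slot's
// address and mark it dirty, regardless of the slot's width.
#include <cstddef>
#include <cstdint>
#include <vector>

class CardTableSketch {
  static const int card_shift = 9;              // 512-byte cards (assumed)
  static const signed char dirty_card = 0;      // assumed dirty value

  uintptr_t _covered_start;                     // start of the covered range
  std::vector<signed char> _byte_map;           // one byte per card

 public:
  CardTableSketch(uintptr_t start, size_t bytes)
    : _covered_start(start),
      _byte_map((bytes >> card_shift) + 1, /*clean*/ 1) {}

  signed char* byte_for(const void* field) {
    size_t index = ((uintptr_t)field - _covered_start) >> card_shift;
    return &_byte_map[index];
  }

  // Works the same for oop* and narrowOop* fields: only the address matters.
  void inline_write_ref_field(void* field) {
    *byte_for(field) = dirty_card;
  }
};
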
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -191,7 +191,7 @@
 // prev-younger-gen               ==> cur_youngergen_and_prev_nonclean_card
 // cur-younger-gen                ==> cur_younger_gen
 // cur_youngergen_and_prev_nonclean_card ==> no change.
-void CardTableRS::write_ref_field_gc_par(oop* field, oop new_val) {
+void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
   jbyte* entry = ct_bs()->byte_for(field);
   do {
     jbyte entry_val = *entry;
@@ -290,28 +290,36 @@
 
 
 class VerifyCleanCardClosure: public OopClosure {
-  HeapWord* boundary;
-  HeapWord* begin; HeapWord* end;
-public:
-  void do_oop(oop* p) {
+private:
+  HeapWord* _boundary;
+  HeapWord* _begin;
+  HeapWord* _end;
+protected:
+  template <class T> void do_oop_work(T* p) {
     HeapWord* jp = (HeapWord*)p;
-    if (jp >= begin && jp < end) {
-      guarantee(*p == NULL || (HeapWord*)p < boundary
-                || (HeapWord*)(*p) >= boundary,
+    if (jp >= _begin && jp < _end) {
+      oop obj = oopDesc::load_decode_heap_oop(p);
+      guarantee(obj == NULL ||
+                (HeapWord*)p < _boundary ||
+                (HeapWord*)obj >= _boundary,
                 "pointer on clean card crosses boundary");
     }
   }
-  VerifyCleanCardClosure(HeapWord* b, HeapWord* _begin, HeapWord* _end) :
-    boundary(b), begin(_begin), end(_end) {}
+public:
+  VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
+    _boundary(b), _begin(begin), _end(end) {}
+  virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
 };
 
 class VerifyCTSpaceClosure: public SpaceClosure {
+private:
   CardTableRS* _ct;
   HeapWord* _boundary;
 public:
   VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
     _ct(ct), _boundary(boundary) {}
-  void do_space(Space* s) { _ct->verify_space(s, _boundary); }
+  virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
 };
 
 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
--- a/hotspot/src/share/vm/memory/cardTableRS.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/cardTableRS.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -106,18 +106,18 @@
   // closure application.
   void younger_refs_iterate(Generation* g, OopsInGenClosure* blk);
 
-  void inline_write_ref_field_gc(oop* field, oop new_val) {
+  void inline_write_ref_field_gc(void* field, oop new_val) {
     jbyte* byte = _ct_bs.byte_for(field);
     *byte = youngergen_card;
   }
-  void write_ref_field_gc_work(oop* field, oop new_val) {
+  void write_ref_field_gc_work(void* field, oop new_val) {
     inline_write_ref_field_gc(field, new_val);
   }
 
   // Override.  Might want to devirtualize this in the same fashion as
   // above.  Ensures that the value of the card for field says that it's
   // a younger card in the current collection.
-  virtual void write_ref_field_gc_par(oop* field, oop new_val);
+  virtual void write_ref_field_gc_par(void* field, oop new_val);
 
   void resize_covered_region(MemRegion new_region);
 
--- a/hotspot/src/share/vm/memory/compactingPermGenGen.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/compactingPermGenGen.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -49,9 +49,9 @@
 // to prevent visiting any object twice.
 
 class RecursiveAdjustSharedObjectClosure : public OopClosure {
-public:
-  void do_oop(oop* o) {
-    oop obj = *o;
+ protected:
+  template <class T> inline void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     if (obj->is_shared_readwrite()) {
       if (obj->mark()->is_marked()) {
         obj->init_mark();         // Don't revisit this object.
@@ -71,7 +71,10 @@
         }
       }
     }
-  };
+  }
+ public:
+  virtual void do_oop(oop* p)       { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
 };
 
 
@@ -86,9 +89,9 @@
 // as doing so can cause hash codes to be computed, destroying
 // forwarding pointers.
 class TraversePlaceholdersClosure : public OopClosure {
- public:
-  void do_oop(oop* o) {
-    oop obj = *o;
+ protected:
+  template <class T> inline void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     if (obj->klass() == Universe::symbolKlassObj() &&
         obj->is_shared_readonly()) {
       symbolHandle sym((symbolOop) obj);
@@ -99,6 +102,10 @@
       }
     }
   }
+ public:
+  virtual void do_oop(oop* p)       { TraversePlaceholdersClosure::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { TraversePlaceholdersClosure::do_oop_work(p); }
+
 };
 
 
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -47,31 +47,9 @@
   _rs = (CardTableRS*)rs;
 }
 
-void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) {
-  // We never expect to see a null reference being processed
-  // as a weak reference.
-  assert (*p != NULL, "expected non-null ref");
-  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
-  _cl->do_oop_nv(p);
+void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
+void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
 
-  // Card marking is trickier for weak refs.
-  // This oop is a 'next' field which was filled in while we
-  // were discovering weak references. While we might not need
-  // to take a special action to keep this reference alive, we
-  // will need to dirty a card as the field was modified.
-  //
-  // Alternatively, we could create a method which iterates through
-  // each generation, allowing them in turn to examine the modified
-  // field.
-  //
-  // We could check that p is also in an older generation, but
-  // dirty cards in the youngest gen are never scanned, so the
-  // extra check probably isn't worthwhile.
-  if (Universe::heap()->is_in_reserved(p)) {
-    _rs->inline_write_ref_field_gc(p, *p);
-  }
-}
 
 DefNewGeneration::FastKeepAliveClosure::
 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
@@ -79,19 +57,8 @@
   _boundary = g->reserved().end();
 }
 
-void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) {
-  assert (*p != NULL, "expected non-null ref");
-  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
-  _cl->do_oop_nv(p);
-
-  // Optimized for Defnew generation if it's the youngest generation:
-  // we set a younger_gen card if we have an older->youngest
-  // generation pointer.
-  if (((HeapWord*)(*p) < _boundary) && Universe::heap()->is_in_reserved(p)) {
-    _rs->inline_write_ref_field_gc(p, *p);
-  }
-}
+void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
+void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
 
 DefNewGeneration::EvacuateFollowersClosure::
 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
@@ -132,6 +99,9 @@
   _boundary = _g->reserved().end();
 }
 
+void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
+void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
+
 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
   OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 {
@@ -139,6 +109,9 @@
   _boundary = _g->reserved().end();
 }
 
+void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
+void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
+
 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
   OopClosure(g->ref_processor()), _g(g)
 {
@@ -146,6 +119,11 @@
   _boundary = _g->reserved().end();
 }
 
+void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
+void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
+
+void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
+void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 
 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                    size_t initial_size,
@@ -656,7 +634,7 @@
   }
 }
 
-oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
+oop DefNewGeneration::copy_to_survivor_space(oop old) {
   assert(is_in_reserved(old) && !old->is_forwarded(),
          "shouldn't be scavenging this oop");
   size_t s = old->size();
@@ -669,7 +647,7 @@
 
   // Otherwise try allocating obj tenured
   if (obj == NULL) {
-    obj = _next_gen->promote(old, s, from);
+    obj = _next_gen->promote(old, s);
     if (obj == NULL) {
       if (!HandlePromotionFailure) {
         // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
@@ -862,3 +840,69 @@
 const char* DefNewGeneration::name() const {
   return "def new generation";
 }
+
+// Moved from inline file as they are not called inline
+CompactibleSpace* DefNewGeneration::first_compaction_space() const {
+  return eden();
+}
+
+HeapWord* DefNewGeneration::allocate(size_t word_size,
+                                     bool is_tlab) {
+  // This is the slow-path allocation for the DefNewGeneration.
+  // Most allocations are fast-path in compiled code.
+  // We try to allocate from the eden.  If that works, we are happy.
+  // Note that since DefNewGeneration supports lock-free allocation, we
+  // have to use it here, as well.
+  HeapWord* result = eden()->par_allocate(word_size);
+  if (result != NULL) {
+    return result;
+  }
+  do {
+    HeapWord* old_limit = eden()->soft_end();
+    if (old_limit < eden()->end()) {
+      // Tell the next generation we reached a limit.
+      HeapWord* new_limit =
+        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
+      if (new_limit != NULL) {
+        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
+      } else {
+        assert(eden()->soft_end() == eden()->end(),
+               "invalid state after allocation_limit_reached returned null");
+      }
+    } else {
+      // The allocation failed and the soft limit is equal to the hard limit,
+      // there are no reasons to do an attempt to allocate
+      assert(old_limit == eden()->end(), "sanity check");
+      break;
+    }
+    // Try to allocate until succeeded or the soft limit can't be adjusted
+    result = eden()->par_allocate(word_size);
+  } while (result == NULL);
+
+  // If the eden is full and the last collection bailed out, we are running
+  // out of heap space, and we try to allocate the from-space, too.
+  // allocate_from_space can't be inlined because that would introduce a
+  // circular dependency at compile time.
+  if (result == NULL) {
+    result = allocate_from_space(word_size);
+  }
+  return result;
+}
+
+HeapWord* DefNewGeneration::par_allocate(size_t word_size,
+                                         bool is_tlab) {
+  return eden()->par_allocate(word_size);
+}
+
+void DefNewGeneration::gc_prologue(bool full) {
+  // Ensure that _end and _soft_end are the same in eden space.
+  eden()->set_soft_end(eden()->end());
+}
+
+size_t DefNewGeneration::tlab_capacity() const {
+  return eden()->capacity();
+}
+
+size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
+  return unsafe_max_alloc_nogc();
+}
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -24,6 +24,7 @@
 
 class EdenSpace;
 class ContiguousSpace;
+class ScanClosure;
 
 // DefNewGeneration is a young generation containing eden, from- and
 // to-space.
@@ -155,17 +156,21 @@
   protected:
     ScanWeakRefClosure* _cl;
     CardTableRS* _rs;
+    template <class T> void do_oop_work(T* p);
   public:
     KeepAliveClosure(ScanWeakRefClosure* cl);
-    void do_oop(oop* p);
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
   };
 
   class FastKeepAliveClosure: public KeepAliveClosure {
   protected:
     HeapWord* _boundary;
+    template <class T> void do_oop_work(T* p);
   public:
     FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
-    void do_oop(oop* p);
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
   };
 
   class EvacuateFollowersClosure: public VoidClosure {
@@ -206,7 +211,7 @@
   ContiguousSpace* from() const           { return _from_space;  }
   ContiguousSpace* to()   const           { return _to_space;    }
 
-  inline CompactibleSpace* first_compaction_space() const;
+  virtual CompactibleSpace* first_compaction_space() const;
 
   // Space enquiries
   size_t capacity() const;
@@ -226,8 +231,8 @@
 
   // Thread-local allocation buffers
   bool supports_tlab_allocation() const { return true; }
-  inline size_t tlab_capacity() const;
-  inline size_t unsafe_max_tlab_alloc() const;
+  size_t tlab_capacity() const;
+  size_t unsafe_max_tlab_alloc() const;
 
   // Grow the generation by the specified number of bytes.
   // The size of bytes is assumed to be properly aligned.
@@ -265,13 +270,13 @@
     return result;
   }
 
-  inline HeapWord* allocate(size_t word_size, bool is_tlab);
+  HeapWord* allocate(size_t word_size, bool is_tlab);
   HeapWord* allocate_from_space(size_t word_size);
 
-  inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
+  HeapWord* par_allocate(size_t word_size, bool is_tlab);
 
   // Prologue & Epilogue
-  inline virtual void gc_prologue(bool full);
+  virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
   // Doesn't require additional work during GC prologue and epilogue
@@ -307,7 +312,7 @@
                                 bool is_tlab,
                                 bool parallel = false);
 
-  oop copy_to_survivor_space(oop old, oop* from);
+  oop copy_to_survivor_space(oop old);
   int tenuring_threshold() { return _tenuring_threshold; }
 
   // Performance Counter support
--- a/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -22,67 +22,60 @@
  *
  */
 
-CompactibleSpace* DefNewGeneration::first_compaction_space() const {
-  return eden();
+// Methods of protected closure types
+
+template <class T>
+inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+  {
+    // We never expect to see a null reference being processed
+    // as a weak reference.
+    assert (!oopDesc::is_null(*p), "expected non-null ref");
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    assert (obj->is_oop(), "expected an oop while scanning weak refs");
+  }
+#endif // ASSERT
+
+  _cl->do_oop_nv(p);
+
+  // Card marking is trickier for weak refs.
+  // This oop is a 'next' field which was filled in while we
+  // were discovering weak references. While we might not need
+  // to take a special action to keep this reference alive, we
+  // will need to dirty a card as the field was modified.
+  //
+  // Alternatively, we could create a method which iterates through
+  // each generation, allowing them in turn to examine the modified
+  // field.
+  //
+  // We could check that p is also in an older generation, but
+  // dirty cards in the youngest gen are never scanned, so the
+  // extra check probably isn't worthwhile.
+  if (Universe::heap()->is_in_reserved(p)) {
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    _rs->inline_write_ref_field_gc(p, obj);
+  }
 }
 
-HeapWord* DefNewGeneration::allocate(size_t word_size,
-                                     bool is_tlab) {
-  // This is the slow-path allocation for the DefNewGeneration.
-  // Most allocations are fast-path in compiled code.
-  // We try to allocate from the eden.  If that works, we are happy.
-  // Note that since DefNewGeneration supports lock-free allocation, we
-  // have to use it here, as well.
-  HeapWord* result = eden()->par_allocate(word_size);
-  if (result != NULL) {
-    return result;
+template <class T>
+inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+  {
+    // We never expect to see a null reference being processed
+    // as a weak reference.
+    assert (!oopDesc::is_null(*p), "expected non-null ref");
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    assert (obj->is_oop(), "expected an oop while scanning weak refs");
   }
-  do {
-    HeapWord* old_limit = eden()->soft_end();
-    if (old_limit < eden()->end()) {
-      // Tell the next generation we reached a limit.
-      HeapWord* new_limit =
-        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
-      if (new_limit != NULL) {
-        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
-      } else {
-        assert(eden()->soft_end() == eden()->end(),
-               "invalid state after allocation_limit_reached returned null");
-      }
-    } else {
-      // The allocation failed and the soft limit is equal to the hard limit,
-      // there are no reasons to do an attempt to allocate
-      assert(old_limit == eden()->end(), "sanity check");
-      break;
-    }
-    // Try to allocate until succeeded or the soft limit can't be adjusted
-    result = eden()->par_allocate(word_size);
-  } while (result == NULL);
+#endif // ASSERT
+
+  _cl->do_oop_nv(p);
 
-  // If the eden is full and the last collection bailed out, we are running
-  // out of heap space, and we try to allocate the from-space, too.
-  // allocate_from_space can't be inlined because that would introduce a
-  // circular dependency at compile time.
-  if (result == NULL) {
-    result = allocate_from_space(word_size);
+  // Optimized for Defnew generation if it's the youngest generation:
+  // we set a younger_gen card if we have an older->youngest
+  // generation pointer.
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) {
+    _rs->inline_write_ref_field_gc(p, obj);
   }
-  return result;
-}
-
-HeapWord* DefNewGeneration::par_allocate(size_t word_size,
-                                         bool is_tlab) {
-  return eden()->par_allocate(word_size);
 }
-
-void DefNewGeneration::gc_prologue(bool full) {
-  // Ensure that _end and _soft_end are the same in eden space.
-  eden()->set_soft_end(eden()->end());
-}
-
-size_t DefNewGeneration::tlab_capacity() const {
-  return eden()->capacity();
-}
-
-size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
-  return unsafe_max_alloc_nogc();
-}
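
The templated do_oop_work bodies above never dereference p directly; they go through the oopDesc load/decode and encode/store helpers, which is what lets one template body serve both oop* and narrowOop* fields (the virtual do_oop overloads declared in the header presumably just forward into these templates, the same idiom that is visible for ScanClosure::do_oop_nv further below). A conceptual sketch of what encode/decode mean under compressed oops; this is a simplification, not the changeset's actual implementation, which also handles null and the choice of heap base:

    // Conceptual sketch only: a narrowOop is a 32-bit, shift-scaled offset
    // from a heap base. With a 3-bit shift (8-byte object alignment) the
    // 32-bit value can address up to 32 GB of heap.
    #include <cstddef>
    typedef unsigned int narrow_ref;                  // stand-in for narrowOop
    inline narrow_ref encode_sketch(void* obj, char* heap_base, int shift) {
      return (narrow_ref)(((char*)obj - heap_base) >> shift);
    }
    inline void* decode_sketch(narrow_ref v, char* heap_base, int shift) {
      return heap_base + ((size_t)v << shift);
    }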
--- a/hotspot/src/share/vm/memory/dump.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/dump.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -60,9 +60,9 @@
     hash_offset = java_lang_String::hash_offset_in_bytes();
   }
 
-  void do_oop(oop* pobj) {
-    if (pobj != NULL) {
-      oop obj = *pobj;
+  void do_oop(oop* p) {
+    if (p != NULL) {
+      oop obj = *p;
       if (obj->klass() == SystemDictionary::string_klass()) {
 
         int hash;
@@ -79,6 +79,7 @@
       }
     }
   }
+  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
 
 
@@ -121,9 +122,8 @@
 
 class MarkObjectsOopClosure : public OopClosure {
 public:
-  void do_oop(oop* pobj) {
-    mark_object(*pobj);
-  }
+  void do_oop(oop* p)       { mark_object(*p); }
+  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
 
 
@@ -136,6 +136,7 @@
       mark_object(obj);
     }
   }
+  void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
 };
 
 
@@ -554,6 +555,7 @@
       }
     }
   }
+  void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
 };
 
 
@@ -690,6 +692,8 @@
     ++top;
   }
 
+  void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
+
   void do_int(int* p) {
     check_space();
     *top = (oop)(intptr_t)*p;
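
Each of the dump.cpp closures above gains a do_oop(narrowOop*) overload that only calls ShouldNotReachHere(). That is deliberate: these closures run while dumping the shared archive, and (as the serialize.cpp hunk near the end of this patch asserts) the shared archive is not used together with compressed oops, so a narrow field can never be encountered here. A minimal sketch of the idiom, with a hypothetical closure name and HotSpot's types assumed:

    // Sketch only. A closure that can never be applied to a compressed field
    // satisfies the now-abstract narrowOop overload with a guard instead of
    // real decoding logic.
    class ArchiveOnlyClosure : public OopClosure {
     public:
      void do_oop(oop* p)       { /* real work on the full-width field *p */ }
      void do_oop(narrowOop* p) { ShouldNotReachHere(); }  // dump never sees narrow fields
    };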
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -624,6 +624,7 @@
   void do_oop(oop* p) {
     assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
   }
+  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
 static AssertIsPermClosure assert_is_perm_closure;
 
@@ -1300,8 +1301,7 @@
 
 oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                               oop obj,
-                                              size_t obj_size,
-                                              oop* ref) {
+                                              size_t obj_size) {
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   HeapWord* result = NULL;
 
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -452,8 +452,7 @@
   // gen; return the new location of obj if successful.  Otherwise, return NULL.
   oop handle_failed_promotion(Generation* gen,
                               oop obj,
-                              size_t obj_size,
-                              oop* ref);
+                              size_t obj_size);
 
 private:
   // Accessor for memory state verification support
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -73,8 +73,7 @@
 
   VALIDATE_MARK_SWEEP_ONLY(
     if (ValidateMarkSweep) {
-      guarantee(_root_refs_stack->length() == 0,
-                "should be empty by now");
+      guarantee(_root_refs_stack->length() == 0, "should be empty by now");
     }
   )
 
@@ -165,9 +164,9 @@
 
 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
-    _root_refs_stack    = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
-    _other_refs_stack   = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
-    _adjusted_pointers  = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
+    _root_refs_stack    = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
+    _other_refs_stack   = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
+    _adjusted_pointers  = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
     _live_oops          = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
     _live_oops_moved_to = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
     _live_oops_size     = new (ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
--- a/hotspot/src/share/vm/memory/genOopClosures.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genOopClosures.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -28,6 +28,11 @@
 class CardTableModRefBS;
 class DefNewGeneration;
 
+template<class E> class GenericTaskQueue;
+typedef GenericTaskQueue<oop> OopTaskQueue;
+template<class E> class GenericTaskQueueSet;
+typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
+
 // Closure for iterating roots from a particular generation
 // Note: all classes deriving from this MUST call this do_barrier
 // method at the end of their own do_oop method!
@@ -35,13 +40,13 @@
 
 class OopsInGenClosure : public OopClosure {
  private:
-  Generation*         _orig_gen;     // generation originally set in ctor
-  Generation*         _gen;          // generation being scanned
+  Generation*  _orig_gen;     // generation originally set in ctor
+  Generation*  _gen;          // generation being scanned
 
  protected:
   // Some subtypes need access.
-  HeapWord*           _gen_boundary; // start of generation
-  CardTableRS*        _rs;           // remembered set
+  HeapWord*    _gen_boundary; // start of generation
+  CardTableRS* _rs;           // remembered set
 
   // For assertions
   Generation* generation() { return _gen; }
@@ -49,7 +54,7 @@
 
   // Derived classes that modify oops so that they might be old-to-young
   // pointers must call the method below.
-  void do_barrier(oop* p);
+  template <class T> void do_barrier(T* p);
 
  public:
   OopsInGenClosure() : OopClosure(NULL),
@@ -75,14 +80,17 @@
 // This closure will perform barrier store calls for ALL
 // pointers in scanned oops.
 class ScanClosure: public OopsInGenClosure {
-protected:
+ protected:
   DefNewGeneration* _g;
-  HeapWord* _boundary;
-  bool _gc_barrier;
-public:
+  HeapWord*         _boundary;
+  bool              _gc_barrier;
+  template <class T> inline void do_oop_work(T* p);
+ public:
   ScanClosure(DefNewGeneration* g, bool gc_barrier);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p);
+  inline void do_oop_nv(narrowOop* p);
   bool do_header() { return false; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_write;
@@ -95,14 +103,17 @@
 // pointers into the DefNewGeneration. This is less
 // precise, but faster, than a ScanClosure
 class FastScanClosure: public OopsInGenClosure {
-protected:
+ protected:
   DefNewGeneration* _g;
-  HeapWord* _boundary;
-  bool _gc_barrier;
-public:
+  HeapWord*         _boundary;
+  bool              _gc_barrier;
+  template <class T> inline void do_oop_work(T* p);
+ public:
   FastScanClosure(DefNewGeneration* g, bool gc_barrier);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p);
+  inline void do_oop_nv(narrowOop* p);
   bool do_header() { return false; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_write;
@@ -110,19 +121,27 @@
 };
 
 class FilteringClosure: public OopClosure {
-  HeapWord* _boundary;
+ private:
+  HeapWord*   _boundary;
   OopClosure* _cl;
-public:
+ protected:
+  template <class T> inline void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      if ((HeapWord*)obj < _boundary) {
+        _cl->do_oop(p);
+      }
+    }
+  }
+ public:
   FilteringClosure(HeapWord* boundary, OopClosure* cl) :
     OopClosure(cl->_ref_processor), _boundary(boundary),
     _cl(cl) {}
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) {
-    oop obj = *p;
-    if ((HeapWord*)obj < _boundary && obj != NULL) {
-      _cl->do_oop(p);
-    }
-  }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { FilteringClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
   bool do_header() { return false; }
 };
 
@@ -131,19 +150,26 @@
 //  OopsInGenClosure -- weak references are processed all
 //  at once, with no notion of which generation they were in.
 class ScanWeakRefClosure: public OopClosure {
-protected:
-  DefNewGeneration*  _g;
-  HeapWord*          _boundary;
-public:
+ protected:
+  DefNewGeneration* _g;
+  HeapWord*         _boundary;
+  template <class T> inline void do_oop_work(T* p);
+ public:
   ScanWeakRefClosure(DefNewGeneration* g);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p);
+  inline void do_oop_nv(narrowOop* p);
 };
 
 class VerifyOopClosure: public OopClosure {
-public:
-  void do_oop(oop* p) {
-    guarantee((*p)->is_oop_or_null(), "invalid oop");
+ protected:
+  template <class T> inline void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(obj->is_oop_or_null(), "invalid oop");
   }
+ public:
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
   static VerifyOopClosure verify_oop;
 };
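
ScanClosure, FastScanClosure, FilteringClosure and ScanWeakRefClosure now each declare four entry points: virtual do_oop for both widths plus inline do_oop_nv ("non-virtual") for both widths, all funneling into one protected do_oop_work template. The _nv variants exist so that iteration code which knows the concrete closure type at compile time can bypass virtual dispatch and inline the body. A minimal sketch of the two calling conventions, with hypothetical helper names (the real specialized iterators are macro-generated elsewhere in HotSpot):

    // Sketch only. When the closure type is a template parameter, the _nv
    // entry point is statically bound and can be inlined at the call site.
    template <class ClosureType, class T>
    inline void scan_field_specialized(ClosureType* cl, T* p) {
      cl->do_oop_nv(p);   // no virtual call
    }
    // Generic code that only holds an OopClosure* pays for the virtual call.
    inline void scan_field_generic(OopClosure* cl, oop* p) {
      cl->do_oop(p);
    }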
--- a/hotspot/src/share/vm/memory/genOopClosures.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genOopClosures.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -38,10 +38,10 @@
   }
 }
 
-inline void OopsInGenClosure::do_barrier(oop* p) {
+template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  oop obj = *p;
-  assert(obj != NULL, "expected non-null object");
+  assert(!oopDesc::is_null(*p), "expected non-null object");
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < _gen_boundary) {
     _rs->inline_write_ref_field_gc(p, obj);
@@ -49,18 +49,17 @@
 }
 
 // NOTE! Any changes made here should also be made
-// in FastScanClosure::do_oop();
-inline void ScanClosure::do_oop(oop* p) {
-  oop obj = *p;
+// in FastScanClosure::do_oop_work()
+template <class T> inline void ScanClosure::do_oop_work(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
   // Should we copy the obj?
-  if (obj != NULL) {
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {
-        *p = _g->copy_to_survivor_space(obj, p);
-      }
+      oop new_obj = obj->is_forwarded() ? obj->forwardee()
+                                        : _g->copy_to_survivor_space(obj);
+      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
     }
     if (_gc_barrier) {
       // Now call parent closure
@@ -69,23 +68,21 @@
   }
 }
 
-inline void ScanClosure::do_oop_nv(oop* p) {
-  ScanClosure::do_oop(p);
-}
+inline void ScanClosure::do_oop_nv(oop* p)       { ScanClosure::do_oop_work(p); }
+inline void ScanClosure::do_oop_nv(narrowOop* p) { ScanClosure::do_oop_work(p); }
 
 // NOTE! Any changes made here should also be made
-// in ScanClosure::do_oop();
-inline void FastScanClosure::do_oop(oop* p) {
-  oop obj = *p;
+// in ScanClosure::do_oop_work()
+template <class T> inline void FastScanClosure::do_oop_work(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
   // Should we copy the obj?
-  if (obj != NULL) {
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {
-        *p = _g->copy_to_survivor_space(obj, p);
-      }
+      oop new_obj = obj->is_forwarded() ? obj->forwardee()
+                                        : _g->copy_to_survivor_space(obj);
+      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
       if (_gc_barrier) {
         // Now call parent closure
         do_barrier(p);
@@ -94,26 +91,22 @@
   }
 }
 
-inline void FastScanClosure::do_oop_nv(oop* p) {
-  FastScanClosure::do_oop(p);
-}
+inline void FastScanClosure::do_oop_nv(oop* p)       { FastScanClosure::do_oop_work(p); }
+inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 
 // Note similarity to ScanClosure; the difference is that
 // the barrier set is taken care of outside this closure.
-inline void ScanWeakRefClosure::do_oop(oop* p) {
-  oop obj = *p;
-  assert (obj != NULL, "null weak reference?");
+template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
+  assert(!oopDesc::is_null(*p), "null weak reference?");
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
-    if (obj->is_forwarded()) {
-      *p = obj->forwardee();
-    } else {
-      *p = _g->copy_to_survivor_space(obj, p);
-    }
+    oop new_obj = obj->is_forwarded() ? obj->forwardee()
+                                      : _g->copy_to_survivor_space(obj);
+    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
   }
 }
 
-inline void ScanWeakRefClosure::do_oop_nv(oop* p) {
-  ScanWeakRefClosure::do_oop(p);
-}
+inline void ScanWeakRefClosure::do_oop_nv(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
+inline void ScanWeakRefClosure::do_oop_nv(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
--- a/hotspot/src/share/vm/memory/genRemSet.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genRemSet.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -68,13 +68,13 @@
 
   // This method is used to notify the remembered set that "new_val" has
   // been written into "field" by the garbage collector.
-  void write_ref_field_gc(oop* field, oop new_val);
+  void write_ref_field_gc(void* field, oop new_val);
 protected:
-  virtual void write_ref_field_gc_work(oop* field, oop new_val) = 0;
+  virtual void write_ref_field_gc_work(void* field, oop new_val) = 0;
 public:
 
   // A version of the above suitable for use by parallel collectors.
-  virtual void write_ref_field_gc_par(oop* field, oop new_val) = 0;
+  virtual void write_ref_field_gc_par(void* field, oop new_val) = 0;
 
   // Resize one of the regions covered by the remembered set.
   virtual void resize_covered_region(MemRegion new_region) = 0;
--- a/hotspot/src/share/vm/memory/genRemSet.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/genRemSet.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -26,7 +26,7 @@
 // performance-critical call when the rem set is the most common
 // card-table kind.
 
-void GenRemSet::write_ref_field_gc(oop* field, oop new_val) {
+void GenRemSet::write_ref_field_gc(void* field, oop new_val) {
   if (kind() == CardTableModRef) {
     ((CardTableRS*)this)->inline_write_ref_field_gc(field, new_val);
   } else {
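
write_ref_field_gc and its _work/_par counterparts switch from oop* to void* because the slot being recorded may now be either an oop* or a narrowOop*; the remembered set only needs the slot's address to locate its card and never cares about the slot's width. A simplified, hypothetical sketch of that card lookup (not the changeset's actual CardTableRS code):

    // Sketch only: the card index is derived from the raw address, so void*
    // is sufficient for both field widths.
    inline jbyte* card_for_sketch(void* field, jbyte* byte_map_base) {
      return byte_map_base + ((uintptr_t)field >> CardTableModRefBS::card_shift);
    }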
--- a/hotspot/src/share/vm/memory/generation.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/generation.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -171,7 +171,7 @@
 }
 
 // The default implementation calls allocate().
-oop Generation::promote(oop obj, size_t obj_size, oop* ref) {
+oop Generation::promote(oop obj, size_t obj_size) {
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
 
 #ifndef PRODUCT
@@ -186,7 +186,7 @@
     return oop(result);
   } else {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
-    return gch->handle_failed_promotion(this, obj, obj_size, ref);
+    return gch->handle_failed_promotion(this, obj, obj_size);
   }
 }
 
--- a/hotspot/src/share/vm/memory/generation.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/generation.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -295,13 +295,7 @@
   //
   // The "obj_size" argument is just obj->size(), passed along so the caller can
   // avoid repeating the virtual call to retrieve it.
-  //
-  // The "ref" argument, if non-NULL, is the address of some reference to "obj"
-  // (that is "*ref == obj"); some generations may use this information to, for
-  // example, influence placement decisions.
-  //
-  // The default implementation ignores "ref" and calls allocate().
-  virtual oop promote(oop obj, size_t obj_size, oop* ref);
+  virtual oop promote(oop obj, size_t obj_size);
 
   // Thread "thread_num" (0 <= i < ParallelGCThreads) wants to promote
   // object "obj", whose original mark word was "m", and whose size is
--- a/hotspot/src/share/vm/memory/iterator.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -35,6 +35,8 @@
   OopClosure() : _ref_processor(NULL) { }
   virtual void do_oop(oop* o) = 0;
   virtual void do_oop_v(oop* o) { do_oop(o); }
+  virtual void do_oop(narrowOop* o) = 0;
+  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
 
   // In support of post-processing of weak links of KlassKlass objects;
   // see KlassKlass::oop_oop_iterate().
--- a/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -37,19 +37,19 @@
   bool has_write_ref_barrier() { return true; }
   bool has_write_prim_barrier() { return false; }
 
-  bool read_ref_needs_barrier(oop* field) { return false; }
+  bool read_ref_needs_barrier(void* field) { return false; }
   bool read_prim_needs_barrier(HeapWord* field, size_t bytes) { return false; }
-  virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0;
+  virtual bool write_ref_needs_barrier(void* field, oop new_val) = 0;
   bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
                                 juint val1, juint val2) { return false; }
 
   void write_prim_field(oop obj, size_t offset, size_t bytes,
                         juint val1, juint val2) {}
 
-  void read_ref_field(oop* field) {}
+  void read_ref_field(void* field) {}
   void read_prim_field(HeapWord* field, size_t bytes) {}
 protected:
-  virtual void write_ref_field_work(oop* field, oop new_val) = 0;
+  virtual void write_ref_field_work(void* field, oop new_val) = 0;
 public:
   void write_prim_field(HeapWord* field, size_t bytes,
                         juint val1, juint val2) {}
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -28,16 +28,32 @@
 // List of discovered references.
 class DiscoveredList {
 public:
-         DiscoveredList() : _head(NULL), _len(0) { }
-  oop    head() const           { return _head; }
-  oop*   head_ptr()             { return &_head; }
-  void   set_head(oop o)        { _head = o; }
-  bool   empty() const          { return _head == ReferenceProcessor::_sentinelRef; }
+  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
+  oop head() const     {
+     return UseCompressedOops ?  oopDesc::decode_heap_oop_not_null(_compressed_head) :
+                                _oop_head;
+  }
+  HeapWord* adr_head() {
+    return UseCompressedOops ? (HeapWord*)&_compressed_head :
+                               (HeapWord*)&_oop_head;
+  }
+  void   set_head(oop o) {
+    if (UseCompressedOops) {
+      // Must compress the head ptr.
+      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
+    } else {
+      _oop_head = o;
+    }
+  }
+  bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
   size_t length()               { return _len; }
   void   set_length(size_t len) { _len = len; }
 private:
+  // Set value depending on UseCompressedOops. This could be a template class
+  // but then we have to fix all the instantiations and declarations that use this class.
+  oop       _oop_head;
+  narrowOop _compressed_head;
   size_t _len;
-  oop   _head;
 };
 
 oop  ReferenceProcessor::_sentinelRef = NULL;
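
DiscoveredList now carries both an oop head and a narrowOop head and picks one at runtime via UseCompressedOops; as the in-code comment says, templating the class itself would have rippled through every declaration that mentions it. The price is that any code treating the head slot as a GC root must cast adr_head() to the width currently in force, exactly as weak_oops_do() does a few hunks below. A minimal sketch of that calling pattern, with a hypothetical helper name:

    // Sketch only: hand an OopClosure the head slot at the correct width.
    static void scan_list_head_sketch(DiscoveredList& list, OopClosure* f) {
      if (UseCompressedOops) {
        f->do_oop((narrowOop*)list.adr_head());
      } else {
        f->do_oop((oop*)list.adr_head());
      }
    }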
@@ -49,11 +65,11 @@
 }
 
 void ReferenceProcessor::init_statics() {
-  assert(_sentinelRef == NULL, "should be initialized precsiely once");
+  assert(_sentinelRef == NULL, "should be initialized precisely once");
   EXCEPTION_MARK;
   _sentinelRef = instanceKlass::cast(
-                   SystemDictionary::object_klass())->
-                     allocate_permanent_instance(THREAD);
+                    SystemDictionary::reference_klass())->
+                      allocate_permanent_instance(THREAD);
 
   // Initialize the master soft ref clock.
   java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
@@ -69,15 +85,13 @@
             "Unrecongnized RefDiscoveryPolicy");
 }
 
-
-ReferenceProcessor* ReferenceProcessor::create_ref_processor(
-    MemRegion          span,
-    bool               atomic_discovery,
-    bool               mt_discovery,
-    BoolObjectClosure* is_alive_non_header,
-    int                parallel_gc_threads,
-    bool               mt_processing)
-{
+ReferenceProcessor*
+ReferenceProcessor::create_ref_processor(MemRegion          span,
+                                         bool               atomic_discovery,
+                                         bool               mt_discovery,
+                                         BoolObjectClosure* is_alive_non_header,
+                                         int                parallel_gc_threads,
+                                         bool               mt_processing) {
   int mt_degree = 1;
   if (parallel_gc_threads > 1) {
     mt_degree = parallel_gc_threads;
@@ -93,10 +107,11 @@
   return rp;
 }
 
-
 ReferenceProcessor::ReferenceProcessor(MemRegion span,
-  bool atomic_discovery, bool mt_discovery, int mt_degree,
-  bool mt_processing) :
+                                       bool      atomic_discovery,
+                                       bool      mt_discovery,
+                                       int       mt_degree,
+                                       bool      mt_processing) :
   _discovering_refs(false),
   _enqueuing_is_done(false),
   _is_alive_non_header(NULL),
@@ -114,10 +129,10 @@
   _discoveredWeakRefs    = &_discoveredSoftRefs[_num_q];
   _discoveredFinalRefs   = &_discoveredWeakRefs[_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
-  assert(_sentinelRef != NULL, "_sentinelRef is NULL");
+  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
   // Initialize all entries to _sentinelRef
   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
-        _discoveredSoftRefs[i].set_head(_sentinelRef);
+        _discoveredSoftRefs[i].set_head(sentinel_ref());
     _discoveredSoftRefs[i].set_length(0);
   }
 }
@@ -134,16 +149,19 @@
 
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
-    f->do_oop(_discoveredSoftRefs[i].head_ptr());
+    if (UseCompressedOops) {
+      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
+    } else {
+      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
+    }
   }
 }
 
 void ReferenceProcessor::oops_do(OopClosure* f) {
-  f->do_oop(&_sentinelRef);
+  f->do_oop(adr_sentinel_ref());
 }
 
-void ReferenceProcessor::update_soft_ref_master_clock()
-{
+void ReferenceProcessor::update_soft_ref_master_clock() {
   // Update (advance) the soft ref master clock field. This must be done
   // after processing the soft ref list.
   jlong now = os::javaTimeMillis();
@@ -164,9 +182,7 @@
   // past clock value.
 }
 
-
-void
-ReferenceProcessor::process_discovered_references(
+void ReferenceProcessor::process_discovered_references(
   ReferencePolicy*             policy,
   BoolObjectClosure*           is_alive,
   OopClosure*                  keep_alive,
@@ -223,15 +239,13 @@
   }
 }
 
-
 #ifndef PRODUCT
 // Calculate the number of jni handles.
-unsigned int ReferenceProcessor::count_jni_refs()
-{
+uint ReferenceProcessor::count_jni_refs() {
   class AlwaysAliveClosure: public BoolObjectClosure {
   public:
-    bool do_object_b(oop obj) { return true; }
-    void do_object(oop obj) { assert(false, "Don't call"); }
+    virtual bool do_object_b(oop obj) { return true; }
+    virtual void do_object(oop obj) { assert(false, "Don't call"); }
   };
 
   class CountHandleClosure: public OopClosure {
@@ -239,9 +253,8 @@
     int _count;
   public:
     CountHandleClosure(): _count(0) {}
-    void do_oop(oop* unused) {
-      _count++;
-    }
+    void do_oop(oop* unused)       { _count++; }
+    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
     int count() { return _count; }
   };
   CountHandleClosure global_handle_count;
@@ -262,36 +275,48 @@
 #endif
   JNIHandles::weak_oops_do(is_alive, keep_alive);
   // Finally remember to keep sentinel around
-  keep_alive->do_oop(&_sentinelRef);
+  keep_alive->do_oop(adr_sentinel_ref());
   complete_gc->do_void();
 }
 
-bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
-  NOT_PRODUCT(verify_ok_to_handle_reflists());
+
+template <class T>
+static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
+                                          AbstractRefProcTaskExecutor* task_executor) {
+
   // Remember old value of pending references list
-  oop* pending_list_addr = java_lang_ref_Reference::pending_list_addr();
-  oop old_pending_list_value = *pending_list_addr;
+  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
+  T old_pending_list_value = *pending_list_addr;
 
   // Enqueue references that are not made active again, and
   // clear the decks for the next collection (cycle).
-  enqueue_discovered_reflists(pending_list_addr, task_executor);
+  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
   // Do the oop-check on pending_list_addr missed in
   // enqueue_discovered_reflist. We should probably
   // do a raw oop_check so that future such idempotent
   // oop_stores relying on the oop-check side-effect
   // may be elided automatically and safely without
   // affecting correctness.
-  oop_store(pending_list_addr, *(pending_list_addr));
+  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
 
   // Stop treating discovered references specially.
-  disable_discovery();
+  ref->disable_discovery();
 
   // Return true if new pending references were added
   return old_pending_list_value != *pending_list_addr;
 }
 
+bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
+  NOT_PRODUCT(verify_ok_to_handle_reflists());
+  if (UseCompressedOops) {
+    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
+  } else {
+    return enqueue_discovered_ref_helper<oop>(this, task_executor);
+  }
+}
+
 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
-  oop* pending_list_addr) {
+                                                    HeapWord* pending_list_addr) {
   // Given a list of refs linked through the "discovered" field
   // (java.lang.ref.Reference.discovered) chain them through the
   // "next" field (java.lang.ref.Reference.next) and prepend
@@ -305,19 +330,19 @@
   // the next field and clearing it (except for the last
   // non-sentinel object which is treated specially to avoid
   // confusion with an active reference).
-  while (obj != _sentinelRef) {
+  while (obj != sentinel_ref()) {
     assert(obj->is_instanceRef(), "should be reference object");
     oop next = java_lang_ref_Reference::discovered(obj);
     if (TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("  obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
-                             (oopDesc*) obj, (oopDesc*) next);
+      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
+                             obj, next);
     }
-    assert(*java_lang_ref_Reference::next_addr(obj) == NULL,
-      "The reference should not be enqueued");
-    if (next == _sentinelRef) {  // obj is last
+    assert(java_lang_ref_Reference::next(obj) == NULL,
+           "The reference should not be enqueued");
+    if (next == sentinel_ref()) {  // obj is last
       // Swap refs_list into pending_list_addr and
       // set obj's next to what we read from pending_list_addr.
-      oop old = (oop)Atomic::xchg_ptr(refs_list.head(), pending_list_addr);
+      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
       // Need oop_check on pending_list_addr above;
       // see special oop-check code at the end of
       // enqueue_discovered_reflists() further below.
@@ -341,15 +366,14 @@
 public:
   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                      DiscoveredList      discovered_refs[],
-                     oop*                pending_list_addr,
+                     HeapWord*           pending_list_addr,
                      oop                 sentinel_ref,
                      int                 n_queues)
     : EnqueueTask(ref_processor, discovered_refs,
                   pending_list_addr, sentinel_ref, n_queues)
   { }
 
-  virtual void work(unsigned int work_id)
-  {
+  virtual void work(unsigned int work_id) {
     assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
     // Simplest first cut: static partitioning.
     int index = work_id;
@@ -363,18 +387,18 @@
 };
 
 // Enqueue references that are not made active again
-void ReferenceProcessor::enqueue_discovered_reflists(oop* pending_list_addr,
+void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
   AbstractRefProcTaskExecutor* task_executor) {
   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
     RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
-                           pending_list_addr, _sentinelRef, _num_q);
+                           pending_list_addr, sentinel_ref(), _num_q);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
     for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
-      _discoveredSoftRefs[i].set_head(_sentinelRef);
+      _discoveredSoftRefs[i].set_head(sentinel_ref());
       _discoveredSoftRefs[i].set_length(0);
     }
   }
@@ -388,14 +412,13 @@
                                 BoolObjectClosure* is_alive);
 
   // End Of List.
-  inline bool has_next() const
-  { return _next != ReferenceProcessor::_sentinelRef; }
+  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
 
   // Get oop to the Reference object.
-  inline oop  obj() const { return _ref; }
+  inline oop obj() const { return _ref; }
 
   // Get oop to the referent object.
-  inline oop  referent() const { return _referent; }
+  inline oop referent() const { return _referent; }
 
   // Returns true if referent is alive.
   inline bool is_referent_alive() const;
@@ -417,13 +440,26 @@
   inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
 
   // Make the referent alive.
-  inline void make_referent_alive() { _keep_alive->do_oop(_referent_addr); }
+  inline void make_referent_alive() {
+    if (UseCompressedOops) {
+      _keep_alive->do_oop((narrowOop*)_referent_addr);
+    } else {
+      _keep_alive->do_oop((oop*)_referent_addr);
+    }
+  }
 
   // Update the discovered field.
-  inline void update_discovered() { _keep_alive->do_oop(_prev_next); }
+  inline void update_discovered() {
+    // First _prev_next ref actually points into DiscoveredList (gross).
+    if (UseCompressedOops) {
+      _keep_alive->do_oop((narrowOop*)_prev_next);
+    } else {
+      _keep_alive->do_oop((oop*)_prev_next);
+    }
+  }
 
   // NULL out referent pointer.
-  inline void clear_referent() { *_referent_addr = NULL; }
+  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
 
   // Statistics
   NOT_PRODUCT(
@@ -436,11 +472,11 @@
 
 private:
   DiscoveredList&    _refs_list;
-  oop*               _prev_next;
+  HeapWord*          _prev_next;
   oop                _ref;
-  oop*               _discovered_addr;
+  HeapWord*          _discovered_addr;
   oop                _next;
-  oop*               _referent_addr;
+  HeapWord*          _referent_addr;
   oop                _referent;
   OopClosure*        _keep_alive;
   BoolObjectClosure* _is_alive;
@@ -457,7 +493,7 @@
                                                       OopClosure*        keep_alive,
                                                       BoolObjectClosure* is_alive)
   : _refs_list(refs_list),
-    _prev_next(refs_list.head_ptr()),
+    _prev_next(refs_list.adr_head()),
     _ref(refs_list.head()),
 #ifdef ASSERT
     _first_seen(refs_list.head()),
@@ -471,19 +507,18 @@
     _is_alive(is_alive)
 { }
 
-inline bool DiscoveredListIterator::is_referent_alive() const
-{
+inline bool DiscoveredListIterator::is_referent_alive() const {
   return _is_alive->do_object_b(_referent);
 }
 
-inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent))
-{
+inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
-  assert(_discovered_addr && (*_discovered_addr)->is_oop_or_null(),
+  oop discovered = java_lang_ref_Reference::discovered(_ref);
+  assert(_discovered_addr && discovered->is_oop_or_null(),
          "discovered field is bad");
-  _next = *_discovered_addr;
+  _next = discovered;
   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
-  _referent = *_referent_addr;
+  _referent = java_lang_ref_Reference::referent(_ref);
   assert(Universe::heap()->is_in_reserved_or_null(_referent),
          "Wrong oop found in java.lang.Reference object");
   assert(allow_null_referent ?
@@ -492,32 +527,32 @@
          "bad referent");
 }
 
-inline void DiscoveredListIterator::next()
-{
+inline void DiscoveredListIterator::next() {
   _prev_next = _discovered_addr;
   move_to_next();
 }
 
-inline void DiscoveredListIterator::remove()
-{
+inline void DiscoveredListIterator::remove() {
   assert(_ref->is_oop(), "Dropping a bad reference");
-  // Clear the discovered_addr field so that the object does
-  // not look like it has been discovered.
-  *_discovered_addr = NULL;
-  // Remove Reference object from list.
-  *_prev_next = _next;
+  oop_store_raw(_discovered_addr, NULL);
+  // First _prev_next ref actually points into DiscoveredList (gross).
+  if (UseCompressedOops) {
+    // Remove Reference object from list.
+    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
+  } else {
+    // Remove Reference object from list.
+    oopDesc::store_heap_oop((oop*)_prev_next, _next);
+  }
   NOT_PRODUCT(_removed++);
   move_to_next();
 }
 
-inline void DiscoveredListIterator::move_to_next()
-{
+inline void DiscoveredListIterator::move_to_next() {
   _ref = _next;
   assert(_ref != _first_seen, "cyclic ref_list found");
   NOT_PRODUCT(_processed++);
 }
 
-
 // NOTE: process_phase*() are largely similar, and at a high level
 // merely iterate over the extant list applying a predicate to
 // each of its elements and possibly removing that element from the
@@ -531,13 +566,13 @@
 // referents are not alive, but that should be kept alive for policy reasons.
 // Keep alive the transitive closure of all such referents.
 void
-ReferenceProcessor::process_phase1(DiscoveredList&    refs_list_addr,
+ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                    ReferencePolicy*   policy,
                                    BoolObjectClosure* is_alive,
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   assert(policy != NULL, "Must have a non-NULL policy");
-  DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   // Decide which softly reachable refs should be kept alive.
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
@@ -545,7 +580,7 @@
     if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
       if (TraceReferenceGC) {
         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
-                               (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+                               iter.obj(), iter.obj()->blueprint()->internal_name());
       }
       // Make the Reference object active again
       iter.make_active();
@@ -570,20 +605,19 @@
 // Traverse the list and remove any Refs that are not active, or
 // whose referents are either alive or NULL.
 void
-ReferenceProcessor::pp2_work(DiscoveredList&    refs_list_addr,
+ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                              BoolObjectClosure* is_alive,
-                             OopClosure*        keep_alive)
-{
+                             OopClosure*        keep_alive) {
   assert(discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
-    DEBUG_ONLY(oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());)
-    assert(*next_addr == NULL, "Should not discover inactive Reference");
+    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
+    assert(next == NULL, "Should not discover inactive Reference");
     if (iter.is_referent_alive()) {
       if (TraceReferenceGC) {
         gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
-                               (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+                               iter.obj(), iter.obj()->blueprint()->internal_name());
       }
       // The referent is reachable after all.
       // Update the referent pointer as necessary: Note that this
@@ -605,25 +639,28 @@
 }
 
 void
-ReferenceProcessor::pp2_work_concurrent_discovery(
-  DiscoveredList&    refs_list_addr,
-  BoolObjectClosure* is_alive,
-  OopClosure*        keep_alive,
-  VoidClosure*       complete_gc)
-{
+ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
+                                                  BoolObjectClosure* is_alive,
+                                                  OopClosure*        keep_alive,
+                                                  VoidClosure*       complete_gc) {
   assert(!discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
-    oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+    oop next = java_lang_ref_Reference::next(iter.obj());
     if ((iter.referent() == NULL || iter.is_referent_alive() ||
-         *next_addr != NULL)) {
-      assert((*next_addr)->is_oop_or_null(), "bad next field");
+         next != NULL)) {
+      assert(next->is_oop_or_null(), "bad next field");
       // Remove Reference object from list
       iter.remove();
       // Trace the cohorts
       iter.make_referent_alive();
-      keep_alive->do_oop(next_addr);
+      if (UseCompressedOops) {
+        keep_alive->do_oop((narrowOop*)next_addr);
+      } else {
+        keep_alive->do_oop((oop*)next_addr);
+      }
     } else {
       iter.next();
     }
@@ -639,15 +676,15 @@
 }
 
 // Traverse the list and process the referents, by either
-// either clearing them or keeping them (and their reachable
+// clearing them or keeping them (and their reachable
 // closure) alive.
 void
-ReferenceProcessor::process_phase3(DiscoveredList&    refs_list_addr,
+ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                    bool               clear_referent,
                                    BoolObjectClosure* is_alive,
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
-  DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.update_discovered();
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
@@ -661,7 +698,7 @@
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                              clear_referent ? "cleared " : "",
-                             (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+                             iter.obj(), iter.obj()->blueprint()->internal_name());
     }
     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
     // If discovery is concurrent, we may have objects with null referents,
@@ -679,15 +716,15 @@
 }
 
 void
-ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& ref_list) {
-  oop obj = ref_list.head();
-  while (obj != _sentinelRef) {
-    oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
-    obj = *discovered_addr;
-    *discovered_addr = NULL;
+ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
+  oop obj = refs_list.head();
+  while (obj != sentinel_ref()) {
+    oop discovered = java_lang_ref_Reference::discovered(obj);
+    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
+    obj = discovered;
   }
-  ref_list.set_head(_sentinelRef);
-  ref_list.set_length(0);
+  refs_list.set_head(sentinel_ref());
+  refs_list.set_length(0);
 }
 
 void
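
The rewrite of abandon_partial_discovered_list is representative of a change repeated throughout this file: raw loads and stores through the Reference fields' addresses are replaced by the java_lang_ref_Reference accessors (discovered(), next(), set_discovered_raw()) and oop_store_raw(). With compressed oops those fields are only four bytes wide, so a raw oop-width access would touch the wrong number of bytes. A simplified, hypothetical sketch of what such an accessor has to do:

    // Sketch only, not the real accessor: every access to a heap oop field
    // must dispatch on the width selected by UseCompressedOops.
    static oop load_ref_field_sketch(void* field_addr) {
      if (UseCompressedOops) {
        return oopDesc::load_decode_heap_oop((narrowOop*)field_addr);
      } else {
        return oopDesc::load_decode_heap_oop((oop*)field_addr);
      }
    }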
@@ -777,7 +814,7 @@
         // find an element to split the list on
         for (size_t j = 0; j < refs_to_move; ++j) {
           move_tail = new_head;
-          new_head = *java_lang_ref_Reference::discovered_addr(new_head);
+          new_head = java_lang_ref_Reference::discovered(new_head);
         }
         java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
         ref_lists[to_idx].set_head(move_head);
@@ -875,17 +912,17 @@
   size_t length = refs_list.length();
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
-    oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
-    assert((*next_addr)->is_oop_or_null(), "bad next field");
+    oop next = java_lang_ref_Reference::next(iter.obj());
+    assert(next->is_oop_or_null(), "bad next field");
     // If referent has been cleared or Reference is not active,
     // drop it.
-    if (iter.referent() == NULL || *next_addr != NULL) {
+    if (iter.referent() == NULL || next != NULL) {
       debug_only(
         if (PrintGCDetails && TraceReferenceGC) {
           gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
             INTPTR_FORMAT " with next field: " INTPTR_FORMAT
             " and referent: " INTPTR_FORMAT,
-            (address)iter.obj(), (address)*next_addr, (address)iter.referent());
+            iter.obj(), next, iter.referent());
         }
       )
       // Remove Reference object from list
@@ -950,18 +987,21 @@
   return list;
 }
 
-inline void ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& list,
-  oop obj, oop* discovered_addr) {
+inline void
+ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
+                                              oop             obj,
+                                              HeapWord*       discovered_addr) {
   assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
   // First we must make sure this object is only enqueued once. CAS in a non null
   // discovered_addr.
-  oop retest = (oop)Atomic::cmpxchg_ptr(list.head(), discovered_addr, NULL);
+  oop retest = oopDesc::atomic_compare_exchange_oop(refs_list.head(), discovered_addr,
+                                                    NULL);
   if (retest == NULL) {
     // This thread just won the right to enqueue the object.
     // We have separate lists for enqueueing so no synchronization
     // is necessary.
-    list.set_head(obj);
-    list.set_length(list.length() + 1);
+    refs_list.set_head(obj);
+    refs_list.set_length(refs_list.length() + 1);
   } else {
     // If retest was non NULL, another thread beat us to it:
     // The reference has already been discovered...
@@ -972,7 +1012,6 @@
   }
 }
 
-
 // We mention two of several possible choices here:
 // #0: if the reference object is not in the "originating generation"
 //     (or part of the heap being collected, indicated by our "span"
@@ -1006,8 +1045,8 @@
     return false;
   }
   // We only enqueue active references.
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);
-  if (*next_addr != NULL) {
+  oop next = java_lang_ref_Reference::next(obj);
+  if (next != NULL) {
     return false;
   }
 
@@ -1034,14 +1073,14 @@
     }
   }
 
-  oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
-  assert(discovered_addr != NULL && (*discovered_addr)->is_oop_or_null(),
-         "bad discovered field");
-  if (*discovered_addr != NULL) {
+  HeapWord* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
+  oop  discovered = java_lang_ref_Reference::discovered(obj);
+  assert(discovered->is_oop_or_null(), "bad discovered field");
+  if (discovered != NULL) {
     // The reference has already been discovered...
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
-                             (oopDesc*)obj, obj->blueprint()->internal_name());
+                             obj, obj->blueprint()->internal_name());
     }
     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
       // assumes that an object is not processed twice;
@@ -1088,7 +1127,7 @@
   if (_discovery_is_mt) {
     add_to_discovered_list_mt(*list, obj, discovered_addr);
   } else {
-    *discovered_addr = list->head();
+    oop_store_raw(discovered_addr, list->head());
     list->set_head(obj);
     list->set_length(list->length() + 1);
   }
@@ -1106,7 +1145,7 @@
     oop referent = java_lang_ref_Reference::referent(obj);
     if (PrintGCDetails) {
       gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
-                             (oopDesc*) obj, obj->blueprint()->internal_name());
+                             obj, obj->blueprint()->internal_name());
     }
     assert(referent->is_oop(), "Enqueued a bad referent");
   }
@@ -1181,17 +1220,20 @@
 // are not active (have a non-NULL next field). NOTE: For this to work
 // correctly, refs discovery can not be happening concurrently with this
 // step.
-void ReferenceProcessor::preclean_discovered_reflist(
-  DiscoveredList& refs_list, BoolObjectClosure* is_alive,
-  OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield) {
-
+void
+ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
+                                                BoolObjectClosure* is_alive,
+                                                OopClosure*        keep_alive,
+                                                VoidClosure*       complete_gc,
+                                                YieldClosure*      yield) {
   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   size_t length = refs_list.length();
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
-    oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+    oop obj = iter.obj();
+    oop next = java_lang_ref_Reference::next(obj);
     if (iter.referent() == NULL || iter.is_referent_alive() ||
-        *next_addr != NULL) {
+        next != NULL) {
       // The referent has been cleared, or is alive, or the Reference is not
       // active; we need to trace and mark its cohort.
       if (TraceReferenceGC) {
@@ -1203,7 +1245,13 @@
       --length;
       // Keep alive its cohort.
       iter.make_referent_alive();
-      keep_alive->do_oop(next_addr);
+      if (UseCompressedOops) {
+        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
+        keep_alive->do_oop(next_addr);
+      } else {
+        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
+        keep_alive->do_oop(next_addr);
+      }
     } else {
       iter.next();
     }
@@ -1241,7 +1289,7 @@
 #endif
 
 void ReferenceProcessor::verify() {
-  guarantee(_sentinelRef != NULL && _sentinelRef->is_oop(), "Lost _sentinelRef");
+  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
 }
 
 #ifndef PRODUCT
@@ -1249,12 +1297,12 @@
   guarantee(!_discovering_refs, "Discovering refs?");
   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
     oop obj = _discoveredSoftRefs[i].head();
-    while (obj != _sentinelRef) {
+    while (obj != sentinel_ref()) {
       oop next = java_lang_ref_Reference::discovered(obj);
       java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
       obj = next;
     }
-    _discoveredSoftRefs[i].set_head(_sentinelRef);
+    _discoveredSoftRefs[i].set_head(sentinel_ref());
     _discoveredSoftRefs[i].set_length(0);
   }
 }
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -45,8 +45,6 @@
 class DiscoveredList;
 
 class ReferenceProcessor : public CHeapObj {
- friend class DiscoveredList;
- friend class DiscoveredListIterator;
  protected:
   // End of list marker
   static oop  _sentinelRef;
@@ -70,16 +68,20 @@
   BoolObjectClosure* _is_alive_non_header;
 
   // The discovered ref lists themselves
-  int             _num_q;       // the MT'ness degree of the queues below
-  DiscoveredList* _discoveredSoftRefs; // pointer to array of oops
+
+  // The MT'ness degree of the queues below
+  int             _num_q;
+  // Arrays of lists of oops, one per thread
+  DiscoveredList* _discoveredSoftRefs;
   DiscoveredList* _discoveredWeakRefs;
   DiscoveredList* _discoveredFinalRefs;
   DiscoveredList* _discoveredPhantomRefs;
 
  public:
-  int  num_q()                           { return _num_q; }
+  int num_q()                            { return _num_q; }
   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
-  static oop* sentinel_ref()             { return &_sentinelRef; }
+  static oop  sentinel_ref()             { return _sentinelRef; }
+  static oop* adr_sentinel_ref()         { return &_sentinelRef; }
 
  public:
   // Process references with a certain reachability level.
@@ -98,45 +100,45 @@
   // Work methods used by the method process_discovered_reflist
   // Phase1: keep alive all those referents that are otherwise
   // dead but which must be kept alive by policy (and their closure).
-  void process_phase1(DiscoveredList&     refs_list_addr,
+  void process_phase1(DiscoveredList&     refs_list,
                       ReferencePolicy*    policy,
                       BoolObjectClosure*  is_alive,
                       OopClosure*         keep_alive,
                       VoidClosure*        complete_gc);
   // Phase2: remove all those references whose referents are
   // reachable.
-  inline void process_phase2(DiscoveredList&    refs_list_addr,
+  inline void process_phase2(DiscoveredList&    refs_list,
                              BoolObjectClosure* is_alive,
                              OopClosure*        keep_alive,
                              VoidClosure*       complete_gc) {
     if (discovery_is_atomic()) {
       // complete_gc is ignored in this case for this phase
-      pp2_work(refs_list_addr, is_alive, keep_alive);
+      pp2_work(refs_list, is_alive, keep_alive);
     } else {
       assert(complete_gc != NULL, "Error");
-      pp2_work_concurrent_discovery(refs_list_addr, is_alive,
+      pp2_work_concurrent_discovery(refs_list, is_alive,
                                     keep_alive, complete_gc);
     }
   }
   // Work methods in support of process_phase2
-  void pp2_work(DiscoveredList&    refs_list_addr,
+  void pp2_work(DiscoveredList&    refs_list,
                 BoolObjectClosure* is_alive,
                 OopClosure*        keep_alive);
   void pp2_work_concurrent_discovery(
-                DiscoveredList&    refs_list_addr,
+                DiscoveredList&    refs_list,
                 BoolObjectClosure* is_alive,
                 OopClosure*        keep_alive,
                 VoidClosure*       complete_gc);
   // Phase3: process the referents by either clearing them
   // or keeping them alive (and their closure)
-  void process_phase3(DiscoveredList&    refs_list_addr,
+  void process_phase3(DiscoveredList&    refs_list,
                       bool               clear_referent,
                       BoolObjectClosure* is_alive,
                       OopClosure*        keep_alive,
                       VoidClosure*       complete_gc);
 
   // Enqueue references with a certain reachability level
-  void enqueue_discovered_reflist(DiscoveredList& refs_list, oop* pending_list_addr);
+  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);
 
   // "Preclean" all the discovered reference lists
   // by removing references with strongly reachable referents.
@@ -169,6 +171,8 @@
   // occupying the i / _num_q slot.
   const char* list_name(int i);
 
+  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
+
  protected:
   // "Preclean" the given discovered reference list
   // by removing references with strongly reachable referents.
@@ -179,7 +183,6 @@
                                    VoidClosure*       complete_gc,
                                    YieldClosure*      yield);
 
-  void enqueue_discovered_reflists(oop* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
   int next_id() {
     int id = _next_id;
     if (++_next_id == _num_q) {
@@ -189,7 +192,7 @@
   }
   DiscoveredList* get_discovered_list(ReferenceType rt);
   inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
-                                        oop* discovered_addr);
+                                        HeapWord* discovered_addr);
   void verify_ok_to_handle_reflists() PRODUCT_RETURN;
 
   void abandon_partial_discovered_list(DiscoveredList& refs_list);
@@ -477,7 +480,7 @@
 protected:
   EnqueueTask(ReferenceProcessor& ref_processor,
               DiscoveredList      refs_lists[],
-              oop*                pending_list_addr,
+              HeapWord*           pending_list_addr,
               oop                 sentinel_ref,
               int                 n_queues)
     : _ref_processor(ref_processor),
@@ -493,7 +496,7 @@
 protected:
   ReferenceProcessor& _ref_processor;
   DiscoveredList*     _refs_lists;
-  oop*                _pending_list_addr;
+  HeapWord*           _pending_list_addr;
   oop                 _sentinel_ref;
   int                 _n_queues;
 };
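
The switch from oop* to HeapWord* for pending_list_addr and discovered_addr reflects that the slot behind the address may now hold either a full-width oop or a 32-bit narrowOop, so callers pass a type-neutral address and the width is resolved where the slot is actually read. A rough standalone sketch of that idea (modelled types and names, not the HotSpot code):

#include <cstdint>
#include <iostream>

static bool use_compressed = true;   // stand-in for UseCompressedOops

// Read a reference slot whose width depends on the compressed-oops mode;
// the caller only hands us an untyped address, as the HeapWord* parameters do.
uintptr_t read_ref_slot(void* addr) {
  if (use_compressed) {
    return *static_cast<uint32_t*>(addr);    // narrow (32-bit) slot
  } else {
    return *static_cast<uintptr_t*>(addr);   // full-width slot
  }
}

int main() {
  uint32_t narrow_slot = 0x1234;
  std::cout << std::hex << read_ref_slot(&narrow_slot) << std::endl;  // prints 1234

  use_compressed = false;
  uintptr_t wide_slot = 0xcafef00d;
  std::cout << std::hex << read_ref_slot(&wide_slot) << std::endl;    // prints cafef00d
  return 0;
}
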
--- a/hotspot/src/share/vm/memory/restore.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/restore.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -50,6 +50,8 @@
     *p = obj;
   }
 
+  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
   void do_ptr(void** p) {
     assert(*p == NULL, "initializing previous initialized pointer.");
     void* obj = nextOop();
--- a/hotspot/src/share/vm/memory/serialize.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/serialize.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -41,17 +41,18 @@
   int tag = 0;
   soc->do_tag(--tag);
 
+  assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
   // Verify the sizes of various oops in the system.
   soc->do_tag(sizeof(oopDesc));
   soc->do_tag(sizeof(instanceOopDesc));
   soc->do_tag(sizeof(methodOopDesc));
   soc->do_tag(sizeof(constMethodOopDesc));
   soc->do_tag(sizeof(methodDataOopDesc));
-  soc->do_tag(sizeof(arrayOopDesc));
+  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
   soc->do_tag(sizeof(constantPoolOopDesc));
   soc->do_tag(sizeof(constantPoolCacheOopDesc));
-  soc->do_tag(sizeof(objArrayOopDesc));
-  soc->do_tag(sizeof(typeArrayOopDesc));
+  soc->do_tag(objArrayOopDesc::base_offset_in_bytes(T_BYTE));
+  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
   soc->do_tag(sizeof(symbolOopDesc));
   soc->do_tag(sizeof(klassOopDesc));
   soc->do_tag(sizeof(markOopDesc));
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -74,9 +74,10 @@
 
 class AssertIsPermClosure: public OopClosure {
 public:
-  void do_oop(oop* p) {
+  virtual void do_oop(oop* p) {
     assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
   }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
 static AssertIsPermClosure assert_is_perm_closure;
 
@@ -187,12 +188,13 @@
 public:
   SkipAdjustingSharedStrings(OopClosure* clo) : _clo(clo) {}
 
-  void do_oop(oop* p) {
+  virtual void do_oop(oop* p) {
     oop o = (*p);
     if (!o->is_shared_readwrite()) {
       _clo->do_oop(p);
     }
   }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
 
 // Unmarked shared Strings in the StringTable (which got there due to
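
The do_oop(narrowOop*) overloads added to these closures follow a pattern used throughout the change: closures that can never be handed a compressed slot stub the narrow overload with ShouldNotReachHere(), while closures that genuinely handle both widths forward the two virtuals to a single template worker. A small standalone model of the second case (invented names, not the HotSpot OopClosure hierarchy):

#include <cstdint>
#include <iostream>

typedef uintptr_t wide_ref;    // stand-in for oop
typedef uint32_t  narrow_ref;  // stand-in for narrowOop

struct RefClosure {
  virtual void do_ref(wide_ref* p)   = 0;
  virtual void do_ref(narrow_ref* p) = 0;
  virtual ~RefClosure() {}
};

// A closure that handles both widths funnels them into one template worker;
// a closure that can never see narrow slots would stub the narrow overload
// out instead (the ShouldNotReachHere() pattern above).
struct CountNonNull : public RefClosure {
  int count;
  CountNonNull() : count(0) {}
  template <class T> void do_ref_work(T* p) { if (*p != 0) ++count; }
  virtual void do_ref(wide_ref* p)   { do_ref_work(p); }
  virtual void do_ref(narrow_ref* p) { do_ref_work(p); }
};

int main() {
  CountNonNull cl;
  wide_ref   w = 42;
  narrow_ref n = 0;
  cl.do_ref(&w);
  cl.do_ref(&n);
  std::cout << cl.count << std::endl;   // prints 1
  return 0;
}
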
--- a/hotspot/src/share/vm/memory/space.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/space.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -25,6 +25,9 @@
 # include "incls/_precompiled.incl"
 # include "incls/_space.cpp.incl"
 
+void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
+void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
+
 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                 HeapWord* top_obj) {
   if (top_obj != NULL) {
@@ -150,10 +153,6 @@
   return new DirtyCardToOopClosure(this, cl, precision, boundary);
 }
 
-void FilteringClosure::do_oop(oop* p) {
-  do_oop_nv(p);
-}
-
 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
   if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
@@ -337,7 +336,7 @@
     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   }
 
-  debug_only(MarkSweep::register_live_oop(q, size));
+  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size));
   compact_top += size;
 
   // we need to update the offset table so that the beginnings of objects can be
@@ -406,13 +405,13 @@
     if (oop(q)->is_gc_marked()) {
       // q is alive
 
-      debug_only(MarkSweep::track_interior_pointers(oop(q)));
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
       // point all the oops to the new location
       size_t size = oop(q)->adjust_pointers();
-      debug_only(MarkSweep::check_interior_pointers());
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
 
       debug_only(prev_q = q);
-      debug_only(MarkSweep::validate_live_oop(oop(q), size));
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
 
       q += size;
     } else {
@@ -884,10 +883,13 @@
 
 class VerifyOldOopClosure : public OopClosure {
  public:
-  oop the_obj;
-  bool allow_dirty;
+  oop  _the_obj;
+  bool _allow_dirty;
   void do_oop(oop* p) {
-    the_obj->verify_old_oop(p, allow_dirty);
+    _the_obj->verify_old_oop(p, _allow_dirty);
+  }
+  void do_oop(narrowOop* p) {
+    _the_obj->verify_old_oop(p, _allow_dirty);
   }
 };
 
@@ -898,7 +900,7 @@
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
   VerifyOldOopClosure blk;      // Does this do anything?
-  blk.allow_dirty = allow_dirty;
+  blk._allow_dirty = allow_dirty;
   int objs = 0;
   int blocks = 0;
 
@@ -919,7 +921,7 @@
 
     if (objs == OBJ_SAMPLE_INTERVAL) {
       oop(p)->verify();
-      blk.the_obj = oop(p);
+      blk._the_obj = oop(p);
       oop(p)->oop_iterate(&blk);
       objs = 0;
     } else {
--- a/hotspot/src/share/vm/memory/space.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/space.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -52,21 +52,24 @@
 class CardTableRS;
 class DirtyCardToOopClosure;
 
-
 // An oop closure that is circumscribed by a filtering memory region.
-class SpaceMemRegionOopsIterClosure: public virtual OopClosure {
-  OopClosure* cl;
-  MemRegion mr;
-public:
-  void do_oop(oop* p) {
-    if (mr.contains(p)) {
-      cl->do_oop(p);
+class SpaceMemRegionOopsIterClosure: public OopClosure {
+ private:
+  OopClosure* _cl;
+  MemRegion   _mr;
+ protected:
+  template <class T> void do_oop_work(T* p) {
+    if (_mr.contains(p)) {
+      _cl->do_oop(p);
     }
   }
-  SpaceMemRegionOopsIterClosure(OopClosure* _cl, MemRegion _mr): cl(_cl), mr(_mr) {}
+ public:
+  SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
+    _cl(cl), _mr(mr) {}
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 };
 
-
 // A Space describes a heap area. Class Space is an abstract
 // base class.
 //
@@ -279,7 +282,7 @@
   CardTableModRefBS::PrecisionStyle _precision;
   HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                 // pointing below boundary.
-  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
+  HeapWord* _min_done;                // ObjHeadPreciseArray precision requires
                                 // a downwards traversal; this is the
                                 // lowest location already done (or,
                                 // alternatively, the lowest address that
@@ -508,7 +511,7 @@
       /* prefetch beyond q */                                                \
       Prefetch::write(q, interval);                                          \
       /* size_t size = oop(q)->size();  changing this for cms for perm gen */\
-      size_t size = block_size(q);                                           \
+      size_t size = block_size(q);                                             \
       compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
       q += size;                                                             \
       end_of_live = q;                                                       \
@@ -572,147 +575,149 @@
   cp->space->set_compaction_top(compact_top);                                \
 }
 
-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
-  /* adjust all the interior pointers to point at the new locations of objects  \
-   * Used by MarkSweep::mark_sweep_phase3() */                                  \
+#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                                \
+  /* adjust all the interior pointers to point at the new locations of objects        \
+   * Used by MarkSweep::mark_sweep_phase3() */                                        \
                                                                                 \
-  HeapWord* q = bottom();                                                       \
-  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
+  HeapWord* q = bottom();                                                        \
+  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */        \
                                                                                 \
-  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
+  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                        \
                                                                                 \
-  if (q < t && _first_dead > q &&                                               \
+  if (q < t && _first_dead > q &&                                                \
       !oop(q)->is_gc_marked()) {                                                \
     /* we have a chunk of the space which hasn't moved and we've                \
      * reinitialized the mark word during the previous pass, so we can't        \
-     * use is_gc_marked for the traversal. */                                   \
+     * use is_gc_marked for the traversal. */                                        \
     HeapWord* end = _first_dead;                                                \
                                                                                 \
-    while (q < end) {                                                           \
-      /* I originally tried to conjoin "block_start(q) == q" to the             \
-       * assertion below, but that doesn't work, because you can't              \
-       * accurately traverse previous objects to get to the current one         \
-       * after their pointers (including pointers into permGen) have been       \
-       * updated, until the actual compaction is done.  dld, 4/00 */            \
-      assert(block_is_obj(q),                                                   \
-             "should be at block boundaries, and should be looking at objs");   \
+    while (q < end) {                                                                \
+      /* I originally tried to conjoin "block_start(q) == q" to the                \
+       * assertion below, but that doesn't work, because you can't                \
+       * accurately traverse previous objects to get to the current one                \
+       * after their pointers (including pointers into permGen) have been        \
+       * updated, until the actual compaction is done.  dld, 4/00 */                \
+      assert(block_is_obj(q),                                                        \
+             "should be at block boundaries, and should be looking at objs");        \
                                                                                 \
-      debug_only(MarkSweep::track_interior_pointers(oop(q)));                   \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
                                                                                 \
-      /* point all the oops to the new location */                              \
-      size_t size = oop(q)->adjust_pointers();                                  \
-      size = adjust_obj_size(size);                                             \
+      /* point all the oops to the new location */                                \
+      size_t size = oop(q)->adjust_pointers();                                        \
+      size = adjust_obj_size(size);                                                \
                                                                                 \
-      debug_only(MarkSweep::check_interior_pointers());                         \
-                                                                                \
-      debug_only(MarkSweep::validate_live_oop(oop(q), size));                   \
-                                                                                \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
+                                                                                      \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
+                                                                                      \
       q += size;                                                                \
-    }                                                                           \
+    }                                                                                \
                                                                                 \
-    if (_first_dead == t) {                                                     \
-      q = t;                                                                    \
-    } else {                                                                    \
-      /* $$$ This is funky.  Using this to read the previously written          \
-       * LiveRange.  See also use below. */                                     \
+    if (_first_dead == t) {                                                        \
+      q = t;                                                                        \
+    } else {                                                                        \
+      /* $$$ This is funky.  Using this to read the previously written                \
+       * LiveRange.  See also use below. */                                        \
       q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
-    }                                                                           \
-  }                                                                             \
+    }                                                                                \
+  }                                                                                \
                                                                                 \
   const intx interval = PrefetchScanIntervalInBytes;                            \
                                                                                 \
-  debug_only(HeapWord* prev_q = NULL);                                          \
-  while (q < t) {                                                               \
-    /* prefetch beyond q */                                                     \
+  debug_only(HeapWord* prev_q = NULL);                                                \
+  while (q < t) {                                                                \
+    /* prefetch beyond q */                                                        \
     Prefetch::write(q, interval);                                               \
-    if (oop(q)->is_gc_marked()) {                                               \
-      /* q is alive */                                                          \
-      debug_only(MarkSweep::track_interior_pointers(oop(q)));                   \
-      /* point all the oops to the new location */                              \
-      size_t size = oop(q)->adjust_pointers();                                  \
-      size = adjust_obj_size(size);                                             \
-      debug_only(MarkSweep::check_interior_pointers());                         \
-      debug_only(MarkSweep::validate_live_oop(oop(q), size));                   \
-      debug_only(prev_q = q);                                                   \
+    if (oop(q)->is_gc_marked()) {                                                \
+      /* q is alive */                                                                \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
+      /* point all the oops to the new location */                                \
+      size_t size = oop(q)->adjust_pointers();                                        \
+      size = adjust_obj_size(size);                                                \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());                \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
+      debug_only(prev_q = q);                                                        \
       q += size;                                                                \
-    } else {                                                                    \
-      /* q is not a live object, so its mark should point at the next           \
-       * live object */                                                         \
-      debug_only(prev_q = q);                                                   \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
-      assert(q > prev_q, "we should be moving forward through memory");         \
-    }                                                                           \
-  }                                                                             \
+    } else {                                                                        \
+      /* q is not a live object, so its mark should point at the next                \
+       * live object */                                                                \
+      debug_only(prev_q = q);                                                        \
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();                                \
+      assert(q > prev_q, "we should be moving forward through memory");                \
+    }                                                                                \
+  }                                                                                \
                                                                                 \
-  assert(q == t, "just checking");                                              \
+  assert(q == t, "just checking");                                                \
 }
 
-#define SCAN_AND_COMPACT(obj_size) {                                            \
+#define SCAN_AND_COMPACT(obj_size) {                                                \
   /* Copy all live objects to their new location                                \
-   * Used by MarkSweep::mark_sweep_phase4() */                                  \
+   * Used by MarkSweep::mark_sweep_phase4() */                                        \
                                                                                 \
-  HeapWord*       q = bottom();                                                 \
-  HeapWord* const t = _end_of_live;                                             \
-  debug_only(HeapWord* prev_q = NULL);                                          \
+  HeapWord*       q = bottom();                                                        \
+  HeapWord* const t = _end_of_live;                                                \
+  debug_only(HeapWord* prev_q = NULL);                                                \
                                                                                 \
-  if (q < t && _first_dead > q &&                                               \
+  if (q < t && _first_dead > q &&                                                \
       !oop(q)->is_gc_marked()) {                                                \
-    debug_only(                                                                 \
-    /* we have a chunk of the space which hasn't moved and we've reinitialized the              \
-     * mark word during the previous pass, so we can't use is_gc_marked for the \
-     * traversal. */                                                            \
-    HeapWord* const end = _first_dead;                                          \
-                                                                                \
-    while (q < end) {                                                           \
+    debug_only(                                                                        \
+    /* we have a chunk of the space which hasn't moved and we've reinitialized  \
+     * the mark word during the previous pass, so we can't use is_gc_marked for \
+     * the traversal. */                                                        \
+    HeapWord* const end = _first_dead;                                                \
+                                                                                      \
+    while (q < end) {                                                                \
       size_t size = obj_size(q);                                                \
-      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); \
-      debug_only(MarkSweep::live_oop_moved_to(q, size, q));                     \
-      debug_only(prev_q = q);                                                   \
+      assert(!oop(q)->is_gc_marked(),                                           \
+             "should be unmarked (special dense prefix handling)");             \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));        \
+      debug_only(prev_q = q);                                                        \
       q += size;                                                                \
-    }                                                                           \
-    )  /* debug_only */                                                         \
-                                                                                \
-    if (_first_dead == t) {                                                     \
-      q = t;                                                                    \
-    } else {                                                                    \
-      /* $$$ Funky */                                                           \
-      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
-    }                                                                           \
-  }                                                                             \
+    }                                                                                \
+    )  /* debug_only */                                                                \
+                                                                                      \
+    if (_first_dead == t) {                                                        \
+      q = t;                                                                        \
+    } else {                                                                        \
+      /* $$$ Funky */                                                                 \
+      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();                \
+    }                                                                                \
+  }                                                                                \
                                                                                 \
-  const intx scan_interval = PrefetchScanIntervalInBytes;                       \
-  const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
-  while (q < t) {                                                               \
-    if (!oop(q)->is_gc_marked()) {                                              \
-      /* mark is pointer to next marked oop */                                  \
-      debug_only(prev_q = q);                                                   \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
-      assert(q > prev_q, "we should be moving forward through memory");         \
-    } else {                                                                    \
-      /* prefetch beyond q */                                                   \
+  const intx scan_interval = PrefetchScanIntervalInBytes;                        \
+  const intx copy_interval = PrefetchCopyIntervalInBytes;                        \
+  while (q < t) {                                                                \
+    if (!oop(q)->is_gc_marked()) {                                                \
+      /* mark is pointer to next marked oop */                                        \
+      debug_only(prev_q = q);                                                        \
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();                                \
+      assert(q > prev_q, "we should be moving forward through memory");                \
+    } else {                                                                        \
+      /* prefetch beyond q */                                                        \
       Prefetch::read(q, scan_interval);                                         \
                                                                                 \
       /* size and destination */                                                \
       size_t size = obj_size(q);                                                \
       HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
                                                                                 \
-      /* prefetch beyond compaction_top */                                      \
+      /* prefetch beyond compaction_top */                                        \
       Prefetch::write(compaction_top, copy_interval);                           \
                                                                                 \
-      /* copy object and reinit its mark */                                     \
-      debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));        \
-      assert(q != compaction_top, "everything in this pass should be moving");  \
-      Copy::aligned_conjoint_words(q, compaction_top, size);                    \
-      oop(compaction_top)->init_mark();                                         \
-      assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
+      /* copy object and reinit its mark */                                        \
+      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size,            \
+                                                            compaction_top));   \
+      assert(q != compaction_top, "everything in this pass should be moving");        \
+      Copy::aligned_conjoint_words(q, compaction_top, size);                        \
+      oop(compaction_top)->init_mark();                                                \
+      assert(oop(compaction_top)->klass() != NULL, "should have a class");        \
                                                                                 \
-      debug_only(prev_q = q);                                                   \
+      debug_only(prev_q = q);                                                        \
       q += size;                                                                \
-    }                                                                           \
-  }                                                                             \
+    }                                                                                \
+  }                                                                                \
                                                                                 \
   /* Reset space after compaction is complete */                                \
-  reset_after_compaction();                                                     \
+  reset_after_compaction();                                                        \
   /* We do this clear, below, since it has overloaded meanings for some */      \
   /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
   /* compacted into will have had their offset table thresholds updated */      \
--- a/hotspot/src/share/vm/memory/universe.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/universe.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -99,6 +99,7 @@
 size_t          Universe::_heap_used_at_last_gc;
 
 CollectedHeap*  Universe::_collectedHeap = NULL;
+address         Universe::_heap_base = NULL;
 
 
 void Universe::basic_type_classes_do(void f(klassOop)) {
@@ -464,7 +465,7 @@
 
 class FixupMirrorClosure: public ObjectClosure {
  public:
-  void do_object(oop obj) {
+  virtual void do_object(oop obj) {
     if (obj->is_klass()) {
       EXCEPTION_MARK;
       KlassHandle k(THREAD, klassOop(obj));
@@ -667,7 +668,7 @@
          "LogHeapWordSize is incorrect.");
   guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
   guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
-         "oop size is not not a multiple of HeapWord size");
+            "oop size is not not a multiple of HeapWord size");
   TraceTime timer("Genesis", TraceStartupTime);
   GC_locker::lock();  // do not allow gc during bootstrapping
   JavaClasses::compute_hard_coded_offsets();
@@ -759,6 +760,15 @@
   if (status != JNI_OK) {
     return status;
   }
+  if (UseCompressedOops) {
+    // Subtract a page because something can get allocated at heap base.
+    // This also makes implicit null checking work, because the
+    // memory+1 page below heap_base needs to cause a signal.
+    // See needs_explicit_null_check.
+    // Only set the heap base for compressed oops because it indicates
+    // compressed oops for pstack code.
+    Universe::_heap_base = Universe::heap()->base() - os::vm_page_size();
+  }
 
   // We will never reach the CATCH below since Exceptions::_throw will cause
   // the VM to exit if an exception is thrown during initialization
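
A rough arithmetic sketch of the comment above (hypothetical addresses and an assumed 4 KB page, not values from the patch): with the base placed one page below the heap, a field load through a decoded null still resolves to an address inside that protected page, so it faults just like an ordinary null dereference.

#include <cstdint>
#include <iostream>

int main() {
  const uintptr_t page_size  = 4096;         // assumed page size
  const uintptr_t heap_start = 0x40000000;   // hypothetical heap start
  const uintptr_t heap_base  = heap_start - page_size;

  // Decoding a null narrow reference and then adding a small field offset
  // yields an address below the heap, in the unmapped page.
  uintptr_t field_offset  = 16;
  uintptr_t faulting_addr = heap_base + 0 + field_offset;
  std::cout << std::boolalpha << (faulting_addr < heap_start) << std::endl;  // true
  return 0;
}
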
--- a/hotspot/src/share/vm/memory/universe.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/memory/universe.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -180,10 +180,13 @@
 
   // The particular choice of collected heap.
   static CollectedHeap* _collectedHeap;
+  // Base address for oop-within-java-object materialization.
+  // NULL if using wide oops.  Doubles as heap oop null value.
+  static address        _heap_base;
 
   // array of dummy objects used with +FullGCAlot
   debug_only(static objArrayOop _fullgc_alot_dummy_array;)
- // index of next entry to clear
+  // index of next entry to clear
   debug_only(static int         _fullgc_alot_dummy_next;)
 
   // Compiler/dispatch support
@@ -323,6 +326,10 @@
   // The particular choice of collected heap.
   static CollectedHeap* heap() { return _collectedHeap; }
 
+  // For UseCompressedOops
+  static address heap_base()       { return _heap_base; }
+  static address* heap_base_addr() { return &_heap_base; }
+
   // Historic gc information
   static size_t get_heap_capacity_at_last_gc()         { return _heap_capacity_at_last_gc; }
   static size_t get_heap_free_at_last_gc()             { return _heap_capacity_at_last_gc - _heap_used_at_last_gc; }
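
heap_base() is the value used when packing and unpacking compressed oops. A minimal standalone model of that encode/decode arithmetic, assuming a 3-bit shift for 8-byte object alignment (the assumption that lets a 32-bit offset span up to 32 GB); the constants are hypothetical, not taken from the patch:

#include <cassert>
#include <cstdint>
#include <iostream>

static const unsigned shift = 3;            // assumed log2(object alignment)
static uintptr_t heap_base  = 0x40000000;   // hypothetical Universe::heap_base()

uint32_t  encode(uintptr_t addr)  { return (uint32_t)((addr - heap_base) >> shift); }
uintptr_t decode(uint32_t narrow) { return heap_base + ((uintptr_t)narrow << shift); }

int main() {
  uintptr_t obj = heap_base + 0x7fff8;   // some 8-byte-aligned heap address
  uint32_t  n   = encode(obj);
  assert(decode(n) == obj);              // encode/decode round-trips
  std::cout << std::hex << n << " -> " << decode(n) << std::endl;
  return 0;
}
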
--- a/hotspot/src/share/vm/oops/arrayOop.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/arrayOop.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -22,34 +22,79 @@
  *
  */
 
-// arrayOopDesc is the abstract baseclass for all arrays.
+// arrayOopDesc is the abstract baseclass for all arrays.  It doesn't
+// declare pure virtual to enforce this because that would allocate a vtbl
+// in each instance, which we don't want.
+
+// The layout of array Oops is:
+//
+//  markOop
+//  klassOop  // 32 bits if compressed but declared 64 in LP64.
+//  length    // shares klass memory or allocated after declared fields.
+
 
 class arrayOopDesc : public oopDesc {
   friend class VMStructs;
- private:
-  int _length; // number of elements in the array
+
+  // Interpreter/Compiler offsets
+
+  // Header size computation.
+  // The header is considered the oop part of this type plus the length.
+  // Returns the aligned header_size_in_bytes.  This is not equivalent to
+  // sizeof(arrayOopDesc) which should not appear in the code, except here.
+  static int header_size_in_bytes() {
+    size_t hs = UseCompressedOops ?
+            sizeof(arrayOopDesc) :
+            align_size_up(sizeof(arrayOopDesc) + sizeof(int), HeapWordSize);
+#ifdef ASSERT
+    // make sure it isn't called before UseCompressedOops is initialized.
+    static size_t arrayoopdesc_hs = 0;
+    if (arrayoopdesc_hs == 0) arrayoopdesc_hs = hs;
+    assert(arrayoopdesc_hs == hs, "header size can't change");
+#endif // ASSERT
+    return (int)hs;
+  }
 
  public:
-  // Interpreter/Compiler offsets
-  static int length_offset_in_bytes()             { return offset_of(arrayOopDesc, _length); }
-  static int base_offset_in_bytes(BasicType type) { return header_size(type) * HeapWordSize; }
+  // The _length field is not declared in C++.  It is allocated after the
+  // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
+  // it occupies the second half of the _klass field in oopDesc.
+  static int length_offset_in_bytes() {
+    return UseCompressedOops ? klass_gap_offset_in_bytes() :
+                               sizeof(arrayOopDesc);
+  }
+
+  // Returns the offset of the first element.
+  static int base_offset_in_bytes(BasicType type) {
+    return header_size(type) * HeapWordSize;
+  }
 
   // Returns the address of the first element.
-  void* base(BasicType type) const              { return (void*) (((intptr_t) this) + base_offset_in_bytes(type)); }
+  void* base(BasicType type) const {
+    return (void*) (((intptr_t) this) + base_offset_in_bytes(type));
+  }
 
   // Tells whether index is within bounds.
   bool is_within_bounds(int index) const        { return 0 <= index && index < length(); }
 
-  // Accessores for instance variable
-  int length() const                            { return _length;   }
-  void set_length(int length)                   { _length = length; }
+  // Accessors for instance variable which is not a C++ declared nonstatic
+  // field.
+  int length() const {
+    return *(int*)(((intptr_t)this) + length_offset_in_bytes());
+  }
+  void set_length(int length) {
+    *(int*)(((intptr_t)this) + length_offset_in_bytes()) = length;
+  }
 
-  // Header size computation.
-  // Should only be called with constants as argument (will not constant fold otherwise)
+  // Should only be called with constants as argument
+  // (will not constant fold otherwise)
+  // Returns the header size in words aligned to the requirements of the
+  // array object type.
   static int header_size(BasicType type) {
-    return Universe::element_type_should_be_aligned(type)
-      ? align_object_size(sizeof(arrayOopDesc)/HeapWordSize)
-      : sizeof(arrayOopDesc)/HeapWordSize;
+    size_t typesize_in_bytes = header_size_in_bytes();
+    return (int)(Universe::element_type_should_be_aligned(type)
+      ? align_object_size(typesize_in_bytes/HeapWordSize)
+      : typesize_in_bytes/HeapWordSize);
   }
 
   // This method returns the  maximum length that can passed into
@@ -62,7 +107,7 @@
     // We use max_jint, since object_size is internally represented by an 'int'
     // This gives us an upper bound of max_jint words for the size of the oop.
     int32_t max_words = (max_jint - header_size(type) - 2);
-    int elembytes = (type == T_OBJECT) ? T_OBJECT_aelem_bytes : type2aelembytes(type);
+    int elembytes = type2aelembytes(type);
     jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
     return (len > max_jint) ? max_jint : (int32_t)len;
   }
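
A back-of-the-envelope check of the layout described above, under assumed LP64 sizes (8-byte mark word, 8-byte declared klass slot, 4-byte jint length), not HotSpot code: with compressed oops the length reuses the spare half of the klass word and the header stays at 16 bytes; otherwise the length follows the declared fields and the header pads up to 24 bytes.

#include <cstddef>
#include <iostream>

static size_t align_up(size_t n, size_t a) { return (n + a - 1) & ~(a - 1); }

int main() {
  const size_t word      = 8;   // HeapWordSize on LP64 (assumed)
  const size_t mark      = 8;   // mark word
  const size_t klass     = 8;   // declared klass slot; upper half spare when compressed
  const size_t jint_size = 4;

  size_t decl = mark + klass;                       // sizeof(arrayOopDesc) analogue

  size_t len_off_compressed   = mark + 4;           // length shares the klass word
  size_t hdr_compressed       = decl;               // already word-aligned

  size_t len_off_uncompressed = decl;               // length after declared fields
  size_t hdr_uncompressed     = align_up(decl + jint_size, word);

  std::cout << len_off_compressed   << " " << hdr_compressed   << std::endl;  // 12 16
  std::cout << len_off_uncompressed << " " << hdr_uncompressed << std::endl;  // 16 24
  return 0;
}
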
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -29,8 +29,9 @@
   int size = constantPoolOopDesc::object_size(length);
   KlassHandle klass (THREAD, as_klassOop());
   constantPoolOop c =
-    (constantPoolOop)CollectedHeap::permanent_array_allocate(klass, size, length, CHECK_NULL);
+    (constantPoolOop)CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
 
+  c->set_length(length);
   c->set_tags(NULL);
   c->set_cache(NULL);
   c->set_pool_holder(NULL);
@@ -54,14 +55,14 @@
 
 klassOop constantPoolKlass::create_klass(TRAPS) {
   constantPoolKlass o;
-  KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
-  arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
-  arrayKlassHandle super (THREAD, k->super());
-  complete_create_array_klass(k, super, CHECK_NULL);
+  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+  KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+  // Make sure size calculation is right
+  assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+  java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror
   return k();
 }
 
-
 int constantPoolKlass::oop_size(oop obj) const {
   assert(obj->is_constantPool(), "must be constantPool");
   return constantPoolOop(obj)->object_size();
@@ -275,7 +276,7 @@
   EXCEPTION_MARK;
   oop anObj;
   assert(obj->is_constantPool(), "must be constantPool");
-  arrayKlass::oop_print_on(obj, st);
+  Klass::oop_print_on(obj, st);
   constantPoolOop cp = constantPoolOop(obj);
 
   // Temp. remove cache so we can do lookups with original indicies.
--- a/hotspot/src/share/vm/oops/constantPoolKlass.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -24,7 +24,8 @@
 
 // A constantPoolKlass is the klass of a constantPoolOop
 
-class constantPoolKlass : public arrayKlass {
+class constantPoolKlass : public Klass {
+  juint    _alloc_size;        // allocation profiling support
  public:
   // Dispatched klass operations
   bool oop_is_constantPool() const  { return true; }
@@ -44,7 +45,7 @@
 
   // Sizing
   static int header_size()        { return oopDesc::header_size() + sizeof(constantPoolKlass)/HeapWordSize; }
-  int object_size() const         { return arrayKlass::object_size(header_size()); }
+  int object_size() const        { return align_object_size(header_size()); }
 
   // Garbage collection
   void oop_follow_contents(oop obj);
@@ -57,6 +58,11 @@
   int oop_oop_iterate(oop obj, OopClosure* blk);
   int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
 
+  // Allocation profiling support
+  // no idea why this is pure virtual and not in Klass ???
+  juint alloc_size() const              { return _alloc_size; }
+  void set_alloc_size(juint n)          { _alloc_size = n; }
+
 #ifndef PRODUCT
  public:
   // Printing
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -34,13 +34,14 @@
 
 class SymbolHashMap;
 
-class constantPoolOopDesc : public arrayOopDesc {
+class constantPoolOopDesc : public oopDesc {
   friend class VMStructs;
   friend class BytecodeInterpreter;  // Directly extracts an oop in the pool for fast instanceof/checkcast
  private:
   typeArrayOop         _tags; // the tag array describing the constant pool's contents
   constantPoolCacheOop _cache;         // the cache holding interpreter runtime information
   klassOop             _pool_holder;   // the corresponding class
+  int                  _length; // number of elements in the array
   // only set to non-zero if constant pool is merged by RedefineClasses
   int                  _orig_length;
 
@@ -330,6 +331,14 @@
   bool klass_name_at_matches(instanceKlassHandle k, int which);
 
   // Sizing
+  int length() const                   { return _length; }
+  void set_length(int length)          { _length = length; }
+
+  // Tells whether index is within bounds.
+  bool is_within_bounds(int index) const {
+    return 0 <= index && index < length();
+  }
+
   static int header_size()             { return sizeof(constantPoolOopDesc)/HeapWordSize; }
   static int object_size(int length)   { return align_object_size(header_size() + length); }
   int object_size()                    { return object_size(length()); }
--- a/hotspot/src/share/vm/oops/cpCacheKlass.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -37,18 +37,19 @@
   int size = constantPoolCacheOopDesc::object_size(length);
   KlassHandle klass (THREAD, as_klassOop());
   constantPoolCacheOop cache = (constantPoolCacheOop)
-    CollectedHeap::permanent_array_allocate(klass, size, length, CHECK_NULL);
+    CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
+  cache->set_length(length);
   cache->set_constant_pool(NULL);
   return cache;
 }
 
-
 klassOop constantPoolCacheKlass::create_klass(TRAPS) {
   constantPoolCacheKlass o;
-  KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
-  arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
-  KlassHandle super (THREAD, k->super());
-  complete_create_array_klass(k, super, CHECK_NULL);
+  KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+  KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+  // Make sure size calculation is right
+  assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+  java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror
   return k();
 }
 
@@ -183,7 +184,7 @@
   assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
   constantPoolCacheOop cache = (constantPoolCacheOop)obj;
   // super print
-  arrayKlass::oop_print_on(obj, st);
+  Klass::oop_print_on(obj, st);
   // print constant pool cache entries
   for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->print(st, i);
 }
@@ -194,7 +195,7 @@
   guarantee(obj->is_constantPoolCache(), "obj must be constant pool cache");
   constantPoolCacheOop cache = (constantPoolCacheOop)obj;
   // super verify
-  arrayKlass::oop_verify_on(obj, st);
+  Klass::oop_verify_on(obj, st);
   // print constant pool cache entries
   for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->verify(st);
 }
--- a/hotspot/src/share/vm/oops/cpCacheKlass.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -22,7 +22,8 @@
  *
  */
 
-class constantPoolCacheKlass: public arrayKlass {
+class constantPoolCacheKlass: public Klass {
+  juint    _alloc_size;        // allocation profiling support
  public:
   // Dispatched klass operations
   bool oop_is_constantPoolCache() const          { return true; }
@@ -41,8 +42,8 @@
   }
 
   // Sizing
-  static int header_size()                       { return oopDesc::header_size() + sizeof(constantPoolCacheKlass)/HeapWordSize; }
-  int object_size() const                        { return arrayKlass::object_size(header_size()); }
+  static int header_size()       { return oopDesc::header_size() + sizeof(constantPoolCacheKlass)/HeapWordSize; }
+  int object_size() const        { return align_object_size(header_size()); }
 
   // Garbage collection
   void oop_follow_contents(oop obj);
@@ -55,6 +56,10 @@
   int oop_oop_iterate(oop obj, OopClosure* blk);
   int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
 
+  // Allocation profiling support
+  juint alloc_size() const              { return _alloc_size; }
+  void set_alloc_size(juint n)          { _alloc_size = n; }
+
 #ifndef PRODUCT
  public:
   // Printing
--- a/hotspot/src/share/vm/oops/cpCacheOop.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/cpCacheOop.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -218,6 +218,7 @@
  public:
   LocalOopClosure(void f(oop*))        { _f = f; }
   virtual void do_oop(oop* o)          { _f(o); }
+  virtual void do_oop(narrowOop *o)    { ShouldNotReachHere(); }
 };
 
 
--- a/hotspot/src/share/vm/oops/cpCacheOop.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/cpCacheOop.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -286,12 +286,17 @@
 // is created and initialized before a class is actively used (i.e., initialized), the indivi-
 // dual cache entries are filled at resolution (i.e., "link") time (see also: rewriter.*).
 
-class constantPoolCacheOopDesc: public arrayOopDesc {
+class constantPoolCacheOopDesc: public oopDesc {
   friend class VMStructs;
  private:
+  int             _length;
   constantPoolOop _constant_pool;                // the corresponding constant pool
 
   // Sizing
+  debug_only(friend class ClassVerifier;)
+  int length() const                             { return _length; }
+  void set_length(int length)                    { _length = length; }
+
   static int header_size()                       { return sizeof(constantPoolCacheOopDesc) / HeapWordSize; }
   static int object_size(int length)             { return align_object_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); }
   int object_size()                              { return object_size(length()); }
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1255,218 +1255,298 @@
 #endif //PRODUCT
 
 
+#ifdef ASSERT
+template <class T> void assert_is_in(T *p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(Universe::heap()->is_in(o), "should be in heap");
+  }
+}
+template <class T> void assert_is_in_closed_subset(T *p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
+  }
+}
+template <class T> void assert_is_in_reserved(T *p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
+  }
+}
+template <class T> void assert_nothing(T *p) {}
+
+#else
+template <class T> void assert_is_in(T *p) {}
+template <class T> void assert_is_in_closed_subset(T *p) {}
+template <class T> void assert_is_in_reserved(T *p) {}
+template <class T> void assert_nothing(T *p) {}
+#endif // ASSERT
+
+//
+// Macros that iterate over areas of oops which are specialized on type of
+// oop pointer either narrow or wide, depending on UseCompressedOops
+//
+// Parameters are:
+//   T         - type of oop to point to (either oop or narrowOop)
+//   start_p   - starting pointer for region to iterate over
+//   count     - number of oops or narrowOops to iterate over
+//   do_oop    - action to perform on each oop (it's arbitrary C code which
+//               makes it more efficient to put in a macro rather than making
+//               it a template function)
+//   assert_fn - assert function which is template function because performance
+//               doesn't matter when enabled.
+#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
+  T, start_p, count, do_oop,                \
+  assert_fn)                                \
+{                                           \
+  T* p         = (T*)(start_p);             \
+  T* const end = p + (count);               \
+  while (p < end) {                         \
+    (assert_fn)(p);                         \
+    do_oop;                                 \
+    ++p;                                    \
+  }                                         \
+}
+
+#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
+  T, start_p, count, do_oop,                \
+  assert_fn)                                \
+{                                           \
+  T* const start = (T*)(start_p);           \
+  T*       p     = start + (count);         \
+  while (start < p) {                       \
+    --p;                                    \
+    (assert_fn)(p);                         \
+    do_oop;                                 \
+  }                                         \
+}
+
+#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
+  T, start_p, count, low, high,             \
+  do_oop, assert_fn)                        \
+{                                           \
+  T* const l = (T*)(low);                   \
+  T* const h = (T*)(high);                  \
+  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
+         mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
+         "bounded region must be properly aligned"); \
+  T* p       = (T*)(start_p);               \
+  T* end     = p + (count);                 \
+  if (p < l) p = l;                         \
+  if (end > h) end = h;                     \
+  while (p < end) {                         \
+    (assert_fn)(p);                         \
+    do_oop;                                 \
+    ++p;                                    \
+  }                                         \
+}
+
+
+// The following macros call specialized macros, passing either oop or
+// narrowOop as the specialization type.  These test the UseCompressedOops
+// flag.
+#define InstanceKlass_OOP_ITERATE(start_p, count,    \
+                                  do_oop, assert_fn) \
+{                                                    \
+  if (UseCompressedOops) {                           \
+    InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+      start_p, count,                                \
+      do_oop, assert_fn)                             \
+  } else {                                           \
+    InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,       \
+      start_p, count,                                \
+      do_oop, assert_fn)                             \
+  }                                                  \
+}
+
+#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high,    \
+                                          do_oop, assert_fn) \
+{                                                            \
+  if (UseCompressedOops) {                                   \
+    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+      start_p, count,                                        \
+      low, high,                                             \
+      do_oop, assert_fn)                                     \
+  } else {                                                   \
+    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,       \
+      start_p, count,                                        \
+      low, high,                                             \
+      do_oop, assert_fn)                                     \
+  }                                                          \
+}
+
+#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)            \
+{                                                                        \
+  /* Compute oopmap block range. The common case                         \
+     is nonstatic_oop_map_size == 1. */                                  \
+  OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
+  OopMapBlock* const end_map = map + nonstatic_oop_map_size();           \
+  if (UseCompressedOops) {                                               \
+    while (map < end_map) {                                              \
+      InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
+        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
+        do_oop, assert_fn)                                               \
+      ++map;                                                             \
+    }                                                                    \
+  } else {                                                               \
+    while (map < end_map) {                                              \
+      InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
+        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
+        do_oop, assert_fn)                                               \
+      ++map;                                                             \
+    }                                                                    \
+  }                                                                      \
+}
+
+#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
+{                                                                        \
+  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
+  OopMapBlock* map             = start_map + nonstatic_oop_map_size();   \
+  if (UseCompressedOops) {                                               \
+    while (start_map < map) {                                            \
+      --map;                                                             \
+      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
+        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
+        do_oop, assert_fn)                                               \
+    }                                                                    \
+  } else {                                                               \
+    while (start_map < map) {                                            \
+      --map;                                                             \
+      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
+        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
+        do_oop, assert_fn)                                               \
+    }                                                                    \
+  }                                                                      \
+}
+
+#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop,    \
+                                              assert_fn)                 \
+{                                                                        \
+  /* Compute oopmap block range. The common case is                      \
+     nonstatic_oop_map_size == 1, so we accept the                       \
+     usually non-existent extra overhead of examining                    \
+     all the maps. */                                                    \
+  OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
+  OopMapBlock* const end_map = map + nonstatic_oop_map_size();           \
+  if (UseCompressedOops) {                                               \
+    while (map < end_map) {                                              \
+      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
+        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
+        low, high,                                                       \
+        do_oop, assert_fn)                                               \
+      ++map;                                                             \
+    }                                                                    \
+  } else {                                                               \
+    while (map < end_map) {                                              \
+      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
+        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
+        low, high,                                                       \
+        do_oop, assert_fn)                                               \
+      ++map;                                                             \
+    }                                                                    \
+  }                                                                      \
+}
+
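
The iteration macros above all share one shape: branch once on UseCompressedOops, then walk the OopMapBlock table with either narrowOop* or oop* field addresses, splicing the do_oop argument in as the loop body with p bound to the current slot. A minimal, self-contained sketch of that pattern follows; every type and name in it (MiniOopMapBlock, narrowOopSketch, oopSketch) is a hypothetical stand-in, not a HotSpot declaration.

// Minimal sketch of the oop-map iteration pattern; illustrative only.
#include <cstdint>
#include <cstdio>

typedef uint32_t narrowOopSketch;   // compressed field slot
typedef void*    oopSketch;         // full-width field slot

struct MiniOopMapBlock { int offset; int length; };

static bool UseCompressedOopsSketch = true;

// The macro's do_oop argument becomes a callback here, with p bound to the
// address of the current field slot.
template <class T, class DoOop>
void iterate_block(char* obj_base, const MiniOopMapBlock& map, DoOop do_oop) {
  T* p   = reinterpret_cast<T*>(obj_base + map.offset);
  T* end = p + map.length;
  while (p < end) {
    do_oop(p);
    ++p;
  }
}

template <class DoNarrow, class DoWide>
void iterate_oop_maps(char* obj_base, const MiniOopMapBlock* maps, int n,
                      DoNarrow do_narrow, DoWide do_wide) {
  // Branch once on the flag, as the macros do, so each loop is specialized
  // for a single slot width.
  for (int i = 0; i < n; i++) {
    if (UseCompressedOopsSketch) {
      iterate_block<narrowOopSketch>(obj_base, maps[i], do_narrow);
    } else {
      iterate_block<oopSketch>(obj_base, maps[i], do_wide);
    }
  }
}

int main() {
  char obj[64] = {0};
  MiniOopMapBlock maps[1] = { {16, 3} };   // one block: 3 fields at offset 16
  iterate_oop_maps(obj, maps, 1,
    [](narrowOopSketch* p) { printf("narrow slot at %p\n", (void*)p); },
    [](oopSketch* p)       { printf("wide slot at   %p\n", (void*)p); });
  return 0;
}

Branching on the flag outside the loops keeps each loop body specialized for a single slot width, which is also why the macros duplicate the while loops rather than testing the flag per field.
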
 void instanceKlass::follow_static_fields() {
-  oop* start = start_of_static_fields();
-  oop* end   = start + static_oop_field_size();
-  while (start < end) {
-    if (*start != NULL) {
-      assert(Universe::heap()->is_in_closed_subset(*start),
-             "should be in heap");
-      MarkSweep::mark_and_push(start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    MarkSweep::mark_and_push(p), \
+    assert_is_in_closed_subset)
 }
 
 #ifndef SERIALGC
 void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
-  oop* start = start_of_static_fields();
-  oop* end   = start + static_oop_field_size();
-  while (start < end) {
-    if (*start != NULL) {
-      assert(Universe::heap()->is_in(*start), "should be in heap");
-      PSParallelCompact::mark_and_push(cm, start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    PSParallelCompact::mark_and_push(cm, p), \
+    assert_is_in)
 }
 #endif // SERIALGC
 
-
 void instanceKlass::adjust_static_fields() {
-  oop* start = start_of_static_fields();
-  oop* end   = start + static_oop_field_size();
-  while (start < end) {
-    MarkSweep::adjust_pointer(start);
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    MarkSweep::adjust_pointer(p), \
+    assert_nothing)
 }
 
 #ifndef SERIALGC
 void instanceKlass::update_static_fields() {
-  oop* const start = start_of_static_fields();
-  oop* const beg_oop = start;
-  oop* const end_oop = start + static_oop_field_size();
-  for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-    PSParallelCompact::adjust_pointer(cur_oop);
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_nothing)
 }
 
-void
-instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
-  oop* const start = start_of_static_fields();
-  oop* const beg_oop = MAX2((oop*)beg_addr, start);
-  oop* const end_oop = MIN2((oop*)end_addr, start + static_oop_field_size());
-  for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-    PSParallelCompact::adjust_pointer(cur_oop);
-  }
+void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
+  InstanceKlass_BOUNDED_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    beg_addr, end_addr, \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_nothing )
 }
 #endif // SERIALGC
 
 void instanceKlass::oop_follow_contents(oop obj) {
-  assert (obj!=NULL, "can't follow the content of NULL object");
+  assert(obj != NULL, "can't follow the content of NULL object");
   obj->follow_header();
-  OopMapBlock* map     = start_of_nonstatic_oop_maps();
-  OopMapBlock* end_map = map + nonstatic_oop_map_size();
-  while (map < end_map) {
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* end   = start + map->length();
-    while (start < end) {
-      if (*start != NULL) {
-        assert(Universe::heap()->is_in_closed_subset(*start),
-               "should be in heap");
-        MarkSweep::mark_and_push(start);
-      }
-      start++;
-    }
-    map++;
-  }
+  InstanceKlass_OOP_MAP_ITERATE( \
+    obj, \
+    MarkSweep::mark_and_push(p), \
+    assert_is_in_closed_subset)
 }
 
 #ifndef SERIALGC
 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
                                         oop obj) {
-  assert (obj!=NULL, "can't follow the content of NULL object");
+  assert(obj != NULL, "can't follow the content of NULL object");
   obj->follow_header(cm);
-  OopMapBlock* map     = start_of_nonstatic_oop_maps();
-  OopMapBlock* end_map = map + nonstatic_oop_map_size();
-  while (map < end_map) {
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* end   = start + map->length();
-    while (start < end) {
-      if (*start != NULL) {
-        assert(Universe::heap()->is_in(*start), "should be in heap");
-        PSParallelCompact::mark_and_push(cm, start);
-      }
-      start++;
-    }
-    map++;
-  }
+  InstanceKlass_OOP_MAP_ITERATE( \
+    obj, \
+    PSParallelCompact::mark_and_push(cm, p), \
+    assert_is_in)
 }
 #endif // SERIALGC
 
-#define invoke_closure_on(start, closure, nv_suffix) {                          \
-  oop obj = *(start);                                                           \
-  if (obj != NULL) {                                                            \
-    assert(Universe::heap()->is_in_closed_subset(obj), "should be in heap");    \
-    (closure)->do_oop##nv_suffix(start);                                        \
-  }                                                                             \
-}
-
 // closure's do_header() method dictates whether the given closure should be
 // applied to the klass ptr in the object header.
 
-#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)           \
-                                                                                \
-int instanceKlass::oop_oop_iterate##nv_suffix(oop obj,                          \
-                                              OopClosureType* closure) {        \
-  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
-  /* header */                                                                  \
-  if (closure->do_header()) {                                                   \
-    obj->oop_iterate_header(closure);                                           \
-  }                                                                             \
-  /* instance variables */                                                      \
-  OopMapBlock* map     = start_of_nonstatic_oop_maps();                         \
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();                  \
-  const intx field_offset    = PrefetchFieldsAhead;                             \
-  if (field_offset > 0) {                                                       \
-    while (map < end_map) {                                                     \
-      oop* start = obj->obj_field_addr(map->offset());                          \
-      oop* const end   = start + map->length();                                 \
-      while (start < end) {                                                     \
-        prefetch_beyond(start, (oop*)end, field_offset,                         \
-                        closure->prefetch_style());                             \
-        SpecializationStats::                                                   \
-          record_do_oop_call##nv_suffix(SpecializationStats::ik);               \
-        invoke_closure_on(start, closure, nv_suffix);                           \
-        start++;                                                                \
-      }                                                                         \
-      map++;                                                                    \
-    }                                                                           \
-  } else {                                                                      \
-    while (map < end_map) {                                                     \
-      oop* start = obj->obj_field_addr(map->offset());                          \
-      oop* const end   = start + map->length();                                 \
-      while (start < end) {                                                     \
-        SpecializationStats::                                                   \
-          record_do_oop_call##nv_suffix(SpecializationStats::ik);               \
-        invoke_closure_on(start, closure, nv_suffix);                           \
-        start++;                                                                \
-      }                                                                         \
-      map++;                                                                    \
-    }                                                                           \
-  }                                                                             \
-  return size_helper();                                                         \
+#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)   \
+                                                                        \
+int instanceKlass::oop_oop_iterate##nv_suffix(oop obj,                  \
+                                              OopClosureType* closure) {\
+  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
+  /* header */                                                          \
+  if (closure->do_header()) {                                           \
+    obj->oop_iterate_header(closure);                                   \
+  }                                                                     \
+  InstanceKlass_OOP_MAP_ITERATE(                                        \
+    obj,                                                                \
+    SpecializationStats::                                               \
+      record_do_oop_call##nv_suffix(SpecializationStats::ik);           \
+    (closure)->do_oop##nv_suffix(p),                                    \
+    assert_is_in_closed_subset)                                         \
+  return size_helper();                                                 \
 }
 
-#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)         \
-                                                                                \
-int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,                      \
-                                                  OopClosureType* closure,      \
-                                                  MemRegion mr) {               \
-  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
-  /* header */                                                                  \
-  if (closure->do_header()) {                                                   \
-    obj->oop_iterate_header(closure, mr);                                       \
-  }                                                                             \
-  /* instance variables */                                                      \
-  OopMapBlock* map     = start_of_nonstatic_oop_maps();                         \
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();                  \
-  HeapWord* bot = mr.start();                                                   \
-  HeapWord* top = mr.end();                                                     \
-  oop* start = obj->obj_field_addr(map->offset());                              \
-  HeapWord* end = MIN2((HeapWord*)(start + map->length()), top);                \
-  /* Find the first map entry that extends onto mr. */                          \
-  while (map < end_map && end <= bot) {                                         \
-    map++;                                                                      \
-    start = obj->obj_field_addr(map->offset());                                 \
-    end = MIN2((HeapWord*)(start + map->length()), top);                        \
-  }                                                                             \
-  if (map != end_map) {                                                         \
-    /* The current map's end is past the start of "mr".  Skip up to the first   \
-       entry on "mr". */                                                        \
-    while ((HeapWord*)start < bot) {                                            \
-      start++;                                                                  \
-    }                                                                           \
-    const intx field_offset = PrefetchFieldsAhead;                              \
-    for (;;) {                                                                  \
-      if (field_offset > 0) {                                                   \
-        while ((HeapWord*)start < end) {                                        \
-          prefetch_beyond(start, (oop*)end, field_offset,                       \
-                          closure->prefetch_style());                           \
-          invoke_closure_on(start, closure, nv_suffix);                         \
-          start++;                                                              \
-        }                                                                       \
-      } else {                                                                  \
-        while ((HeapWord*)start < end) {                                        \
-          invoke_closure_on(start, closure, nv_suffix);                         \
-          start++;                                                              \
-        }                                                                       \
-      }                                                                         \
-      /* Go to the next map. */                                                 \
-      map++;                                                                    \
-      if (map == end_map) {                                                     \
-        break;                                                                  \
-      }                                                                         \
-      /* Otherwise,  */                                                         \
-      start = obj->obj_field_addr(map->offset());                               \
-      if ((HeapWord*)start >= top) {                                            \
-        break;                                                                  \
-      }                                                                         \
-      end = MIN2((HeapWord*)(start + map->length()), top);                      \
-    }                                                                           \
-  }                                                                             \
-  return size_helper();                                                         \
+#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
+                                                                        \
+int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,              \
+                                                  OopClosureType* closure, \
+                                                  MemRegion mr) {          \
+  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
+  if (closure->do_header()) {                                            \
+    obj->oop_iterate_header(closure, mr);                                \
+  }                                                                      \
+  InstanceKlass_BOUNDED_OOP_MAP_ITERATE(                                 \
+    obj, mr.start(), mr.end(),                                           \
+    (closure)->do_oop##nv_suffix(p),                                     \
+    assert_is_in_closed_subset)                                          \
+  return size_helper();                                                  \
 }
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
@@ -1474,56 +1554,28 @@
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
 
-
 void instanceKlass::iterate_static_fields(OopClosure* closure) {
-  oop* start = start_of_static_fields();
-  oop* end   = start + static_oop_field_size();
-  while (start < end) {
-    assert(Universe::heap()->is_in_reserved_or_null(*start), "should be in heap");
-    closure->do_oop(start);
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    closure->do_oop(p), \
+    assert_is_in_reserved)
 }
 
 void instanceKlass::iterate_static_fields(OopClosure* closure,
                                           MemRegion mr) {
-  oop* start = start_of_static_fields();
-  oop* end   = start + static_oop_field_size();
-  // I gather that the the static fields of reference types come first,
-  // hence the name of "oop_field_size", and that is what makes this safe.
-  assert((intptr_t)mr.start() ==
-         align_size_up((intptr_t)mr.start(), sizeof(oop)) &&
-         (intptr_t)mr.end() == align_size_up((intptr_t)mr.end(), sizeof(oop)),
-         "Memregion must be oop-aligned.");
-  if ((HeapWord*)start < mr.start()) start = (oop*)mr.start();
-  if ((HeapWord*)end   > mr.end())   end   = (oop*)mr.end();
-  while (start < end) {
-    invoke_closure_on(start, closure,_v);
-    start++;
-  }
+  InstanceKlass_BOUNDED_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    mr.start(), mr.end(), \
+    (closure)->do_oop_v(p), \
+    assert_is_in_closed_subset)
 }
 
-
 int instanceKlass::oop_adjust_pointers(oop obj) {
   int size = size_helper();
-
-  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
-  OopMapBlock* map     = start_of_nonstatic_oop_maps();
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();
-  // Iterate over oopmap blocks
-  while (map < end_map) {
-    // Compute oop range for this block
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* end   = start + map->length();
-    // Iterate over oops
-    while (start < end) {
-      assert(Universe::heap()->is_in_or_null(*start), "should be in heap");
-      MarkSweep::adjust_pointer(start);
-      start++;
-    }
-    map++;
-  }
-
+  InstanceKlass_OOP_MAP_ITERATE( \
+    obj, \
+    MarkSweep::adjust_pointer(p), \
+    assert_is_in)
   obj->adjust_header();
   return size;
 }
@@ -1531,132 +1583,66 @@
 #ifndef SERIALGC
 void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
   assert(!pm->depth_first(), "invariant");
-  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
-  OopMapBlock* start_map = start_of_nonstatic_oop_maps();
-  OopMapBlock* map       = start_map + nonstatic_oop_map_size();
-
-  // Iterate over oopmap blocks
-  while (start_map < map) {
-    --map;
-    // Compute oop range for this block
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* curr  = start + map->length();
-    // Iterate over oops
-    while (start < curr) {
-      --curr;
-      if (PSScavenge::should_scavenge(*curr)) {
-        assert(Universe::heap()->is_in(*curr), "should be in heap");
-        pm->claim_or_forward_breadth(curr);
-      }
-    }
-  }
+  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
+    obj, \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_breadth(p); \
+    }, \
+    assert_nothing )
 }
 
 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   assert(pm->depth_first(), "invariant");
-  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
-  OopMapBlock* start_map = start_of_nonstatic_oop_maps();
-  OopMapBlock* map       = start_map + nonstatic_oop_map_size();
-
-  // Iterate over oopmap blocks
-  while (start_map < map) {
-    --map;
-    // Compute oop range for this block
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* curr  = start + map->length();
-    // Iterate over oops
-    while (start < curr) {
-      --curr;
-      if (PSScavenge::should_scavenge(*curr)) {
-        assert(Universe::heap()->is_in(*curr), "should be in heap");
-        pm->claim_or_forward_depth(curr);
-      }
-    }
-  }
+  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
+    obj, \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_depth(p); \
+    }, \
+    assert_nothing )
 }
 
 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
-  // Compute oopmap block range.  The common case is nonstatic_oop_map_size==1.
-  OopMapBlock* map           = start_of_nonstatic_oop_maps();
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();
-  // Iterate over oopmap blocks
-  while (map < end_map) {
-    // Compute oop range for this oopmap block.
-    oop* const map_start = obj->obj_field_addr(map->offset());
-    oop* const beg_oop = map_start;
-    oop* const end_oop = map_start + map->length();
-    for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-      PSParallelCompact::adjust_pointer(cur_oop);
-    }
-    ++map;
-  }
-
+  InstanceKlass_OOP_MAP_ITERATE( \
+    obj, \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_nothing)
   return size_helper();
 }
 
 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
                                        HeapWord* beg_addr, HeapWord* end_addr) {
-  // Compute oopmap block range.  The common case is nonstatic_oop_map_size==1.
-  OopMapBlock* map           = start_of_nonstatic_oop_maps();
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();
-  // Iterate over oopmap blocks
-  while (map < end_map) {
-    // Compute oop range for this oopmap block.
-    oop* const map_start = obj->obj_field_addr(map->offset());
-    oop* const beg_oop = MAX2((oop*)beg_addr, map_start);
-    oop* const end_oop = MIN2((oop*)end_addr, map_start + map->length());
-    for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-      PSParallelCompact::adjust_pointer(cur_oop);
-    }
-    ++map;
-  }
-
+  InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
+    obj, beg_addr, end_addr, \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_nothing)
   return size_helper();
 }
 
 void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
   assert(!pm->depth_first(), "invariant");
-  // Compute oop range
-  oop* start = start_of_static_fields();
-  oop* end   = start + static_oop_field_size();
-  // Iterate over oops
-  while (start < end) {
-    if (PSScavenge::should_scavenge(*start)) {
-      assert(Universe::heap()->is_in(*start), "should be in heap");
-      pm->claim_or_forward_breadth(start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_breadth(p); \
+    }, \
+    assert_nothing )
 }
 
 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
   assert(pm->depth_first(), "invariant");
-  // Compute oop range
-  oop* start = start_of_static_fields();
-  oop* end   = start + static_oop_field_size();
-  // Iterate over oops
-  while (start < end) {
-    if (PSScavenge::should_scavenge(*start)) {
-      assert(Universe::heap()->is_in(*start), "should be in heap");
-      pm->claim_or_forward_depth(start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_depth(p); \
+    }, \
+    assert_nothing )
 }
 
 void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
-  // Compute oop range
-  oop* start = start_of_static_fields();
-  oop* end   = start + static_oop_field_size();
-  // Iterate over oops
-  while (start < end) {
-    if (*start != NULL) {
-      assert(Universe::heap()->is_in(*start), "should be in heap");
-      // *start = (oop) cm->summary_data()->calc_new_pointer(*start);
-      PSParallelCompact::adjust_pointer(start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_is_in)
 }
 #endif // SERIALGC
 
@@ -1687,18 +1673,15 @@
   Klass::follow_weak_klass_links(is_alive, keep_alive);
 }
 
-
 void instanceKlass::remove_unshareable_info() {
   Klass::remove_unshareable_info();
   init_implementor();
 }
 
-
 static void clear_all_breakpoints(methodOop m) {
   m->clear_all_breakpoints();
 }
 
-
 void instanceKlass::release_C_heap_structures() {
   // Deallocate oop map cache
   if (_oop_map_cache != NULL) {
@@ -2047,29 +2030,30 @@
   obj->print_address_on(st);
 }
 
-#endif
+#endif // ndef PRODUCT
 
 const char* instanceKlass::internal_name() const {
   return external_name();
 }
 
-
-
 // Verification
 
 class VerifyFieldClosure: public OopClosure {
- public:
-  void do_oop(oop* p) {
+ protected:
+  template <class T> void do_oop_work(T* p) {
     guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
-    if (!(*p)->is_oop_or_null()) {
-      tty->print_cr("Failed: %p -> %p",p,(address)*p);
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    if (!obj->is_oop_or_null()) {
+      tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
       Universe::print();
       guarantee(false, "boom");
     }
   }
+ public:
+  virtual void do_oop(oop* p)       { VerifyFieldClosure::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
 };
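
The rewritten VerifyFieldClosure above is an instance of the closure idiom this changeset uses throughout: a protected template do_oop_work<T>(T* p) holds the logic once, and the two virtual do_oop overloads (oop* and narrowOop*) simply forward to it. A self-contained sketch of the same idiom, with made-up stand-in types rather than the real OopClosure hierarchy:

// Sketch of the dual-overload closure idiom; the real OopClosure, oop and
// narrowOop declarations live in the HotSpot sources, these are stand-ins.
#include <cstdint>
#include <cstdio>

typedef void*    oopS;        // hypothetical full-width reference slot
typedef uint32_t narrowOopS;  // hypothetical compressed reference slot

class OopClosureSketch {
 public:
  virtual void do_oop(oopS* p) = 0;
  virtual void do_oop(narrowOopS* p) = 0;
  virtual ~OopClosureSketch() {}
};

class VerifySketch : public OopClosureSketch {
 protected:
  // One implementation, instantiated for both slot widths.
  template <class T> void do_oop_work(T* p) {
    printf("visiting a %zu-byte slot at %p\n", sizeof(T), (void*)p);
  }
 public:
  virtual void do_oop(oopS* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOopS* p) { do_oop_work(p); }
};

int main() {
  VerifySketch v;
  oopS wide = 0;
  narrowOopS narrow = 0;
  v.do_oop(&wide);    // dispatches to do_oop_work<oopS>
  v.do_oop(&narrow);  // dispatches to do_oop_work<narrowOopS>
  return 0;
}
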
 
-
 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
   Klass::oop_verify_on(obj, st);
   VerifyFieldClosure blk;
@@ -2110,26 +2094,28 @@
   }
 }
 
-#endif
+#endif // ndef PRODUCT
+
+// JNIid class for jfieldIDs only
+// Note to reviewers:
+// These JNI functions are just moved over to column 1 and not changed
+// in the compressed oops workspace.
+JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
+  _holder = holder;
+  _offset = offset;
+  _next = next;
+  debug_only(_is_static_field_id = false;)
+}
 
 
-/* JNIid class for jfieldIDs only */
- JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
-   _holder = holder;
-   _offset = offset;
-   _next = next;
-   debug_only(_is_static_field_id = false;)
- }
-
-
- JNIid* JNIid::find(int offset) {
-   JNIid* current = this;
-   while (current != NULL) {
-     if (current->offset() == offset) return current;
-     current = current->next();
-   }
-   return NULL;
- }
+JNIid* JNIid::find(int offset) {
+  JNIid* current = this;
+  while (current != NULL) {
+    if (current->offset() == offset) return current;
+    current = current->next();
+  }
+  return NULL;
+}
 
 void JNIid::oops_do(OopClosure* f) {
   for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
@@ -2138,40 +2124,40 @@
 }
 
 void JNIid::deallocate(JNIid* current) {
-   while (current != NULL) {
-     JNIid* next = current->next();
-     delete current;
-     current = next;
-   }
- }
+  while (current != NULL) {
+    JNIid* next = current->next();
+    delete current;
+    current = next;
+  }
+}
 
 
- void JNIid::verify(klassOop holder) {
-   int first_field_offset  = instanceKlass::cast(holder)->offset_of_static_fields();
-   int end_field_offset;
-   end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
+void JNIid::verify(klassOop holder) {
+  int first_field_offset  = instanceKlass::cast(holder)->offset_of_static_fields();
+  int end_field_offset;
+  end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
 
-   JNIid* current = this;
-   while (current != NULL) {
-     guarantee(current->holder() == holder, "Invalid klass in JNIid");
- #ifdef ASSERT
-     int o = current->offset();
-     if (current->is_static_field_id()) {
-       guarantee(o >= first_field_offset  && o < end_field_offset,  "Invalid static field offset in JNIid");
-     }
- #endif
-     current = current->next();
-   }
- }
+  JNIid* current = this;
+  while (current != NULL) {
+    guarantee(current->holder() == holder, "Invalid klass in JNIid");
+#ifdef ASSERT
+    int o = current->offset();
+    if (current->is_static_field_id()) {
+      guarantee(o >= first_field_offset  && o < end_field_offset,  "Invalid static field offset in JNIid");
+    }
+#endif
+    current = current->next();
+  }
+}
 
 
 #ifdef ASSERT
-  void instanceKlass::set_init_state(ClassState state) {
-    bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
-                                                 : (_init_state < state);
-    assert(good_state || state == allocated, "illegal state transition");
-    _init_state = state;
-  }
+void instanceKlass::set_init_state(ClassState state) {
+  bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
+                                               : (_init_state < state);
+  assert(good_state || state == allocated, "illegal state transition");
+  _init_state = state;
+}
 #endif
 
 
@@ -2180,9 +2166,9 @@
 // Add an information node that contains weak references to the
 // interesting parts of the previous version of the_class.
 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
-       BitMap * emcp_methods, int emcp_method_count) {
+       BitMap* emcp_methods, int emcp_method_count) {
   assert(Thread::current()->is_VM_thread(),
-    "only VMThread can add previous versions");
+         "only VMThread can add previous versions");
 
   if (_previous_versions == NULL) {
     // This is the first previous version so make some space.
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -180,12 +180,16 @@
   // End of the oop block.
   //
 
-  int             _nonstatic_field_size; // number of non-static fields in this klass (including inherited fields)
-  int             _static_field_size;    // number of static fields (oop and non-oop) in this klass
+  // number of words used by non-static fields in this klass (including
+  // inherited fields but after header_size()).  If fields are compressed into
+  // the header, this can be zero, so it is not the same as the number of
+  // nonstatic fields.
+  int             _nonstatic_field_size;
+  int             _static_field_size;    // number of words used by static fields (oop and non-oop) in this klass
   int             _static_oop_field_size;// number of static oop fields in this klass
   int             _nonstatic_oop_map_size;// number of nonstatic oop-map blocks allocated at end of this klass
   bool            _is_marked_dependent;  // used for marking during flushing and deoptimization
   bool            _rewritten;            // methods rewritten.
+  bool            _has_nonstatic_fields; // for sizing with UseCompressedOops
   u2              _minor_version;        // minor version number of class file
   u2              _major_version;        // major version number of class file
   ClassState      _init_state;           // state of class
@@ -221,6 +225,9 @@
   friend class SystemDictionary;
 
  public:
+  bool has_nonstatic_fields() const        { return _has_nonstatic_fields; }
+  void set_has_nonstatic_fields(bool b)    { _has_nonstatic_fields = b; }
+
   // field sizes
   int nonstatic_field_size() const         { return _nonstatic_field_size; }
   void set_nonstatic_field_size(int size)  { _nonstatic_field_size = size; }
@@ -340,8 +347,7 @@
 
   // find a non-static or static field given its offset within the class.
   bool contains_field_offset(int offset) {
-      return ((offset/wordSize) >= instanceOopDesc::header_size() &&
-             (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size());
+    return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size());
   }
 
   bool find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
@@ -570,12 +576,21 @@
   intptr_t* start_of_itable() const        { return start_of_vtable() + align_object_offset(vtable_length()); }
   int  itable_offset_in_words() const { return start_of_itable() - (intptr_t*)as_klassOop(); }
 
-  oop* start_of_static_fields() const { return (oop*)(start_of_itable() + align_object_offset(itable_length())); }
+  // Static field offset is an offset into the heap; it should be converted
+  // based on UseCompressedOops for traversal
+  HeapWord* start_of_static_fields() const {
+    return (HeapWord*)(start_of_itable() + align_object_offset(itable_length()));
+  }
+
   intptr_t* end_of_itable() const          { return start_of_itable() + itable_length(); }
-  oop* end_of_static_fields() const   { return start_of_static_fields() + static_field_size(); }
-  int offset_of_static_fields() const { return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop(); }
 
-  OopMapBlock* start_of_nonstatic_oop_maps() const { return (OopMapBlock*) (start_of_static_fields() + static_field_size()); }
+  int offset_of_static_fields() const {
+    return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop();
+  }
+
+  OopMapBlock* start_of_nonstatic_oop_maps() const {
+    return (OopMapBlock*) (start_of_static_fields() + static_field_size());
+  }
 
   // Allocation profiling support
   juint alloc_size() const            { return _alloc_count * size_helper(); }
--- a/hotspot/src/share/vm/oops/instanceKlassKlass.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlassKlass.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -286,17 +286,17 @@
   ik->copy_static_fields(pm);
 
   oop* loader_addr = ik->adr_class_loader();
-  if (PSScavenge::should_scavenge(*loader_addr)) {
+  if (PSScavenge::should_scavenge(loader_addr)) {
     pm->claim_or_forward_breadth(loader_addr);
   }
 
   oop* pd_addr = ik->adr_protection_domain();
-  if (PSScavenge::should_scavenge(*pd_addr)) {
+  if (PSScavenge::should_scavenge(pd_addr)) {
     pm->claim_or_forward_breadth(pd_addr);
   }
 
   oop* sg_addr = ik->adr_signers();
-  if (PSScavenge::should_scavenge(*sg_addr)) {
+  if (PSScavenge::should_scavenge(sg_addr)) {
     pm->claim_or_forward_breadth(sg_addr);
   }
 
@@ -309,17 +309,17 @@
   ik->push_static_fields(pm);
 
   oop* loader_addr = ik->adr_class_loader();
-  if (PSScavenge::should_scavenge(*loader_addr)) {
+  if (PSScavenge::should_scavenge(loader_addr)) {
     pm->claim_or_forward_depth(loader_addr);
   }
 
   oop* pd_addr = ik->adr_protection_domain();
-  if (PSScavenge::should_scavenge(*pd_addr)) {
+  if (PSScavenge::should_scavenge(pd_addr)) {
     pm->claim_or_forward_depth(pd_addr);
   }
 
   oop* sg_addr = ik->adr_signers();
-  if (PSScavenge::should_scavenge(*sg_addr)) {
+  if (PSScavenge::should_scavenge(sg_addr)) {
     pm->claim_or_forward_depth(sg_addr);
   }
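
These call sites now pass the field address instead of the loaded oop, which suggests should_scavenge() takes a T* and performs the load-and-decode itself so the same check works for narrow and wide slots. A hedged sketch of a pointer-taking check of that shape follows; the types, decode rule and young-generation boundary are invented for illustration and are not the PSScavenge implementation.

// Sketch of a should_scavenge-style check that takes the slot address and
// decodes it per slot width.  All constants and names are assumptions.
#include <cstdint>
#include <cstdio>

typedef uint32_t narrowOopS;   // compressed slot
typedef uint64_t oopS;         // full-width slot (address as an integer here)

const oopS heap_base_S       = 0x100000000ULL;  // assumed narrow-oop base
const oopS young_gen_limit_S = 0x180000000ULL;  // assumed boundary

inline oopS decode_sketch(narrowOopS v) { return heap_base_S + ((oopS)v << 3); }
inline oopS decode_sketch(oopS v)       { return v; }

template <class T>
bool should_scavenge_sketch(T* p) {
  oopS obj = decode_sketch(*p);                  // load and decode the slot
  return obj != 0 && obj < young_gen_limit_S;    // "young" under the fake boundary
}

int main() {
  narrowOopS narrow_slot = 0x1000;               // decodes below the boundary
  oopS       wide_slot   = 0x200000000ULL;       // above the boundary
  printf("%d %d\n", (int)should_scavenge_sketch(&narrow_slot),
                    (int)should_scavenge_sketch(&wide_slot));   // prints "1 0"
  return 0;
}
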
 
@@ -602,16 +602,18 @@
 
 // Verification
 
-
 class VerifyFieldClosure: public OopClosure {
- public:
-  void do_oop(oop* p) {
+ protected:
+  template <class T> void do_oop_work(T* p) {
     guarantee(Universe::heap()->is_in(p), "should be in heap");
-    guarantee((*p)->is_oop_or_null(), "should be in heap");
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(obj->is_oop_or_null(), "should be in heap");
   }
+ public:
+  virtual void do_oop(oop* p)       { VerifyFieldClosure::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
 };
 
-
 void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) {
   klassKlass::oop_verify_on(obj, st);
   if (!obj->partially_loaded()) {
--- a/hotspot/src/share/vm/oops/instanceOop.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceOop.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -27,5 +27,26 @@
 
 class instanceOopDesc : public oopDesc {
  public:
+  // aligned header size.
   static int header_size() { return sizeof(instanceOopDesc)/HeapWordSize; }
+
+  // If compressed, the offset of the fields of the instance may not be aligned.
+  static int base_offset_in_bytes() {
+    return UseCompressedOops ?
+             klass_gap_offset_in_bytes() :
+             sizeof(instanceOopDesc);
+  }
+
+  static bool contains_field_offset(int offset, int nonstatic_field_size) {
+    int base_in_bytes = base_offset_in_bytes();
+    if (UseCompressedOops) {
+      return (offset >= base_in_bytes &&
+              // field can be embedded in header, or is after header.
+              (offset < (int)sizeof(instanceOopDesc) ||
+              (offset-(int)sizeof(instanceOopDesc))/wordSize < nonstatic_field_size));
+    } else {
+      return (offset >= base_in_bytes &&
+              (offset-base_in_bytes)/wordSize < nonstatic_field_size);
+    }
+  }
 };
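
contains_field_offset() accepts a byte offset if it lands in the instance's field area; with compressed oops the first fields may be packed into the klass gap inside the header, so offsets below sizeof(instanceOopDesc) can still be valid. A small sketch of the same arithmetic with the header sizes passed in explicitly; the 16-byte header and 12-byte gap offset used in the example are illustrative assumptions, not values taken from the patch.

// Sketch of the contains_field_offset() logic with explicit parameters.
#include <cstdio>

const int wordSizeS = 8;  // assumed 64-bit word

bool contains_field_offset_sketch(int offset, int nonstatic_field_size_words,
                                  bool use_compressed_oops,
                                  int header_size_bytes,       // sizeof(instanceOopDesc)
                                  int klass_gap_offset_bytes)  // start of the header gap
{
  int base = use_compressed_oops ? klass_gap_offset_bytes : header_size_bytes;
  if (use_compressed_oops) {
    // A field may sit inside the header gap, or after the full header.
    return offset >= base &&
           (offset < header_size_bytes ||
            (offset - header_size_bytes) / wordSizeS < nonstatic_field_size_words);
  }
  return offset >= base &&
         (offset - base) / wordSizeS < nonstatic_field_size_words;
}

int main() {
  // With the assumed 16-byte header and 12-byte gap start, offset 12 is a
  // valid field slot only when oops are compressed into the gap.
  printf("%d\n", contains_field_offset_sketch(12, 2, true,  16, 12));  // 1
  printf("%d\n", contains_field_offset_sketch(12, 2, false, 16, 12));  // 0
  return 0;
}
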
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -25,23 +25,77 @@
 # include "incls/_precompiled.incl"
 # include "incls/_instanceRefKlass.cpp.incl"
 
-void instanceRefKlass::oop_follow_contents(oop obj) {
-  oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
-  oop referent = *referent_addr;
+template <class T>
+static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  oop referent = oopDesc::load_decode_heap_oop(referent_addr);
   debug_only(
     if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (address)obj);
+      gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
     }
   )
   if (referent != NULL) {
     if (!referent->is_gc_marked() &&
         MarkSweep::ref_processor()->
-          discover_reference(obj, reference_type())) {
+          discover_reference(obj, ref->reference_type())) {
       // reference already enqueued, referent will be traversed later
-      instanceKlass::oop_follow_contents(obj);
+      ref->instanceKlass::oop_follow_contents(obj);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, obj);
+        }
+      )
+      return;
+    } else {
+      // treat referent as normal oop
       debug_only(
         if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (address)obj);
+          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, obj);
+        }
+      )
+      MarkSweep::mark_and_push(referent_addr);
+    }
+  }
+  // treat next as normal oop.  next is a link in the pending list.
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  debug_only(
+    if(TraceReferenceGC && PrintGCDetails) {
+      gclog_or_tty->print_cr("   Process next as normal " INTPTR_FORMAT, next_addr);
+    }
+  )
+  MarkSweep::mark_and_push(next_addr);
+  ref->instanceKlass::oop_follow_contents(obj);
+}
+
+void instanceRefKlass::oop_follow_contents(oop obj) {
+  if (UseCompressedOops) {
+    specialized_oop_follow_contents<narrowOop>(this, obj);
+  } else {
+    specialized_oop_follow_contents<oop>(this, obj);
+  }
+}
+
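
The conversion pattern for the instanceRefKlass methods is visible above: the body moves into a file-static helper templated on the slot type, and the virtual entry point branches once on UseCompressedOops to pick an instantiation. A minimal sketch of that dispatch, with invented names standing in for the real entry points:

// Sketch of the "templated static helper + runtime dispatch" pattern used for
// the instanceRefKlass conversions; every name here is a made-up stand-in.
#include <cstdint>
#include <cstdio>

typedef uint32_t narrowOopS;
typedef void*    oopS;

static bool UseCompressedOopsSketch = false;

template <class T>
static void specialized_follow_sketch(const char* who) {
  // The real helpers compute T* referent_addr/next_addr and decode them;
  // this stub only reports which instantiation the dispatch picked.
  printf("%s: following with %zu-byte slots\n", who, sizeof(T));
}

void follow_contents_sketch() {
  if (UseCompressedOopsSketch) {
    specialized_follow_sketch<narrowOopS>("follow_contents");
  } else {
    specialized_follow_sketch<oopS>("follow_contents");
  }
}

int main() {
  follow_contents_sketch();                 // wide slots
  UseCompressedOopsSketch = true;
  follow_contents_sketch();                 // narrow slots
  return 0;
}
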
+#ifndef SERIALGC
+template <class T>
+static void specialized_oop_follow_contents(instanceRefKlass* ref,
+                                            ParCompactionManager* cm,
+                                            oop obj) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  oop referent = oopDesc::load_decode_heap_oop(referent_addr);
+  debug_only(
+    if(TraceReferenceGC && PrintGCDetails) {
+      gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
+    }
+  )
+  if (referent != NULL) {
+    if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
+        PSParallelCompact::ref_processor()->
+          discover_reference(obj, ref->reference_type())) {
+      // reference already enqueued, referent will be traversed later
+      ref->instanceKlass::oop_follow_contents(cm, obj);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, obj);
         }
       )
       return;
@@ -49,98 +103,106 @@
       // treat referent as normal oop
       debug_only(
         if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, (address)obj);
-        }
-      )
-      MarkSweep::mark_and_push(referent_addr);
-    }
-  }
-  // treat next as normal oop.  next is a link in the pending list.
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("   Process next as normal " INTPTR_FORMAT, next_addr);
-    }
-  )
-  MarkSweep::mark_and_push(next_addr);
-  instanceKlass::oop_follow_contents(obj);
-}
-
-#ifndef SERIALGC
-void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm,
-                                           oop obj) {
-  oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
-  oop referent = *referent_addr;
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (address)obj);
-    }
-  )
-  if (referent != NULL) {
-    if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
-        PSParallelCompact::ref_processor()->
-          discover_reference(obj, reference_type())) {
-      // reference already enqueued, referent will be traversed later
-      instanceKlass::oop_follow_contents(cm, obj);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (address)obj);
-        }
-      )
-      return;
-    } else {
-      // treat referent as normal oop
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, (address)obj);
+          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, obj);
         }
       )
       PSParallelCompact::mark_and_push(cm, referent_addr);
     }
   }
   // treat next as normal oop.  next is a link in the pending list.
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
   debug_only(
     if(TraceReferenceGC && PrintGCDetails) {
       gclog_or_tty->print_cr("   Process next as normal " INTPTR_FORMAT, next_addr);
     }
   )
   PSParallelCompact::mark_and_push(cm, next_addr);
-  instanceKlass::oop_follow_contents(cm, obj);
+  ref->instanceKlass::oop_follow_contents(cm, obj);
+}
+
+void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm,
+                                           oop obj) {
+  if (UseCompressedOops) {
+    specialized_oop_follow_contents<narrowOop>(this, cm, obj);
+  } else {
+    specialized_oop_follow_contents<oop>(this, cm, obj);
+  }
 }
 #endif // SERIALGC
 
+#ifdef ASSERT
+template <class T> void trace_reference_gc(const char *s, oop obj,
+                                           T* referent_addr,
+                                           T* next_addr,
+                                           T* discovered_addr) {
+  if(TraceReferenceGC && PrintGCDetails) {
+    gclog_or_tty->print_cr("%s obj " INTPTR_FORMAT, s, (address)obj);
+    gclog_or_tty->print_cr("     referent_addr/* " INTPTR_FORMAT " / "
+         INTPTR_FORMAT, referent_addr,
+         referent_addr ?
+           (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL);
+    gclog_or_tty->print_cr("     next_addr/* " INTPTR_FORMAT " / "
+         INTPTR_FORMAT, next_addr,
+         next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL);
+    gclog_or_tty->print_cr("     discovered_addr/* " INTPTR_FORMAT " / "
+         INTPTR_FORMAT, discovered_addr,
+         discovered_addr ?
+           (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL);
+  }
+}
+#endif
+
+template <class T> void specialized_oop_adjust_pointers(instanceRefKlass *ref, oop obj) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  MarkSweep::adjust_pointer(referent_addr);
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  MarkSweep::adjust_pointer(next_addr);
+  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+  MarkSweep::adjust_pointer(discovered_addr);
+  debug_only(trace_reference_gc("instanceRefKlass::oop_adjust_pointers", obj,
+                                referent_addr, next_addr, discovered_addr);)
+}
 
 int instanceRefKlass::oop_adjust_pointers(oop obj) {
   int size = size_helper();
   instanceKlass::oop_adjust_pointers(obj);
 
-  oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
-  MarkSweep::adjust_pointer(referent_addr);
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);
-  MarkSweep::adjust_pointer(next_addr);
-  oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
-  MarkSweep::adjust_pointer(discovered_addr);
-
-#ifdef ASSERT
-  if(TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr("instanceRefKlass::oop_adjust_pointers obj "
-                           INTPTR_FORMAT, (address)obj);
-    gclog_or_tty->print_cr("     referent_addr/* " INTPTR_FORMAT " / "
-                           INTPTR_FORMAT, referent_addr,
-                           referent_addr ? (address)*referent_addr : NULL);
-    gclog_or_tty->print_cr("     next_addr/* " INTPTR_FORMAT " / "
-                           INTPTR_FORMAT, next_addr,
-                           next_addr ? (address)*next_addr : NULL);
-    gclog_or_tty->print_cr("     discovered_addr/* " INTPTR_FORMAT " / "
-                           INTPTR_FORMAT, discovered_addr,
-                           discovered_addr ? (address)*discovered_addr : NULL);
+  if (UseCompressedOops) {
+    specialized_oop_adjust_pointers<narrowOop>(this, obj);
+  } else {
+    specialized_oop_adjust_pointers<oop>(this, obj);
   }
-#endif
-
   return size;
 }
 
+#define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains)        \
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);           \
+  oop referent = oopDesc::load_decode_heap_oop(referent_addr);                  \
+  if (referent != NULL && contains(referent_addr)) {                            \
+    ReferenceProcessor* rp = closure->_ref_processor;                           \
+    if (!referent->is_gc_marked() && (rp != NULL) &&                            \
+        rp->discover_reference(obj, reference_type())) {                        \
+      return size;                                                              \
+    } else {                                                                    \
+      /* treat referent as normal oop */                                        \
+      SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
+      closure->do_oop##nv_suffix(referent_addr);                                \
+    }                                                                           \
+  }                                                                             \
+  /* treat next as normal oop */                                                \
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);                   \
+  if (contains(next_addr)) {                                                    \
+    SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
+    closure->do_oop##nv_suffix(next_addr);                                      \
+  }                                                                             \
+  return size;                                                                  \
+
+
+template <class T> bool contains(T *t) { return true; }
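
The always-true contains() above lets the unbounded and MemRegion-bounded oop_oop_iterate definitions share one macro body: the caller passes either this trivial predicate or mr.contains, and the unbounded case pays only a check the compiler can fold away. A small sketch of the same trick expressed with callable predicates instead of a macro argument; all names are illustrative.

// Sketch: one iteration body shared by bounded and unbounded callers, where
// the unbounded caller passes an always-true predicate.
#include <cstdio>

struct RangeS {
  const int* lo;
  const int* hi;
  bool contains(const int* p) const { return lo <= p && p < hi; }
};

template <class Contains, class Visit>
void iterate_fields(int* start, int n, Contains contains, Visit visit) {
  for (int* p = start; p < start + n; ++p) {
    if (contains(p)) visit(p);
  }
}

int main() {
  int fields[4] = {10, 20, 30, 40};
  RangeS mr = { fields + 1, fields + 3 };

  // Unbounded: the predicate is constant-true, mirroring contains(T*) above.
  iterate_fields(fields, 4, [](const int*) { return true; },
                 [](int* p) { printf("all: %d\n", *p); });

  // Bounded: restrict to the MemRegion-like range, mirroring mr.contains.
  iterate_fields(fields, 4, [&](const int* p) { return mr.contains(p); },
                 [](int* p) { printf("mr:  %d\n", *p); });
  return 0;
}
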
+
+// Macros to define instanceRefKlass::oop_oop_iterate for the virtual and
+// nonvirtual variants of all closures.  Each expands the specialized macro
+// above for the appropriate oop size.
+
 #define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
                                                                                 \
 int instanceRefKlass::                                                          \
@@ -150,25 +212,11 @@
                                                                                 \
   int size = instanceKlass::oop_oop_iterate##nv_suffix(obj, closure);           \
                                                                                 \
-  oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);             \
-  oop referent = *referent_addr;                                                \
-  if (referent != NULL) {                                                       \
-    ReferenceProcessor* rp = closure->_ref_processor;                           \
-    if (!referent->is_gc_marked() && (rp != NULL) &&                            \
-        rp->discover_reference(obj, reference_type())) {              \
-      return size;                                                              \
-    } else {                                                                    \
-      /* treat referent as normal oop */                                        \
-      SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
-      closure->do_oop##nv_suffix(referent_addr);                                \
-    }                                                                           \
+  if (UseCompressedOops) {                                                      \
+    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains);   \
+  } else {                                                                      \
+    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains);         \
   }                                                                             \
-                                                                                \
-  /* treat next as normal oop */                                                \
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);                     \
-  SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
-  closure->do_oop##nv_suffix(next_addr);                                        \
-  return size;                                                                  \
 }
 
 #define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)      \
@@ -180,28 +228,11 @@
   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
                                                                                 \
   int size = instanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr);   \
-                                                                                \
-  oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);             \
-  oop referent = *referent_addr;                                                \
-  if (referent != NULL && mr.contains(referent_addr)) {                         \
-    ReferenceProcessor* rp = closure->_ref_processor;                           \
-    if (!referent->is_gc_marked() && (rp != NULL) &&                            \
-        rp->discover_reference(obj, reference_type())) {              \
-      return size;                                                              \
-    } else {                                                                    \
-      /* treat referent as normal oop */                                        \
-      SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
-      closure->do_oop##nv_suffix(referent_addr);                                \
-    }                                                                           \
+  if (UseCompressedOops) {                                                      \
+    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr.contains); \
+  } else {                                                                      \
+    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr.contains);      \
   }                                                                             \
-                                                                                \
-  /* treat next as normal oop */                                                \
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);                     \
-  if (mr.contains(next_addr)) {                                                 \
-    SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
-    closure->do_oop##nv_suffix(next_addr);                                      \
-  }                                                                             \
-  return size;                                                                  \
 }
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
@@ -209,16 +240,17 @@
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
 
-
 #ifndef SERIALGC
-void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
+template <class T>
+void specialized_oop_copy_contents(instanceRefKlass *ref,
+                                   PSPromotionManager* pm, oop obj) {
   assert(!pm->depth_first(), "invariant");
-  oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
-  if (PSScavenge::should_scavenge(*referent_addr)) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  if (PSScavenge::should_scavenge(referent_addr)) {
     ReferenceProcessor* rp = PSScavenge::reference_processor();
-    if (rp->discover_reference(obj, reference_type())) {
+    if (rp->discover_reference(obj, ref->reference_type())) {
       // reference already enqueued, referent and next will be traversed later
-      instanceKlass::oop_copy_contents(pm, obj);
+      ref->instanceKlass::oop_copy_contents(pm, obj);
       return;
     } else {
       // treat referent as normal oop
@@ -226,21 +258,31 @@
     }
   }
   // treat next as normal oop
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);
-  if (PSScavenge::should_scavenge(*next_addr)) {
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  if (PSScavenge::should_scavenge(next_addr)) {
     pm->claim_or_forward_breadth(next_addr);
   }
-  instanceKlass::oop_copy_contents(pm, obj);
+  ref->instanceKlass::oop_copy_contents(pm, obj);
 }
 
-void instanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
+void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
+  if (UseCompressedOops) {
+    specialized_oop_copy_contents<narrowOop>(this, pm, obj);
+  } else {
+    specialized_oop_copy_contents<oop>(this, pm, obj);
+  }
+}
+
+template <class T>
+void specialized_oop_push_contents(instanceRefKlass *ref,
+                                   PSPromotionManager* pm, oop obj) {
   assert(pm->depth_first(), "invariant");
-  oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
-  if (PSScavenge::should_scavenge(*referent_addr)) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  if (PSScavenge::should_scavenge(referent_addr)) {
     ReferenceProcessor* rp = PSScavenge::reference_processor();
-    if (rp->discover_reference(obj, reference_type())) {
+    if (rp->discover_reference(obj, ref->reference_type())) {
       // reference already enqueued, referent and next will be traversed later
-      instanceKlass::oop_push_contents(pm, obj);
+      ref->instanceKlass::oop_push_contents(pm, obj);
       return;
     } else {
       // treat referent as normal oop
@@ -248,71 +290,68 @@
     }
   }
   // treat next as normal oop
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);
-  if (PSScavenge::should_scavenge(*next_addr)) {
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  if (PSScavenge::should_scavenge(next_addr)) {
     pm->claim_or_forward_depth(next_addr);
   }
-  instanceKlass::oop_push_contents(pm, obj);
+  ref->instanceKlass::oop_push_contents(pm, obj);
+}
+
+void instanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
+  if (UseCompressedOops) {
+    specialized_oop_push_contents<narrowOop>(this, pm, obj);
+  } else {
+    specialized_oop_push_contents<oop>(this, pm, obj);
+  }
+}
+
+template <class T>
+void specialized_oop_update_pointers(instanceRefKlass *ref,
+                                    ParCompactionManager* cm, oop obj) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  PSParallelCompact::adjust_pointer(referent_addr);
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  PSParallelCompact::adjust_pointer(next_addr);
+  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+  PSParallelCompact::adjust_pointer(discovered_addr);
+  debug_only(trace_reference_gc("instanceRefKlass::oop_update_ptrs", obj,
+                                referent_addr, next_addr, discovered_addr);)
 }
 
 int instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   instanceKlass::oop_update_pointers(cm, obj);
+  if (UseCompressedOops) {
+    specialized_oop_update_pointers<narrowOop>(this, cm, obj);
+  } else {
+    specialized_oop_update_pointers<oop>(this, cm, obj);
+  }
+  return size_helper();
+}
 
-  oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
-  PSParallelCompact::adjust_pointer(referent_addr);
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);
-  PSParallelCompact::adjust_pointer(next_addr);
-  oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
-  PSParallelCompact::adjust_pointer(discovered_addr);
 
-#ifdef ASSERT
-  if(TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj "
-                           INTPTR_FORMAT, (oopDesc*) obj);
-    gclog_or_tty->print_cr("     referent_addr/* " INTPTR_FORMAT " / "
-                           INTPTR_FORMAT, referent_addr,
-                           referent_addr ? (oopDesc*) *referent_addr : NULL);
-    gclog_or_tty->print_cr("     next_addr/* " INTPTR_FORMAT " / "
-                           INTPTR_FORMAT, next_addr,
-                           next_addr ? (oopDesc*) *next_addr : NULL);
-    gclog_or_tty->print_cr("     discovered_addr/* " INTPTR_FORMAT " / "
-                   INTPTR_FORMAT, discovered_addr,
-                   discovered_addr ? (oopDesc*) *discovered_addr : NULL);
-  }
-#endif
-
-  return size_helper();
+template <class T> void
+specialized_oop_update_pointers(ParCompactionManager* cm, oop obj,
+                                HeapWord* beg_addr, HeapWord* end_addr) {
+  T* p;
+  T* referent_addr = p = (T*)java_lang_ref_Reference::referent_addr(obj);
+  PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
+  T* next_addr = p = (T*)java_lang_ref_Reference::next_addr(obj);
+  PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
+  T* discovered_addr = p = (T*)java_lang_ref_Reference::discovered_addr(obj);
+  PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
+  debug_only(trace_reference_gc("instanceRefKlass::oop_update_ptrs", obj,
+                                referent_addr, next_addr, discovered_addr);)
 }
 
 int
 instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
                                       HeapWord* beg_addr, HeapWord* end_addr) {
   instanceKlass::oop_update_pointers(cm, obj, beg_addr, end_addr);
-
-  oop* p;
-  oop* referent_addr = p = java_lang_ref_Reference::referent_addr(obj);
-  PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
-  oop* next_addr = p = java_lang_ref_Reference::next_addr(obj);
-  PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
-  oop* discovered_addr = p = java_lang_ref_Reference::discovered_addr(obj);
-  PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
-
-#ifdef ASSERT
-  if(TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj "
-                           INTPTR_FORMAT, (oopDesc*) obj);
-    gclog_or_tty->print_cr("     referent_addr/* " INTPTR_FORMAT " / "
-                           INTPTR_FORMAT, referent_addr,
-                           referent_addr ? (oopDesc*) *referent_addr : NULL);
-    gclog_or_tty->print_cr("     next_addr/* " INTPTR_FORMAT " / "
-                           INTPTR_FORMAT, next_addr,
-                           next_addr ? (oopDesc*) *next_addr : NULL);
-    gclog_or_tty->print_cr("     discovered_addr/* " INTPTR_FORMAT " / "
-                   INTPTR_FORMAT, discovered_addr,
-                   discovered_addr ? (oopDesc*) *discovered_addr : NULL);
+  if (UseCompressedOops) {
+    specialized_oop_update_pointers<narrowOop>(cm, obj, beg_addr, end_addr);
+  } else {
+    specialized_oop_update_pointers<oop>(cm, obj, beg_addr, end_addr);
   }
-#endif
-
   return size_helper();
 }
 #endif // SERIALGC
@@ -338,7 +377,7 @@
   // offset 2 (words) and has 4 map entries.
   debug_only(int offset = java_lang_ref_Reference::referent_offset);
   debug_only(int length = ((java_lang_ref_Reference::discovered_offset -
-    java_lang_ref_Reference::referent_offset)/wordSize) + 1);
+    java_lang_ref_Reference::referent_offset)/heapOopSize) + 1);
 
   if (UseSharedSpaces) {
     assert(map->offset() == java_lang_ref_Reference::queue_offset &&
@@ -368,22 +407,35 @@
 
   if (referent != NULL) {
     guarantee(referent->is_oop(), "referent field heap failed");
-    if (gch != NULL && !gch->is_in_youngest(obj))
+    if (gch != NULL && !gch->is_in_youngest(obj)) {
       // We do a specific remembered set check here since the referent
       // field is not part of the oop mask and therefore skipped by the
       // regular verify code.
-      obj->verify_old_oop(java_lang_ref_Reference::referent_addr(obj), true);
+      if (UseCompressedOops) {
+        narrowOop* referent_addr = (narrowOop*)java_lang_ref_Reference::referent_addr(obj);
+        obj->verify_old_oop(referent_addr, true);
+      } else {
+        oop* referent_addr = (oop*)java_lang_ref_Reference::referent_addr(obj);
+        obj->verify_old_oop(referent_addr, true);
+      }
+    }
   }
   // Verify next field
   oop next = java_lang_ref_Reference::next(obj);
   if (next != NULL) {
-    guarantee(next->is_oop(), "next field verify failed");
+    guarantee(next->is_oop(), "next field verify failed");
     guarantee(next->is_instanceRef(), "next field verify failed");
     if (gch != NULL && !gch->is_in_youngest(obj)) {
       // We do a specific remembered set check here since the next field is
       // not part of the oop mask and therefore skipped by the regular
       // verify code.
-      obj->verify_old_oop(java_lang_ref_Reference::next_addr(obj), true);
+      if (UseCompressedOops) {
+        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
+        obj->verify_old_oop(next_addr, true);
+      } else {
+        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
+        obj->verify_old_oop(next_addr, true);
+      }
     }
   }
 }
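
Every entry point changed in this file follows the same shape: a file-local template does the work for either heap-reference width, and the public method picks the instantiation at runtime from UseCompressedOops. A self-contained sketch of that dispatch (the types and names below are stand-ins, not HotSpot's):

    #include <cstdint>

    typedef void*    oop;        // stand-in for an uncompressed heap reference
    typedef uint32_t narrowOop;  // stand-in for a compressed heap reference
    static bool UseCompressedOops = true;   // runtime flag, as in the VM

    // Instantiated twice; each copy touches slots of the right width.
    template <class T>
    static void visit_slot(T* addr) {
      (void)*addr;   // real code would scavenge/adjust/verify the slot here
    }

    static void visit(void* field_addr) {
      if (UseCompressedOops) {
        visit_slot((narrowOop*)field_addr);
      } else {
        visit_slot((oop*)field_addr);
      }
    }
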
--- a/hotspot/src/share/vm/oops/klass.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/klass.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -542,11 +542,10 @@
 
 void Klass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
   /* $$$ I think this functionality should be handled by verification of
-
   RememberedSet::verify_old_oop(obj, p, allow_dirty, false);
-
   the card table. */
 }
+void Klass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) { }
 
 #ifndef PRODUCT
 
--- a/hotspot/src/share/vm/oops/klass.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/klass.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -757,6 +757,7 @@
   virtual const char* internal_name() const = 0;
   virtual void oop_verify_on(oop obj, outputStream* st);
   virtual void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
+  virtual void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
   // tells whether obj is partially constructed (gc during class loading)
   virtual bool oop_partially_loaded(oop obj) const { return false; }
   virtual void oop_set_partially_loaded(oop obj) {};
--- a/hotspot/src/share/vm/oops/klassVtable.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/klassVtable.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1118,8 +1118,8 @@
   itableOffsetEntry* ioe = (itableOffsetEntry*)klass->start_of_itable();
   itableMethodEntry* ime = (itableMethodEntry*)(ioe + nof_interfaces);
   intptr_t* end               = klass->end_of_itable();
-  assert((oop*)(ime + nof_methods) <= klass->start_of_static_fields(), "wrong offset calculation (1)");
-  assert((oop*)(end) == (oop*)(ime + nof_methods),                     "wrong offset calculation (2)");
+  assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_static_fields(), "wrong offset calculation (1)");
+  assert((oop*)(end) == (oop*)(ime + nof_methods),                      "wrong offset calculation (2)");
 
   // Visit all interfaces and initialize itable offset table
   SetupItableClosure sic((address)klass->as_klassOop(), ioe, ime);
--- a/hotspot/src/share/vm/oops/markOop.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/markOop.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -89,7 +89,7 @@
   enum { age_bits                 = 4,
          lock_bits                = 2,
          biased_lock_bits         = 1,
-         max_hash_bits            = BitsPerOop - age_bits - lock_bits - biased_lock_bits,
+         max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
          hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
          epoch_bits               = 2
   };
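
Since the mark word stays a full machine word even when oops are compressed, the hash budget is now sized from BitsPerWord. A quick check of that arithmetic for a 64-bit VM (the 64 is an assumption here, not something the hunk states):

    #include <cstdio>

    int main() {
      const int BitsPerWord      = 64;  // 64-bit VM assumed
      const int age_bits         = 4;
      const int lock_bits        = 2;
      const int biased_lock_bits = 1;
      const int max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits; // 57
      const int hash_bits     = max_hash_bits > 31 ? 31 : max_hash_bits;               // clamped to 31
      printf("max_hash_bits=%d hash_bits=%d\n", max_hash_bits, hash_bits);
      return 0;
    }
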
--- a/hotspot/src/share/vm/oops/methodDataKlass.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/methodDataKlass.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -95,6 +95,7 @@
 }
 #endif // SERIALGC
 
+
 int methodDataKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
   assert (obj->is_methodData(), "object must be method data");
   methodDataOop m = methodDataOop(obj);
@@ -113,7 +114,6 @@
   return size;
 }
 
-
 int methodDataKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
   assert (obj->is_methodData(), "object must be method data");
   methodDataOop m = methodDataOop(obj);
@@ -158,14 +158,14 @@
   assert (obj->is_methodData(), "object must be method data");
   methodDataOop m = methodDataOop(obj);
   // This should never point into the young gen.
-  assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity");
+  assert(!PSScavenge::should_scavenge(m->adr_method()), "Sanity");
 }
 
 void methodDataKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   assert (obj->is_methodData(), "object must be method data");
   methodDataOop m = methodDataOop(obj);
   // This should never point into the young gen.
-  assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity");
+  assert(!PSScavenge::should_scavenge(m->adr_method()), "Sanity");
 }
 
 int methodDataKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
--- a/hotspot/src/share/vm/oops/methodOop.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/methodOop.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -430,11 +430,11 @@
 bool methodOopDesc::is_accessor() const {
   if (code_size() != 5) return false;
   if (size_of_parameters() != 1) return false;
-  if (Bytecodes::java_code_at(code_base()+0) != Bytecodes::_aload_0 ) return false;
-  if (Bytecodes::java_code_at(code_base()+1) != Bytecodes::_getfield) return false;
-  Bytecodes::Code ret_bc = Bytecodes::java_code_at(code_base()+4);
-  if (Bytecodes::java_code_at(code_base()+4) != Bytecodes::_areturn &&
-      Bytecodes::java_code_at(code_base()+4) != Bytecodes::_ireturn ) return false;
+  methodOop m = (methodOop)this;  // pass to code_at() to avoid method_from_bcp
+  if (Bytecodes::java_code_at(code_base()+0, m) != Bytecodes::_aload_0 ) return false;
+  if (Bytecodes::java_code_at(code_base()+1, m) != Bytecodes::_getfield) return false;
+  if (Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_areturn &&
+      Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_ireturn ) return false;
   return true;
 }
 
@@ -955,7 +955,7 @@
 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
 static void reorder_based_on_method_index(objArrayOop methods,
                                           objArrayOop annotations,
-                                          oop* temp_array) {
+                                          GrowableArray<oop>* temp_array) {
   if (annotations == NULL) {
     return;
   }
@@ -963,12 +963,15 @@
   int length = methods->length();
   int i;
   // Copy to temp array
-  memcpy(temp_array, annotations->obj_at_addr(0), length * sizeof(oop));
+  temp_array->clear();
+  for (i = 0; i < length; i++) {
+    temp_array->append(annotations->obj_at(i));
+  }
 
   // Copy back using old method indices
   for (i = 0; i < length; i++) {
     methodOop m = (methodOop) methods->obj_at(i);
-    annotations->obj_at_put(i, temp_array[m->method_idnum()]);
+    annotations->obj_at_put(i, temp_array->at(m->method_idnum()));
   }
 }
 
@@ -997,7 +1000,7 @@
 
     // Use a simple bubble sort for small number of methods since
     // qsort requires a functional pointer call for each comparison.
-    if (length < 8) {
+    if (UseCompressedOops || length < 8) {
       bool sorted = true;
       for (int i=length-1; i>0; i--) {
         for (int j=0; j<i; j++) {
@@ -1010,11 +1013,14 @@
           }
         }
         if (sorted) break;
-        sorted = true;
+          sorted = true;
       }
     } else {
+      // XXX This doesn't work for UseCompressedOops because the compare fn
+      // will have to decode the methodOop anyway, making it not much faster
+      // than above.
       compareFn compare = (compareFn) (idempotent ? method_compare_idempotent : method_compare);
-      qsort(methods->obj_at_addr(0), length, oopSize, compare);
+      qsort(methods->base(), length, heapOopSize, compare);
     }
 
     // Sort annotations if necessary
@@ -1022,8 +1028,9 @@
     assert(methods_parameter_annotations == NULL || methods_parameter_annotations->length() == methods->length(), "");
     assert(methods_default_annotations == NULL   || methods_default_annotations->length() == methods->length(), "");
     if (do_annotations) {
+      ResourceMark rm;
       // Allocate temporary storage
-      oop* temp_array = NEW_RESOURCE_ARRAY(oop, length);
+      GrowableArray<oop>* temp_array = new GrowableArray<oop>(length);
       reorder_based_on_method_index(methods, methods_annotations, temp_array);
       reorder_based_on_method_index(methods, methods_parameter_annotations, temp_array);
       reorder_based_on_method_index(methods, methods_default_annotations, temp_array);
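
The reordering helper now copies annotation oops one element at a time through a GrowableArray instead of memcpy'ing raw oop words, which stays correct whether the backing array stores narrow or full-width references. A rough equivalent of the two-pass reorder, with std::vector standing in for GrowableArray and old_index standing in for method_idnum():

    #include <vector>
    #include <cstddef>

    template <class E>
    void reorder(std::vector<E>& annotations, const std::vector<size_t>& old_index) {
      std::vector<E> temp(annotations);          // pass 1: element-wise copy, no raw memcpy
      for (size_t i = 0; i < annotations.size(); ++i) {
        annotations[i] = temp[old_index[i]];     // pass 2: place each entry by its old index
      }
    }
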
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -80,6 +80,56 @@
   return h_array();
 }
 
+// Either oop or narrowOop depending on UseCompressedOops.
+template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
+                               arrayOop d, T* dst, int length, TRAPS) {
+
+  const size_t word_len = objArrayOopDesc::array_size(length);
+
+  // For performance reasons, we assume we are using a card marking write
+  // barrier. The assert will fail if this is not the case.
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+
+  if (s == d) {
+    // since source and destination are equal we do not need conversion checks.
+    assert(length > 0, "sanity check");
+    Copy::conjoint_oops_atomic(src, dst, length);
+  } else {
+    // We have to make sure all elements conform to the destination array
+    klassOop bound = objArrayKlass::cast(d->klass())->element_klass();
+    klassOop stype = objArrayKlass::cast(s->klass())->element_klass();
+    if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
+      // elements are guaranteed to be subtypes, so no check necessary
+      Copy::conjoint_oops_atomic(src, dst, length);
+    } else {
+      // slow case: need individual subtype checks
+      // note: don't use obj_at_put below because it includes a redundant store check
+      T* from = src;
+      T* end = from + length;
+      for (T* p = dst; from < end; from++, p++) {
+        // XXX this is going to be slow.
+        T element = *from;
+        if (oopDesc::is_null(element) ||
+            Klass::cast(oopDesc::decode_heap_oop_not_null(element)->klass())->is_subtype_of(bound)) {
+          *p = *from;
+        } else {
+          // We must do a barrier to cover the partial copy.
+          const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
+          // pointer delta is scaled to number of elements (length field in
+          // objArrayOop) which we assume is 32 bit.
+          assert(pd == (size_t)(int)pd, "length field overflow");
+          const size_t done_word_len = objArrayOopDesc::array_size((int)pd);
+          bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
+          THROW(vmSymbols::java_lang_ArrayStoreException());
+          return;
+        }
+      }
+    }
+  }
+  bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
+}
+
 void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
                                int dst_pos, int length, TRAPS) {
   assert(s->is_objArray(), "must be obj array");
@@ -105,48 +155,15 @@
   if (length==0) {
     return;
   }
-
-  oop* const src = objArrayOop(s)->obj_at_addr(src_pos);
-  oop* const dst = objArrayOop(d)->obj_at_addr(dst_pos);
-  const size_t word_len = length * HeapWordsPerOop;
-
-  // For performance reasons, we assume we are using a card marking write
-  // barrier. The assert will fail if this is not the case.
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-
-  if (s == d) {
-    // since source and destination are equal we do not need conversion checks.
-    assert(length > 0, "sanity check");
-    Copy::conjoint_oops_atomic(src, dst, length);
+  if (UseCompressedOops) {
+    narrowOop* const src = objArrayOop(s)->obj_at_addr<narrowOop>(src_pos);
+    narrowOop* const dst = objArrayOop(d)->obj_at_addr<narrowOop>(dst_pos);
+    do_copy<narrowOop>(s, src, d, dst, length, CHECK);
   } else {
-    // We have to make sure all elements conform to the destination array
-    klassOop bound = objArrayKlass::cast(d->klass())->element_klass();
-    klassOop stype = objArrayKlass::cast(s->klass())->element_klass();
-    if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
-      // elements are guaranteed to be subtypes, so no check necessary
-      Copy::conjoint_oops_atomic(src, dst, length);
-    } else {
-      // slow case: need individual subtype checks
-      // note: don't use obj_at_put below because it includes a redundant store check
-      oop* from = src;
-      oop* end = from + length;
-      for (oop* p = dst; from < end; from++, p++) {
-        oop element = *from;
-        if (element == NULL || Klass::cast(element->klass())->is_subtype_of(bound)) {
-          *p = element;
-        } else {
-          // We must do a barrier to cover the partial copy.
-          const size_t done_word_len = pointer_delta(p, dst, oopSize) *
-                                       HeapWordsPerOop;
-          bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
-          THROW(vmSymbols::java_lang_ArrayStoreException());
-          return;
-        }
-      }
-    }
+    oop* const src = objArrayOop(s)->obj_at_addr<oop>(src_pos);
+    oop* const dst = objArrayOop(d)->obj_at_addr<oop>(dst_pos);
+    do_copy<oop> (s, src, d, dst, length, CHECK);
   }
-  bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
 }
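
When do_copy hits a non-conforming element it must still dirty the card-marked region covering the prefix it already wrote, so the copied element count is converted to heap words before the write_ref_array call. Working that conversion with assumed sizes (8-byte HeapWords, 4-byte narrowOop elements):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t HeapWordSize = 8;   // bytes per heap word (assumed)
      const size_t heapOopSize  = 4;   // bytes per element with compressed oops (assumed)

      size_t copied = 5;               // elements stored before the ArrayStoreException
      size_t oops_per_word = HeapWordSize / heapOopSize;                  // 2
      size_t done_word_len = (copied + oops_per_word - 1) / oops_per_word; // round up: 3 words
      printf("dirty %zu heap words for the partial copy\n", done_word_len);
      return 0;
    }
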
 
 
@@ -242,49 +259,75 @@
   return element_klass()->klass_part()->is_subtype_of(oak->element_klass());
 }
 
-
 void objArrayKlass::initialize(TRAPS) {
   Klass::cast(bottom_klass())->initialize(THREAD);  // dispatches to either instanceKlass or typeArrayKlass
 }
 
+#define ObjArrayKlass_SPECIALIZED_OOP_ITERATE(T, a, p, do_oop) \
+{                                   \
+  T* p         = (T*)(a)->base();   \
+  T* const end = p + (a)->length(); \
+  while (p < end) {                 \
+    do_oop;                         \
+    p++;                            \
+  }                                 \
+}
+
+#define ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(T, a, p, low, high, do_oop) \
+{                                   \
+  T* const l = (T*)(low);           \
+  T* const h = (T*)(high);          \
+  T* p       = (T*)(a)->base();     \
+  T* end     = p + (a)->length();   \
+  if (p < l) p = l;                 \
+  if (end > h) end = h;             \
+  while (p < end) {                 \
+    do_oop;                         \
+    ++p;                            \
+  }                                 \
+}
+
+#define ObjArrayKlass_OOP_ITERATE(a, p, do_oop)      \
+  if (UseCompressedOops) {                           \
+    ObjArrayKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+      a, p, do_oop)                                  \
+  } else {                                           \
+    ObjArrayKlass_SPECIALIZED_OOP_ITERATE(oop,       \
+      a, p, do_oop)                                  \
+  }
+
+#define ObjArrayKlass_BOUNDED_OOP_ITERATE(a, p, low, high, do_oop) \
+  if (UseCompressedOops) {                                   \
+    ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+      a, p, low, high, do_oop)                               \
+  } else {                                                   \
+    ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,       \
+      a, p, low, high, do_oop)                               \
+  }
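
The ObjArrayKlass_*_OOP_ITERATE macros splice an arbitrary statement into a width-specialized pointer walk. A template/lambda equivalent is sketched below (illustrative only; the file uses macros so the spliced statement can paste closure-specific member names):

    #include <cstdint>

    typedef void*    oop;
    typedef uint32_t narrowOop;
    static bool UseCompressedOops = true;

    template <class T, class F>
    static void iterate_slots(T* base, int length, F do_oop) {
      T* p = base;
      T* const end = p + length;
      while (p < end) {
        do_oop(p);     // the macro's do_oop statement goes here
        ++p;
      }
    }

    template <class F>
    static void iterate_array(void* base, int length, F do_oop) {
      if (UseCompressedOops) {
        iterate_slots((narrowOop*)base, length, do_oop);   // F must accept narrowOop*
      } else {
        iterate_slots((oop*)base, length, do_oop);         // ...and oop*
      }
    }

A C++14 generic lambda, e.g. [](auto* p) { /* ... */ }, satisfies both instantiations.
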
 
 void objArrayKlass::oop_follow_contents(oop obj) {
   assert (obj->is_array(), "obj must be array");
-  arrayOop a = arrayOop(obj);
+  objArrayOop a = objArrayOop(obj);
   a->follow_header();
-  oop* base      = (oop*)a->base(T_OBJECT);
-  oop* const end = base + a->length();
-  while (base < end) {
-    if (*base != NULL)
-      // we call mark_and_follow here to avoid excessive marking stack usage
-      MarkSweep::mark_and_follow(base);
-    base++;
-  }
+  ObjArrayKlass_OOP_ITERATE( \
+    a, p, \
+    /* we call mark_and_follow here to avoid excessive marking stack usage */ \
+    MarkSweep::mark_and_follow(p))
 }
 
 #ifndef SERIALGC
 void objArrayKlass::oop_follow_contents(ParCompactionManager* cm,
                                         oop obj) {
   assert (obj->is_array(), "obj must be array");
-  arrayOop a = arrayOop(obj);
+  objArrayOop a = objArrayOop(obj);
   a->follow_header(cm);
-  oop* base      = (oop*)a->base(T_OBJECT);
-  oop* const end = base + a->length();
-  while (base < end) {
-    if (*base != NULL)
-      // we call mark_and_follow here to avoid excessive marking stack usage
-      PSParallelCompact::mark_and_follow(cm, base);
-    base++;
-  }
+  ObjArrayKlass_OOP_ITERATE( \
+    a, p, \
+    /* we call mark_and_follow here to avoid excessive marking stack usage */ \
+    PSParallelCompact::mark_and_follow(cm, p))
 }
 #endif // SERIALGC
 
-#define invoke_closure_on(base, closure, nv_suffix) {                                  \
-  if (*(base) != NULL) {                                                               \
-    (closure)->do_oop##nv_suffix(base);                                                \
-  }                                                                                    \
-}
-
 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)           \
                                                                                 \
 int objArrayKlass::oop_oop_iterate##nv_suffix(oop obj,                          \
@@ -298,21 +341,7 @@
   if (closure->do_header()) {                                                   \
     a->oop_iterate_header(closure);                                             \
   }                                                                             \
-  oop* base               = a->base();                                          \
-  oop* const end          = base + a->length();                                 \
-  const intx field_offset = PrefetchFieldsAhead;                                \
-  if (field_offset > 0) {                                                       \
-    while (base < end) {                                                        \
-      prefetch_beyond(base, end, field_offset, closure->prefetch_style());      \
-      invoke_closure_on(base, closure, nv_suffix);                              \
-      base++;                                                                   \
-    }                                                                           \
-  } else {                                                                      \
-    while (base < end) {                                                        \
-      invoke_closure_on(base, closure, nv_suffix);                              \
-      base++;                                                                   \
-    }                                                                           \
-  }                                                                             \
+  ObjArrayKlass_OOP_ITERATE(a, p, (closure)->do_oop##nv_suffix(p))              \
   return size;                                                                  \
 }
 
@@ -330,28 +359,43 @@
   if (closure->do_header()) {                                                   \
     a->oop_iterate_header(closure, mr);                                         \
   }                                                                             \
-  oop* bottom = (oop*)mr.start();                                               \
-  oop* top    = (oop*)mr.end();                                                 \
-  oop* base = a->base();                                                        \
-  oop* end    = base + a->length();                                             \
-  if (base < bottom) {                                                          \
-    base = bottom;                                                              \
-  }                                                                             \
-  if (end > top) {                                                              \
-    end = top;                                                                  \
-  }                                                                             \
-  const intx field_offset = PrefetchFieldsAhead;                                \
-  if (field_offset > 0) {                                                       \
-    while (base < end) {                                                        \
-      prefetch_beyond(base, end, field_offset, closure->prefetch_style());      \
-      invoke_closure_on(base, closure, nv_suffix);                              \
-      base++;                                                                   \
+  ObjArrayKlass_BOUNDED_OOP_ITERATE(                                            \
+    a, p, mr.start(), mr.end(), (closure)->do_oop##nv_suffix(p))                \
+  return size;                                                                  \
+}
+
+// Like oop_oop_iterate but only iterates over a specified range and only used
+// for objArrayOops.
+#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix)         \
+                                                                                \
+int objArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj,                    \
+                                                  OopClosureType* closure,      \
+                                                  int start, int end) {         \
+  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \
+  assert(obj->is_array(), "obj must be array");                                 \
+  objArrayOop a  = objArrayOop(obj);                                            \
+  /* Get size before changing pointers. */                                      \
+  /* Don't call size() or oop_size() since that is a virtual call */            \
+  int size = a->object_size();                                                  \
+  if (UseCompressedOops) {                                                      \
+    HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<narrowOop>(start);\
+    /* this might be weird if end needs to be aligned on HeapWord boundary */   \
+    HeapWord* high = (HeapWord*)((narrowOop*)a->base() + end);                  \
+    MemRegion mr(low, high);                                                    \
+    if (closure->do_header()) {                                                 \
+      a->oop_iterate_header(closure, mr);                                       \
     }                                                                           \
+    ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,                    \
+      a, p, low, high, (closure)->do_oop##nv_suffix(p))                         \
   } else {                                                                      \
-    while (base < end) {                                                        \
-      invoke_closure_on(base, closure, nv_suffix);                              \
-      base++;                                                                   \
+    HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<oop>(start);  \
+    HeapWord* high = (HeapWord*)((oop*)a->base() + end);                        \
+    MemRegion mr(low, high);                                                    \
+    if (closure->do_header()) {                                                 \
+      a->oop_iterate_header(closure, mr);                                       \
     }                                                                           \
+    ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                          \
+      a, p, low, high, (closure)->do_oop##nv_suffix(p))                         \
   }                                                                             \
   return size;                                                                  \
 }
@@ -360,6 +404,8 @@
 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
 ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
+ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
+ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
 
 int objArrayKlass::oop_adjust_pointers(oop obj) {
   assert(obj->is_objArray(), "obj must be obj array");
@@ -368,12 +414,7 @@
   // Don't call size() or oop_size() since that is a virtual call.
   int size = a->object_size();
   a->adjust_header();
-  oop* base      = a->base();
-  oop* const end = base + a->length();
-  while (base < end) {
-    MarkSweep::adjust_pointer(base);
-    base++;
-  }
+  ObjArrayKlass_OOP_ITERATE(a, p, MarkSweep::adjust_pointer(p))
   return size;
 }
 
@@ -381,51 +422,27 @@
 void objArrayKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
   assert(!pm->depth_first(), "invariant");
   assert(obj->is_objArray(), "obj must be obj array");
-  // Compute oop range
-  oop* curr = objArrayOop(obj)->base();
-  oop* end = curr + objArrayOop(obj)->length();
-  //  assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size");
-  assert(align_object_size(pointer_delta(end, obj, sizeof(oop*)))
-                                  == oop_size(obj), "checking size");
-
-  // Iterate over oops
-  while (curr < end) {
-    if (PSScavenge::should_scavenge(*curr)) {
-      pm->claim_or_forward_breadth(curr);
-    }
-    ++curr;
-  }
+  ObjArrayKlass_OOP_ITERATE( \
+    objArrayOop(obj), p, \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_breadth(p); \
+    })
 }
 
 void objArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   assert(pm->depth_first(), "invariant");
   assert(obj->is_objArray(), "obj must be obj array");
-  // Compute oop range
-  oop* curr = objArrayOop(obj)->base();
-  oop* end = curr + objArrayOop(obj)->length();
-  //  assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size");
-  assert(align_object_size(pointer_delta(end, obj, sizeof(oop*)))
-                                  == oop_size(obj), "checking size");
-
-  // Iterate over oops
-  while (curr < end) {
-    if (PSScavenge::should_scavenge(*curr)) {
-      pm->claim_or_forward_depth(curr);
-    }
-    ++curr;
-  }
+  ObjArrayKlass_OOP_ITERATE( \
+    objArrayOop(obj), p, \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_depth(p); \
+    })
 }
 
 int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   assert (obj->is_objArray(), "obj must be obj array");
   objArrayOop a = objArrayOop(obj);
-
-  oop* const base = a->base();
-  oop* const beg_oop = base;
-  oop* const end_oop = base + a->length();
-  for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-    PSParallelCompact::adjust_pointer(cur_oop);
-  }
+  ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p))
   return a->object_size();
 }
 
@@ -433,13 +450,9 @@
                                        HeapWord* beg_addr, HeapWord* end_addr) {
   assert (obj->is_objArray(), "obj must be obj array");
   objArrayOop a = objArrayOop(obj);
-
-  oop* const base = a->base();
-  oop* const beg_oop = MAX2((oop*)beg_addr, base);
-  oop* const end_oop = MIN2((oop*)end_addr, base + a->length());
-  for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-    PSParallelCompact::adjust_pointer(cur_oop);
-  }
+  ObjArrayKlass_BOUNDED_OOP_ITERATE( \
+     a, p, beg_addr, end_addr, \
+     PSParallelCompact::adjust_pointer(p))
   return a->object_size();
 }
 #endif // SERIALGC
@@ -509,3 +522,4 @@
   RememberedSet::verify_old_oop(obj, p, allow_dirty, true);
   */
 }
+void objArrayKlass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) {}
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -63,6 +63,11 @@
   // Compute class loader
   oop class_loader() const { return Klass::cast(bottom_klass())->class_loader(); }
 
+ private:
+  // Either oop or narrowOop depending on UseCompressedOops.
+  // must be called from within objArrayKlass.cpp
+  template <class T> void do_copy(arrayOop s, T* src, arrayOop d,
+                                  T* dst, int length, TRAPS);
  protected:
   // Returns the objArrayKlass for n'th dimension.
   virtual klassOop array_klass_impl(bool or_null, int n, TRAPS);
@@ -101,7 +106,9 @@
 #define ObjArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)   \
   int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk);         \
   int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk,      \
-                                     MemRegion mr);
+                                     MemRegion mr);                     \
+  int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* blk,    \
+                                     int start, int end);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
@@ -124,5 +131,6 @@
   const char* internal_name() const;
   void oop_verify_on(oop obj, outputStream* st);
   void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
+  void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
 
 };
--- a/hotspot/src/share/vm/oops/objArrayOop.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/objArrayOop.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -25,4 +25,12 @@
 # include "incls/_precompiled.incl"
 # include "incls/_objArrayOop.cpp.incl"
 
-// <<this page is intentionally left blank>>
+#define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
+                                                                                   \
+int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) {  \
+  SpecializationStats::record_call();                                              \
+  return ((objArrayKlass*)blueprint())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
+}
+
+ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DEFN)
+ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayOop_OOP_ITERATE_DEFN)
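
oop_iterate_range routes through the klass to the new _r instantiations so a collector can scan just a slice of a large object array. The walk is half-open over element indices [start, end); a self-contained sketch of that contract (none of this is HotSpot API):

    #include <cstdio>

    template <class T, class F>
    void iterate_range(T* base, int start, int end, F visit) {
      for (T* p = base + start; p < base + end; ++p) {
        visit(p);
      }
    }

    int main() {
      int a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
      iterate_range(a, 2, 5, [](int* p) { printf("%d ", *p); });  // prints: 2 3 4
      printf("\n");
      return 0;
    }
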
--- a/hotspot/src/share/vm/oops/objArrayOop.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/objArrayOop.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -26,20 +26,67 @@
 // Evaluating "String arg[10]" will create an objArrayOop.
 
 class objArrayOopDesc : public arrayOopDesc {
+  friend class objArrayKlass;
+  friend class Runtime1;
+  friend class psPromotionManager;
+
+  template <class T> T* obj_at_addr(int index) const {
+    assert(is_within_bounds(index), "index out of bounds");
+    return &((T*)base())[index];
+  }
+
  public:
+  // base is the address following the header.
+  HeapWord* base() const      { return (HeapWord*) arrayOopDesc::base(T_OBJECT); }
+
   // Accessing
-  oop obj_at(int index) const           { return *obj_at_addr(index);           }
-  void obj_at_put(int index, oop value) { oop_store(obj_at_addr(index), value); }
-  oop* base() const                     { return (oop*) arrayOopDesc::base(T_OBJECT); }
+  oop obj_at(int index) const {
+    // With UseCompressedOops decode the narrow oop in the objArray to an
+    // uncompressed oop.  Otherwise this is simply a "*" operator.
+    if (UseCompressedOops) {
+      return load_decode_heap_oop(obj_at_addr<narrowOop>(index));
+    } else {
+      return load_decode_heap_oop(obj_at_addr<oop>(index));
+    }
+  }
 
+  void obj_at_put(int index, oop value) {
+    if (UseCompressedOops) {
+      oop_store(obj_at_addr<narrowOop>(index), value);
+    } else {
+      oop_store(obj_at_addr<oop>(index), value);
+    }
+  }
   // Sizing
-  static int header_size()              { return arrayOopDesc::header_size(T_OBJECT); }
-  static int object_size(int length)    { return align_object_size(header_size() + length); }
-  int object_size()                     { return object_size(length()); }
+  static int header_size()    { return arrayOopDesc::header_size(T_OBJECT); }
+  int object_size()           { return object_size(length()); }
+  int array_size()            { return array_size(length()); }
+
+  static int object_size(int length) {
+    // This returns the object size in HeapWords.
+    return align_object_size(header_size() + array_size(length));
+  }
 
-  // Returns the address of the index'th element
-  oop* obj_at_addr(int index) const {
-    assert(is_within_bounds(index), "index out of bounds");
-    return &base()[index];
+  // Give size of objArrayOop in HeapWords minus the header
+  static int array_size(int length) {
+    // Without UseCompressedOops, this is simply:
+    // oop->length() * HeapWordsPerOop;
+    // With narrowOops, HeapWordsPerOop is 1/2, which truncates to 0 as an integer.
+    // The oop elements are aligned up to wordSize
+    const int HeapWordsPerOop = heapOopSize/HeapWordSize;
+    if (HeapWordsPerOop > 0) {
+      return length * HeapWordsPerOop;
+    } else {
+      const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+      int word_len = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+      return word_len;
+    }
   }
+
+  // special iterators for index ranges, returns size of object
+#define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix)     \
+  int oop_iterate_range(OopClosureType* blk, int start, int end);
+
+  ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DECL)
+  ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayOop_OOP_ITERATE_DECL)
 };
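
array_size() reports element storage in HeapWords, rounding up when two narrow oops share a word. Working both cases with assumed sizes (8-byte HeapWords; 8-byte oops uncompressed, 4-byte narrowOops compressed):

    #include <cstdio>

    static int array_size(int length, int heapOopSize, int HeapWordSize) {
      int HeapWordsPerOop = heapOopSize / HeapWordSize;      // 1 uncompressed, 0 compressed
      if (HeapWordsPerOop > 0) {
        return length * HeapWordsPerOop;
      }
      int OopsPerHeapWord = HeapWordSize / heapOopSize;      // 2 with 4-byte narrowOops
      return (length + OopsPerHeapWord - 1) / OopsPerHeapWord;  // align_size_up, then divide
    }

    int main() {
      printf("uncompressed, length 5: %d words\n", array_size(5, 8, 8));  // 5
      printf("compressed,   length 5: %d words\n", array_size(5, 4, 8));  // 3
      return 0;
    }
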
--- a/hotspot/src/share/vm/oops/oop.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/oop.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -105,10 +105,14 @@
 }
 
 
+// XXX verify_old_oop doesn't do anything (should we remove?)
 void oopDesc::verify_old_oop(oop* p, bool allow_dirty) {
   blueprint()->oop_verify_old_oop(this, p, allow_dirty);
 }
 
+void oopDesc::verify_old_oop(narrowOop* p, bool allow_dirty) {
+  blueprint()->oop_verify_old_oop(this, p, allow_dirty);
+}
 
 bool oopDesc::partially_loaded() {
   return blueprint()->oop_partially_loaded(this);
@@ -130,3 +134,6 @@
 }
 
 VerifyOopClosure VerifyOopClosure::verify_oop;
+
+void VerifyOopClosure::do_oop(oop* p)       { VerifyOopClosure::do_oop_work(p); }
+void VerifyOopClosure::do_oop(narrowOop* p) { VerifyOopClosure::do_oop_work(p); }
--- a/hotspot/src/share/vm/oops/oop.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/oop.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -30,12 +30,12 @@
 // no virtual functions allowed
 
 // store into oop with store check
-void oop_store(oop* p, oop v);
-void oop_store(volatile oop* p, oop v);
+template <class T> void oop_store(T* p, oop v);
+template <class T> void oop_store(volatile T* p, oop v);
 
 // store into oop without store check
-void oop_store_without_check(oop* p, oop v);
-void oop_store_without_check(volatile oop* p, oop v);
+template <class T> void oop_store_without_check(T* p, oop v);
+template <class T> void oop_store_without_check(volatile T* p, oop v);
 
 
 extern bool always_do_update_barrier;
@@ -55,7 +55,10 @@
   friend class VMStructs;
  private:
   volatile markOop  _mark;
-  klassOop _klass;
+  union _metadata {
+    wideKlassOop    _klass;
+    narrowOop       _compressed_klass;
+  } _metadata;
 
   // Fast access to barrier set.  Must be initialized.
   static BarrierSet* _bs;
@@ -73,16 +76,16 @@
   // objects during a GC) -- requires a valid klass pointer
   void init_mark();
 
-  klassOop klass() const        { return _klass; }
-  oop* klass_addr() const       { return (oop*) &_klass; }
+  klassOop klass() const;
+  oop* klass_addr();
+  narrowOop* compressed_klass_addr();
 
   void set_klass(klassOop k);
   // For when the klass pointer is being used as a linked list "next" field.
   void set_klass_to_list_ptr(oop k);
 
-  // size of object header
-  static int header_size()      { return sizeof(oopDesc)/HeapWordSize; }
-  static int header_size_in_bytes() { return sizeof(oopDesc); }
+  // size of object header, aligned to platform wordSize
+  static int header_size()          { return sizeof(oopDesc)/HeapWordSize; }
 
   Klass* blueprint() const;
 
@@ -119,7 +122,6 @@
 
  private:
   // field addresses in oop
-  // byte/char/bool/short fields are always stored as full words
   void*     field_base(int offset)        const;
 
   jbyte*    byte_field_addr(int offset)   const;
@@ -130,13 +132,66 @@
   jlong*    long_field_addr(int offset)   const;
   jfloat*   float_field_addr(int offset)  const;
   jdouble*  double_field_addr(int offset) const;
+  address*  address_field_addr(int offset) const;
 
  public:
-  // need this as public for garbage collection
-  oop* obj_field_addr(int offset) const;
+  // Need this as public for garbage collection.
+  template <class T> T* obj_field_addr(int offset) const;
+
+  static bool is_null(oop obj);
+  static bool is_null(narrowOop obj);
+
+  // Decode an oop pointer from a narrowOop if compressed.
+  // These are overloaded for oop and narrowOop as are the other functions
+  // below so that they can be called in template functions.
+  static oop decode_heap_oop_not_null(oop v);
+  static oop decode_heap_oop_not_null(narrowOop v);
+  static oop decode_heap_oop(oop v);
+  static oop decode_heap_oop(narrowOop v);
+
+  // Encode an oop pointer to a narrow oop.  The or_null versions accept
+  // null oop pointer, others do not in order to eliminate the
+  // null checking branches.
+  static narrowOop encode_heap_oop_not_null(oop v);
+  static narrowOop encode_heap_oop(oop v);
+
+  // Load an oop out of the Java heap
+  static narrowOop load_heap_oop(narrowOop* p);
+  static oop       load_heap_oop(oop* p);
 
+  // Load an oop out of Java heap and decode it to an uncompressed oop.
+  static oop load_decode_heap_oop_not_null(narrowOop* p);
+  static oop load_decode_heap_oop_not_null(oop* p);
+  static oop load_decode_heap_oop(narrowOop* p);
+  static oop load_decode_heap_oop(oop* p);
+
+  // Store an oop into the heap.
+  static void store_heap_oop(narrowOop* p, narrowOop v);
+  static void store_heap_oop(oop* p, oop v);
+
+  // Encode oop if UseCompressedOops and store into the heap.
+  static void encode_store_heap_oop_not_null(narrowOop* p, oop v);
+  static void encode_store_heap_oop_not_null(oop* p, oop v);
+  static void encode_store_heap_oop(narrowOop* p, oop v);
+  static void encode_store_heap_oop(oop* p, oop v);
+
+  static void release_store_heap_oop(volatile narrowOop* p, narrowOop v);
+  static void release_store_heap_oop(volatile oop* p, oop v);
+
+  static void release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v);
+  static void release_encode_store_heap_oop_not_null(volatile oop* p, oop v);
+  static void release_encode_store_heap_oop(volatile narrowOop* p, oop v);
+  static void release_encode_store_heap_oop(volatile oop* p, oop v);
+
+  static oop atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest);
+  static oop atomic_compare_exchange_oop(oop exchange_value,
+                                         volatile HeapWord *dest,
+                                         oop compare_value);
+
+  // Access to fields in an instanceOop through these methods.
   oop obj_field(int offset) const;
   void obj_field_put(int offset, oop value);
+  void obj_field_raw_put(int offset, oop value);
 
   jbyte byte_field(int offset) const;
   void byte_field_put(int offset, jbyte contents);
@@ -162,6 +217,9 @@
   jdouble double_field(int offset) const;
   void double_field_put(int offset, jdouble contents);
 
+  address address_field(int offset) const;
+  void address_field_put(int offset, address contents);
+
   oop obj_field_acquire(int offset) const;
   void release_obj_field_put(int offset, oop value);
 
@@ -207,6 +265,7 @@
   void verify_on(outputStream* st);
   void verify();
   void verify_old_oop(oop* p, bool allow_dirty);
+  void verify_old_oop(narrowOop* p, bool allow_dirty);
 
   // tells whether this oop is partially constructed (gc during class loading)
   bool partially_loaded();
@@ -228,8 +287,8 @@
   bool is_gc_marked() const;
   // Apply "MarkSweep::mark_and_push" to (the address of) every non-NULL
   // reference field in "this".
-  void follow_contents();
-  void follow_header();
+  void follow_contents(void);
+  void follow_header(void);
 
 #ifndef SERIALGC
   // Parallel Scavenge
@@ -317,6 +376,7 @@
   void     set_displaced_mark(markOop m);
 
   // for code generation
-  static int klass_offset_in_bytes()   { return offset_of(oopDesc, _klass); }
   static int mark_offset_in_bytes()    { return offset_of(oopDesc, _mark); }
+  static int klass_offset_in_bytes()   { return offset_of(oopDesc, _metadata._klass); }
+  static int klass_gap_offset_in_bytes();
 };
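
With the union in place, a compressed-oops header is a full mark word followed by a 32-bit class reference, and klass_gap_offset_in_bytes() names the 4 bytes left over so field layout can reuse them. A layout sketch under the usual 64-bit assumptions (illustrative struct, not the real oopDesc):

    #include <cstdint>
    #include <cstddef>
    #include <cstdio>

    struct CompressedHeader {
      uint64_t mark;              // markOop: always a full machine word
      uint32_t compressed_klass;  // narrowOop class reference
      uint32_t klass_gap;         // the reusable 4 bytes behind the compressed klass
    };

    int main() {
      printf("header size: %zu bytes\n", sizeof(CompressedHeader));              // 16
      printf("gap offset:  %zu bytes\n", offsetof(CompressedHeader, klass_gap)); // 12
      return 0;
    }
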
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -25,7 +25,6 @@
 // Implementation of all inlined member functions defined in oop.hpp
 // We need a separate file to avoid circular references
 
-
 inline void oopDesc::release_set_mark(markOop m) {
   OrderAccess::release_store_ptr(&_mark, m);
 }
@@ -34,17 +33,54 @@
   return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
 }
 
+inline klassOop oopDesc::klass() const {
+  if (UseCompressedOops) {
+    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
+      // can be NULL in CMS, but compressed oops aren't supported with CMS yet.
+  } else {
+    return _metadata._klass;
+  }
+}
+
+inline int oopDesc::klass_gap_offset_in_bytes() {
+  assert(UseCompressedOops, "only applicable to compressed headers");
+  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
+}
+
+inline oop* oopDesc::klass_addr() {
+  // Only used internally and with CMS and will not work with
+  // UseCompressedOops
+  assert(!UseCompressedOops, "only supported with uncompressed oops");
+  return (oop*) &_metadata._klass;
+}
+
+inline narrowOop* oopDesc::compressed_klass_addr() {
+  assert(UseCompressedOops, "only called by compressed oops");
+  return (narrowOop*) &_metadata._compressed_klass;
+}
+
 inline void oopDesc::set_klass(klassOop k) {
   // since klasses are promoted no store check is needed
   assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
   assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
-  oop_store_without_check((oop*) &_klass, (oop) k);
+  if (UseCompressedOops) {
+    // zero the gap when the klass is set, by zeroing the pointer sized
+    // part of the union.
+    _metadata._klass = NULL;
+    oop_store_without_check(compressed_klass_addr(), (oop)k);
+  } else {
+    oop_store_without_check(klass_addr(), (oop) k);
+  }
 }
 
 inline void oopDesc::set_klass_to_list_ptr(oop k) {
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
-  _klass = (klassOop)k;
+  if (UseCompressedOops) {
+    _metadata._compressed_klass = encode_heap_oop_not_null(k);
+  } else {
+    _metadata._klass = (klassOop)k;
+  }
 }
 
 inline void   oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }
@@ -70,7 +106,7 @@
 
 inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }
 
-inline oop*      oopDesc::obj_field_addr(int offset)    const { return (oop*)     field_base(offset); }
+template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
 inline jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)   field_base(offset); }
 inline jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)   field_base(offset); }
 inline jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*)field_base(offset); }
@@ -79,9 +115,156 @@
 inline jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)   field_base(offset); }
 inline jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)  field_base(offset); }
 inline jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
+inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
 
-inline oop oopDesc::obj_field(int offset) const                     { return *obj_field_addr(offset);             }
-inline void oopDesc::obj_field_put(int offset, oop value)           { oop_store(obj_field_addr(offset), value);   }
+
+// Functions for getting and setting oops within instance objects.
+// If the oops are compressed, the type passed to these overloaded functions
+// is narrowOop.  All functions are overloaded so they can be called by
+// template functions without conditionals (the compiler instantiates via
+// the right type and inlines the appropriate code).
+
+inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
+inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
+
+// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
+// offset from the heap base.  Saving the check for null can save instructions
+// in inner GC loops so these are separated.
+
+inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
+  assert(!is_null(v), "oop value can never be zero");
+  address heap_base = Universe::heap_base();
+  uint64_t result = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1) >> LogMinObjAlignmentInBytes);
+  assert((result & 0xffffffff00000000L) == 0, "narrow oop overflow");
+  return (narrowOop)result;
+}
+
+inline narrowOop oopDesc::encode_heap_oop(oop v) {
+  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
+}
+
+inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
+  assert(!is_null(v), "narrow oop value can never be zero");
+  address heap_base = Universe::heap_base();
+  return (oop)(void*)((uintptr_t)heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes));
+}
+
+inline oop oopDesc::decode_heap_oop(narrowOop v) {
+  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
+}
+
+inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
+inline oop oopDesc::decode_heap_oop(oop v)  { return v; }
+
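
This encode/decode pair is what bounds the compressed-oops heap: the stored narrowOop is the offset from the heap base scaled down by the minimum object alignment, so 2^32 slots times an assumed 8-byte alignment covers 2^35 bytes (32 GB). A self-contained round trip with made-up addresses, assuming a 64-bit build:

    #include <cstdint>
    #include <cassert>
    #include <cstdio>

    int main() {
      const unsigned shift = 3;                         // LogMinObjAlignmentInBytes, 8-byte alignment assumed
      uintptr_t heap_base = 0x00007f0000000000ULL;      // made-up heap base
      uintptr_t obj       = heap_base + 0x123456780ULL; // an 8-byte-aligned object inside the heap

      uint64_t delta = (uint64_t)(obj - heap_base) >> shift;   // encode
      assert((delta & 0xffffffff00000000ULL) == 0);            // must fit in 32 bits
      uint32_t narrow = (uint32_t)delta;

      uintptr_t decoded = heap_base + ((uintptr_t)narrow << shift);  // decode
      assert(decoded == obj);

      uint64_t max_bytes = ((uint64_t)1 << 32) << shift;       // 2^35 bytes addressable
      printf("round trip ok, max heap with this encoding: %llu GB\n",
             (unsigned long long)(max_bytes >> 30));
      return 0;
    }
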
+// Load an oop out of the Java heap as is without decoding.
+// Called by GC to check for null before decoding.
+inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
+inline narrowOop oopDesc::load_heap_oop(narrowOop* p)    { return *p; }
+
+// Load and decode an oop out of the Java heap into a wide oop.
+inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
+inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
+  return decode_heap_oop_not_null(*p);
+}
+
+// Load and decode an oop out of the heap accepting null
+inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
+inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
+  return decode_heap_oop(*p);
+}
+
+// Store already encoded heap oop into the heap.
+inline void oopDesc::store_heap_oop(oop* p, oop v)                 { *p = v; }
+inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v)     { *p = v; }
+
+// Encode and store a heap oop.
+inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
+  *p = encode_heap_oop_not_null(v);
+}
+inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
+
+// Encode and store a heap oop allowing for null.
+inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
+  *p = encode_heap_oop(v);
+}
+inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }
+
+// Store heap oop as is for volatile fields.
+inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
+  OrderAccess::release_store_ptr(p, v);
+}
+inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
+                                            narrowOop v) {
+  OrderAccess::release_store(p, v);
+}
+
+inline void oopDesc::release_encode_store_heap_oop_not_null(
+                                                volatile narrowOop* p, oop v) {
+  // heap oop is not pointer sized.
+  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
+}
+
+inline void oopDesc::release_encode_store_heap_oop_not_null(
+                                                      volatile oop* p, oop v) {
+  OrderAccess::release_store_ptr(p, v);
+}
+
+inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
+                                                           oop v) {
+  OrderAccess::release_store_ptr(p, v);
+}
+inline void oopDesc::release_encode_store_heap_oop(
+                                                volatile narrowOop* p, oop v) {
+  OrderAccess::release_store(p, encode_heap_oop(v));
+}
+
+
+// These functions are only used to exchange oop fields in instances,
+// not headers.
+inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
+  if (UseCompressedOops) {
+    // encode exchange value from oop to T
+    narrowOop val = encode_heap_oop(exchange_value);
+    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
+    // decode old from T to oop
+    return decode_heap_oop(old);
+  } else {
+    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
+  }
+}
+
+inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
+                                                volatile HeapWord *dest,
+                                                oop compare_value) {
+  if (UseCompressedOops) {
+    // encode exchange and compare value from oop to T
+    narrowOop val = encode_heap_oop(exchange_value);
+    narrowOop cmp = encode_heap_oop(compare_value);
+
+    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
+    // decode old from T to oop
+    return decode_heap_oop(old);
+  } else {
+    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
+  }
+}
+
+// In order to put or get a field out of an instance, we must first check
+// whether the field is compressed and, if so, uncompress it.
+inline oop oopDesc::obj_field(int offset) const {
+  return UseCompressedOops ?
+    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
+    load_decode_heap_oop(obj_field_addr<oop>(offset));
+}
+inline void oopDesc::obj_field_put(int offset, oop value) {
+  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
+                      oop_store(obj_field_addr<oop>(offset),       value);
+}
+inline void oopDesc::obj_field_raw_put(int offset, oop value) {
+  UseCompressedOops ?
+    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
+    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
+}
 
 inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
 inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }
@@ -107,8 +290,21 @@
 inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
 inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }
 
-inline oop oopDesc::obj_field_acquire(int offset) const                     { return (oop)OrderAccess::load_ptr_acquire(obj_field_addr(offset)); }
-inline void oopDesc::release_obj_field_put(int offset, oop value)           { oop_store((volatile oop*)obj_field_addr(offset), value);           }
+inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
+inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
+
+inline oop oopDesc::obj_field_acquire(int offset) const {
+  return UseCompressedOops ?
+             decode_heap_oop((narrowOop)
+               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
+           : decode_heap_oop((oop)
+               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
+}
+inline void oopDesc::release_obj_field_put(int offset, oop value) {
+  UseCompressedOops ?
+    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
+    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
+}
 
 inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
 inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }
@@ -134,7 +330,6 @@
 inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
 inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }
 
-
 inline int oopDesc::size_given_klass(Klass* klass)  {
   int lh = klass->layout_helper();
   int s  = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
@@ -200,7 +395,7 @@
       // technique) we will need to suitably modify the assertion.
       assert((s == klass->oop_size(this)) ||
              (((UseParNewGC || UseParallelGC) &&
-                                           Universe::heap()->is_gc_active()) &&
+              Universe::heap()->is_gc_active()) &&
               (is_typeArray() ||
                (is_objArray() && is_forwarded()))),
              "wrong array object size");
@@ -224,52 +419,58 @@
   return blueprint()->oop_is_parsable(this);
 }
 
-
-inline void update_barrier_set(oop *p, oop v) {
+inline void update_barrier_set(void* p, oop v) {
   assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
   oopDesc::bs()->write_ref_field(p, v);
 }
 
-
-inline void oop_store(oop* p, oop v) {
+template <class T> inline void oop_store(T* p, oop v) {
   if (always_do_update_barrier) {
-    oop_store((volatile oop*)p, v);
+    oop_store((volatile T*)p, v);
   } else {
-    *p = v;
+    oopDesc::encode_store_heap_oop(p, v);
     update_barrier_set(p, v);
   }
 }
 
-inline void oop_store(volatile oop* p, oop v) {
+template <class T> inline void oop_store(volatile T* p, oop v) {
   // Used by release_obj_field_put, so use release_store_ptr.
-  OrderAccess::release_store_ptr(p, v);
-  update_barrier_set((oop *)p, v);
+  oopDesc::release_encode_store_heap_oop(p, v);
+  update_barrier_set((void*)p, v);
 }
 
-inline void oop_store_without_check(oop* p, oop v) {
+template <class T> inline void oop_store_without_check(T* p, oop v) {
   // XXX YSR FIX ME!!!
   if (always_do_update_barrier) {
-   oop_store(p, v);
+    oop_store(p, v);
   } else {
     assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
            "oop store without store check failed");
-    *p = v;
+    oopDesc::encode_store_heap_oop(p, v);
   }
 }
 
 // When it absolutely has to get there.
-inline void oop_store_without_check(volatile oop* p, oop v) {
+template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
   // XXX YSR FIX ME!!!
   if (always_do_update_barrier) {
     oop_store(p, v);
   } else {
-    assert(!Universe::heap()->barrier_set()->
-                      write_ref_needs_barrier((oop *)p, v),
+    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
            "oop store without store check failed");
-    OrderAccess::release_store_ptr(p, v);
+    oopDesc::release_encode_store_heap_oop(p, v);
   }
 }
 
+// Used to replace *addr = oop assignments where the type of addr depends on
+// UseCompressedOops (so callers need not remember which store function to call).
+inline void oop_store_raw(HeapWord* addr, oop value) {
+  if (UseCompressedOops) {
+    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
+  } else {
+    oopDesc::encode_store_heap_oop((oop*)addr, value);
+  }
+}
 
 // Used only for markSweep, scavenging
 inline bool oopDesc::is_gc_marked() const {
@@ -340,15 +541,17 @@
   if (!Universe::heap()->is_in_reserved(this)) return false;
   return mark()->is_unlocked();
 }
-
-
 #endif // PRODUCT
 
 inline void oopDesc::follow_header() {
-  MarkSweep::mark_and_push((oop*)&_klass);
+  if (UseCompressedOops) {
+    MarkSweep::mark_and_push(compressed_klass_addr());
+  } else {
+    MarkSweep::mark_and_push(klass_addr());
+  }
 }
 
-inline void oopDesc::follow_contents() {
+inline void oopDesc::follow_contents(void) {
   assert (is_gc_marked(), "should be marked");
   blueprint()->oop_follow_contents(this);
 }
@@ -362,7 +565,6 @@
   return mark()->is_marked();
 }
 
-
 // Used by scavengers
 inline void oopDesc::forward_to(oop p) {
   assert(Universe::heap()->is_in_reserved(p),
@@ -384,8 +586,9 @@
 // Note that the forwardee is not the same thing as the displaced_mark.
 // The forwardee is used when copying during scavenge and mark-sweep.
 // It does need to clear the low two locking- and GC-related bits.
-inline oop oopDesc::forwardee() const           { return (oop) mark()->decode_pointer(); }
-
+inline oop oopDesc::forwardee() const {
+  return (oop) mark()->decode_pointer();
+}
 
 inline bool oopDesc::has_displaced_mark() const {
   return mark()->has_displaced_mark_helper();
@@ -432,17 +635,24 @@
   }
 }
 
-
 inline void oopDesc::oop_iterate_header(OopClosure* blk) {
-  blk->do_oop((oop*)&_klass);
+  if (UseCompressedOops) {
+    blk->do_oop(compressed_klass_addr());
+  } else {
+    blk->do_oop(klass_addr());
+  }
 }
 
-
 inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
-  if (mr.contains(&_klass)) blk->do_oop((oop*)&_klass);
+  if (UseCompressedOops) {
+    if (mr.contains(compressed_klass_addr())) {
+      blk->do_oop(compressed_klass_addr());
+    }
+  } else {
+    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
+  }
 }
 
-
 inline int oopDesc::adjust_pointers() {
   debug_only(int check_size = size());
   int s = blueprint()->oop_adjust_pointers(this);
@@ -451,7 +661,11 @@
 }
 
 inline void oopDesc::adjust_header() {
-  MarkSweep::adjust_pointer((oop*)&_klass);
+  if (UseCompressedOops) {
+    MarkSweep::adjust_pointer(compressed_klass_addr());
+  } else {
+    MarkSweep::adjust_pointer(klass_addr());
+  }
 }
 
 #define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
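
For illustration only, not part of the patch: the access helpers above all come down to
the same base-and-shift arithmetic.  A minimal sketch, assuming a heap base 'base' and
8-byte object alignment, which is where the 32 GB reach comes from (2^32 * 8 bytes); the
function names here are made up:

  inline narrowOop encode_sketch(oop v, address base) {
    assert(v != NULL, "null is handled separately and encodes to narrowOop 0");
    uintptr_t off = (uintptr_t)v - (uintptr_t)base;  // byte offset from the heap base
    return (narrowOop)(off >> 3);                    // scaled down to a 32-bit value
  }
  inline oop decode_sketch(narrowOop v, address base) {
    return (oop)(base + ((uintptr_t)v << 3));        // rebuild the full address
  }
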
--- a/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -67,8 +67,8 @@
   // update_header();
   // The klass has moved.  Is the location of the klass
   // within the limits?
-  if ((((HeapWord*)&_klass) >= begin_limit) &&
-      (((HeapWord*)&_klass) < end_limit)) {
+  if ((((HeapWord*)&_metadata._klass) >= begin_limit) &&
+      (((HeapWord*)&_metadata._klass) < end_limit)) {
     set_klass(updated_klass);
   }
 
@@ -89,7 +89,11 @@
 // Used by parallel old GC.
 
 inline void oopDesc::follow_header(ParCompactionManager* cm) {
-  PSParallelCompact::mark_and_push(cm, (oop*)&_klass);
+  if (UseCompressedOops) {
+    PSParallelCompact::mark_and_push(cm, compressed_klass_addr());
+  } else {
+    PSParallelCompact::mark_and_push(cm, klass_addr());
+  }
 }
 
 inline oop oopDesc::forward_to_atomic(oop p) {
@@ -114,9 +118,18 @@
 }
 
 inline void oopDesc::update_header() {
-  PSParallelCompact::adjust_pointer((oop*)&_klass);
+  if (UseCompressedOops) {
+    PSParallelCompact::adjust_pointer(compressed_klass_addr());
+  } else {
+    PSParallelCompact::adjust_pointer(klass_addr());
+  }
 }
 
 inline void oopDesc::update_header(HeapWord* beg_addr, HeapWord* end_addr) {
-  PSParallelCompact::adjust_pointer((oop*)&_klass, beg_addr, end_addr);
+  if (UseCompressedOops) {
+    PSParallelCompact::adjust_pointer(compressed_klass_addr(),
+                                      beg_addr, end_addr);
+  } else {
+    PSParallelCompact::adjust_pointer(klass_addr(), beg_addr, end_addr);
+  }
 }
--- a/hotspot/src/share/vm/oops/oopsHierarchy.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/oops/oopsHierarchy.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -26,21 +26,25 @@
 // This hierarchy is a representation hierarchy, i.e. if A is a superclass
 // of B, A's representation is a prefix of B's representation.
 
+typedef juint narrowOop; // Offset instead of address for an oop within a Java object
+typedef class klassOopDesc* wideKlassOop; // to keep the SA and the unhandled
+                                          // oop detector happy.
+
 #ifndef CHECK_UNHANDLED_OOPS
 
-typedef class oopDesc*                      oop;
+typedef class oopDesc*                            oop;
 typedef class   instanceOopDesc*            instanceOop;
-typedef class   methodOopDesc*              methodOop;
-typedef class   constMethodOopDesc*         constMethodOop;
-typedef class   methodDataOopDesc*          methodDataOop;
-typedef class   arrayOopDesc*               arrayOop;
-typedef class     constantPoolOopDesc*      constantPoolOop;
-typedef class     constantPoolCacheOopDesc* constantPoolCacheOop;
-typedef class     objArrayOopDesc*          objArrayOop;
-typedef class     typeArrayOopDesc*         typeArrayOop;
-typedef class   symbolOopDesc*              symbolOop;
-typedef class   klassOopDesc*               klassOop;
-typedef class   markOopDesc*                markOop;
+typedef class   methodOopDesc*                    methodOop;
+typedef class   constMethodOopDesc*               constMethodOop;
+typedef class   methodDataOopDesc*                methodDataOop;
+typedef class   arrayOopDesc*                     arrayOop;
+typedef class     objArrayOopDesc*                objArrayOop;
+typedef class     typeArrayOopDesc*               typeArrayOop;
+typedef class   constantPoolOopDesc*              constantPoolOop;
+typedef class   constantPoolCacheOopDesc*         constantPoolCacheOop;
+typedef class   symbolOopDesc*                    symbolOop;
+typedef class   klassOopDesc*                     klassOop;
+typedef class   markOopDesc*                      markOop;
 typedef class   compiledICHolderOopDesc*    compiledICHolderOop;
 
 #else
@@ -172,9 +176,9 @@
 class       objArrayKlassKlass;
 class       typeArrayKlassKlass;
 class   arrayKlass;
-class     constantPoolKlass;
-class     constantPoolCacheKlass;
 class     objArrayKlass;
 class     typeArrayKlass;
-class       symbolKlass;
+class   constantPoolKlass;
+class   constantPoolCacheKlass;
+class   symbolKlass;
 class   compiledICHolderKlass;
--- a/hotspot/src/share/vm/opto/buildOopMap.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/buildOopMap.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -315,6 +315,26 @@
         }
       }
 
+    } else if( t->isa_narrowoop() ) {
+      assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
+      // Check for a legal reg name in the oopMap and bailout if it is not.
+      if (!omap->legal_vm_reg_name(r)) {
+        regalloc->C->record_method_not_compilable("illegal oopMap register name");
+        continue;
+      }
+      if( mcall ) {
+          // Outgoing argument GC mask responsibility belongs to the callee,
+          // not the caller.  Inspect the inputs to the call, to see if
+          // this live-range is one of them.
+        uint cnt = mcall->tf()->domain()->cnt();
+        uint j;
+        for( j = TypeFunc::Parms; j < cnt; j++)
+          if( mcall->in(j) == def )
+            break;            // reaching def is an argument oop
+        if( j < cnt )         // arg oops don't go in GC map
+          continue;           // Continue on to the next register
+      }
+      omap->set_narrowoop(r);
     } else if( OptoReg::is_valid(_callees[reg])) { // callee-save?
       // It's a callee-save value
       assert( dup_check[_callees[reg]]==0, "trying to callee save same reg twice" );
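
For illustration only: the isa_narrowoop() branch above records the slot with
set_narrowoop(r) rather than set_oop(r), so a GC walking the frame knows the slot holds
a 32-bit compressed reference that must be decoded before it is followed or updated.
On the consuming side the distinction looks roughly like this (interface assumed):

  //   case OopMapValue::oop_value:        closure->do_oop((oop*)loc);        break;
  //   case OopMapValue::narrowoop_value:  closure->do_oop((narrowOop*)loc);  break;
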
--- a/hotspot/src/share/vm/opto/callnode.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/callnode.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -725,7 +725,8 @@
 
   // Conservatively small estimate of offset of first non-header byte.
   int minimum_header_size() {
-    return is_AllocateArray() ? sizeof(arrayOopDesc) : sizeof(oopDesc);
+    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
+                                instanceOopDesc::base_offset_in_bytes();
   }
 
   // Return the corresponding initialization barrier (or null if none).
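
For illustration only: sizeof(oopDesc) and sizeof(arrayOopDesc) overstate the header once
the klass field shrinks to 4 bytes, which is why the estimate above switches to the
base_offset_in_bytes() accessors.  A sketch of the assumed computation on LP64:

  static int instance_base_offset_sketch() {
    // 8-byte mark word, then a 4-byte or 8-byte klass reference: 12 vs. 16 bytes
    return sizeof(markOop) + (UseCompressedOops ? sizeof(narrowOop) : sizeof(klassOop));
  }
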
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -848,7 +848,7 @@
   // Until we have harmony between classes and interfaces in the type
   // lattice, we must tread carefully around phis which implicitly
   // convert the one to the other.
-  const TypeInstPtr* ttip = _type->isa_instptr();
+  const TypeInstPtr* ttip = _type->isa_narrowoop() ? _type->isa_narrowoop()->make_oopptr()->isa_instptr() : _type->isa_instptr();
   bool is_intf = false;
   if (ttip != NULL) {
     ciKlass* k = ttip->klass();
@@ -867,7 +867,7 @@
       // of all the input types.  The lattice is not distributive in
       // such cases.  Ward off asserts in type.cpp by refusing to do
       // meets between interfaces and proper classes.
-      const TypeInstPtr* tiip = ti->isa_instptr();
+      const TypeInstPtr* tiip = ti->isa_narrowoop() ? ti->is_narrowoop()->make_oopptr()->isa_instptr() : ti->isa_instptr();
       if (tiip) {
         bool ti_is_intf = false;
         ciKlass* k = tiip->klass();
@@ -924,12 +924,15 @@
     // class-typed Phi and an interface flows in, it's possible that the meet &
     // join report an interface back out.  This isn't possible but happens
     // because the type system doesn't interact well with interfaces.
-    const TypeInstPtr *jtip = jt->isa_instptr();
+    const TypeInstPtr *jtip = jt->isa_narrowoop() ? jt->isa_narrowoop()->make_oopptr()->isa_instptr() : jt->isa_instptr();
     if( jtip && ttip ) {
       if( jtip->is_loaded() &&  jtip->klass()->is_interface() &&
-          ttip->is_loaded() && !ttip->klass()->is_interface() )
+          ttip->is_loaded() && !ttip->klass()->is_interface() ) {
         // Happens in a CTW of rt.jar, 320-341, no extra flags
-        { assert(ft == ttip->cast_to_ptr_type(jtip->ptr()), ""); jt = ft; }
+        assert(ft == ttip->cast_to_ptr_type(jtip->ptr()) ||
+               ft->isa_narrowoop() && ft->isa_narrowoop()->make_oopptr() == ttip->cast_to_ptr_type(jtip->ptr()), "");
+        jt = ft;
+      }
     }
     if (jt != ft && jt->base() == ft->base()) {
       if (jt->isa_int() &&
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -682,6 +682,7 @@
           break;
         case Op_RegF:
         case Op_RegI:
+        case Op_RegN:
         case Op_RegFlags:
         case 0:                 // not an ideal register
           lrg.set_num_regs(1);
--- a/hotspot/src/share/vm/opto/classes.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/classes.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -64,6 +64,7 @@
 macro(CMoveI)
 macro(CMoveL)
 macro(CMoveP)
+macro(CmpN)
 macro(CmpD)
 macro(CmpD3)
 macro(CmpF)
@@ -77,7 +78,9 @@
 macro(CompareAndSwapI)
 macro(CompareAndSwapL)
 macro(CompareAndSwapP)
+macro(CompareAndSwapN)
 macro(Con)
+macro(ConN)
 macro(ConD)
 macro(ConF)
 macro(ConI)
@@ -100,6 +103,7 @@
 macro(CountedLoop)
 macro(CountedLoopEnd)
 macro(CreateEx)
+macro(DecodeN)
 macro(DivD)
 macro(DivF)
 macro(DivI)
@@ -107,6 +111,7 @@
 macro(DivMod)
 macro(DivModI)
 macro(DivModL)
+macro(EncodeP)
 macro(ExpD)
 macro(FastLock)
 macro(FastUnlock)
@@ -133,6 +138,7 @@
 macro(LoadPLocked)
 macro(LoadLLocked)
 macro(LoadP)
+macro(LoadN)
 macro(LoadRange)
 macro(LoadS)
 macro(Lock)
@@ -201,6 +207,7 @@
 macro(StoreI)
 macro(StoreL)
 macro(StoreP)
+macro(StoreN)
 macro(StrComp)
 macro(SubD)
 macro(SubF)
--- a/hotspot/src/share/vm/opto/compile.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/compile.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1031,6 +1031,10 @@
       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset, ta->instance_id());
     }
     // Arrays of known objects become arrays of unknown objects.
+    if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
+      const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
+      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset, ta->instance_id());
+    }
     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset, ta->instance_id());
@@ -1069,7 +1073,7 @@
     }
     // Canonicalize the holder of this field
     ciInstanceKlass *k = to->klass()->as_instance_klass();
-    if (offset >= 0 && offset < oopDesc::header_size() * wordSize) {
+    if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
       // First handle header references such as a LoadKlassNode, even if the
       // object's klass is unloaded at compile time (4965979).
       tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset, to->instance_id());
@@ -1310,7 +1314,7 @@
 
     // Check for final instance fields.
     const TypeInstPtr* tinst = flat->isa_instptr();
-    if (tinst && tinst->offset() >= oopDesc::header_size() * wordSize) {
+    if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
       ciInstanceKlass *k = tinst->klass()->as_instance_klass();
       ciField* field = k->get_field_by_offset(tinst->offset(), false);
       // Set field() and is_rewritable() attributes.
@@ -1731,6 +1735,8 @@
           starts_bundle = '+';
       }
 
+      if (WizardMode) n->dump();
+
       if( !n->is_Region() &&    // Dont print in the Assembly
           !n->is_Phi() &&       // a few noisely useless nodes
           !n->is_Proj() &&
@@ -1755,6 +1761,8 @@
       // then back up and print it
       if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
         assert(delay != NULL, "no unconditional delay instruction");
+        if (WizardMode) delay->dump();
+
         if (node_bundling(delay)->starts_bundle())
           starts_bundle = '+';
         if (pcs && n->_idx < pc_limit)
@@ -1819,7 +1827,7 @@
 static bool oop_offset_is_sane(const TypeInstPtr* tp) {
   ciInstanceKlass *k = tp->klass()->as_instance_klass();
   // Make sure the offset goes inside the instance layout.
-  return (uint)tp->offset() < (uint)(oopDesc::header_size() + k->nonstatic_field_size())*wordSize;
+  return k->contains_field_offset(tp->offset());
   // Note that OffsetBot and OffsetTop are very negative.
 }
 
@@ -1946,7 +1954,9 @@
   case Op_CompareAndSwapI:
   case Op_CompareAndSwapL:
   case Op_CompareAndSwapP:
+  case Op_CompareAndSwapN:
   case Op_StoreP:
+  case Op_StoreN:
   case Op_LoadB:
   case Op_LoadC:
   case Op_LoadI:
@@ -1956,6 +1966,7 @@
   case Op_LoadPLocked:
   case Op_LoadLLocked:
   case Op_LoadP:
+  case Op_LoadN:
   case Op_LoadRange:
   case Op_LoadS: {
   handle_mem:
--- a/hotspot/src/share/vm/opto/connode.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/connode.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -35,6 +35,7 @@
 
 //------------------------------make-------------------------------------------
 ConNode *ConNode::make( Compile* C, const Type *t ) {
+  if (t->isa_narrowoop()) return new (C, 1) ConNNode( t->is_narrowoop() );
   switch( t->basic_type() ) {
   case T_INT:       return new (C, 1) ConINode( t->is_int() );
   case T_ARRAY:     return new (C, 1) ConPNode( t->is_aryptr() );
@@ -461,7 +462,8 @@
     possible_alias = n->is_Phi() ||
         opc == Op_CheckCastPP ||
         opc == Op_StorePConditional ||
-        opc == Op_CompareAndSwapP;
+        opc == Op_CompareAndSwapP ||
+        opc == Op_CompareAndSwapN;
   }
   return possible_alias;
 }
@@ -549,6 +551,41 @@
   return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
 }
 
+
+Node* DecodeNNode::Identity(PhaseTransform* phase) {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return in(1);
+
+  if (in(1)->Opcode() == Op_EncodeP) {
+    // (DecodeN (EncodeP p)) -> p
+    return in(1)->in(1);
+  }
+  return this;
+}
+
+Node* EncodePNode::Identity(PhaseTransform* phase) {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return in(1);
+
+  if (in(1)->Opcode() == Op_DecodeN) {
+    // (EncodeP (DecodeN p)) -> p
+    return in(1)->in(1);
+  }
+  return this;
+}
+
+
+Node* EncodePNode::encode(PhaseGVN* phase, Node* value) {
+  const Type* newtype = value->bottom_type();
+  if (newtype == TypePtr::NULL_PTR) {
+    return phase->transform(new (phase->C, 1) ConNNode(TypeNarrowOop::NULL_PTR));
+  } else {
+    return phase->transform(new (phase->C, 2) EncodePNode(value,
+                                                          newtype->is_oopptr()->make_narrowoop()));
+  }
+}
+
+
 //=============================================================================
 //------------------------------Identity---------------------------------------
 Node *Conv2BNode::Identity( PhaseTransform *phase ) {
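
For illustration only: the two Identity rules above let GVN cancel compress/decompress
round trips, so a value that is encoded for a narrow store and immediately decoded for a
later use costs nothing.  A sketch, with t assumed to be an oop pointer type:

  //   Node* narrow = gvn.transform(new (C, 2) EncodePNode(p, t->make_narrowoop()));
  //   Node* wide   = gvn.transform(new (C, 2) DecodeNNode(narrow, t));
  //   // Identity sees (DecodeN (EncodeP p)) and hands back p, so 'wide' is just 'p'.
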
--- a/hotspot/src/share/vm/opto/connode.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/connode.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -78,6 +78,20 @@
 };
 
 
+//------------------------------ConNNode--------------------------------------
+// Simple narrow oop constants
+class ConNNode : public ConNode {
+public:
+  ConNNode( const TypeNarrowOop *t ) : ConNode(t) {}
+  virtual int Opcode() const;
+
+  static ConNNode* make( Compile *C, ciObject* con ) {
+    return new (C, 1) ConNNode( TypeNarrowOop::make_from_constant(con) );
+  }
+
+};
+
+
 //------------------------------ConLNode---------------------------------------
 // Simple long constants
 class ConLNode : public ConNode {
@@ -254,6 +268,41 @@
   //virtual Node *Ideal_DU_postCCP( PhaseCCP * );
 };
 
+
+//------------------------------EncodeP--------------------------------
+// Encodes an oop pointer into its compressed form
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class EncodePNode : public TypeNode {
+ public:
+  EncodePNode(Node* value, const Type* type):
+    TypeNode(type, 2) {
+    init_req(0, NULL);
+    init_req(1, value);
+  }
+  virtual int Opcode() const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual uint  ideal_reg() const { return Op_RegN; }
+
+  static Node* encode(PhaseGVN* phase, Node* value);
+};
+
+//------------------------------DecodeN--------------------------------
+// Converts a narrow oop into a real oop ptr.
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class DecodeNNode : public TypeNode {
+ public:
+  DecodeNNode(Node* value, const Type* type):
+    TypeNode(type, 2) {
+    init_req(0, NULL);
+    init_req(1, value);
+  }
+  virtual int Opcode() const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual uint  ideal_reg() const { return Op_RegP; }
+};
+
 //------------------------------Conv2BNode-------------------------------------
 // Convert int/pointer to a Boolean.  Map zero to zero, all else to 1.
 class Conv2BNode : public Node {
--- a/hotspot/src/share/vm/opto/escape.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/escape.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1749,15 +1749,28 @@
       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
       break;
     }
+    case Op_ConN:
+    {
+      // assume all narrow oop constants globally escape except for null
+      PointsToNode::EscapeState es;
+      if (phase->type(n) == TypeNarrowOop::NULL_PTR)
+        es = PointsToNode::NoEscape;
+      else
+        es = PointsToNode::GlobalEscape;
+
+      add_node(n, PointsToNode::JavaObject, es, true);
+      break;
+    }
     case Op_LoadKlass:
     {
       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
       break;
     }
     case Op_LoadP:
+    case Op_LoadN:
     {
       const Type *t = phase->type(n);
-      if (t->isa_ptr() == NULL) {
+      if (!t->isa_narrowoop() && t->isa_ptr() == NULL) {
         _processed.set(n->_idx);
         return;
       }
@@ -1847,8 +1860,12 @@
       break;
     }
     case Op_StoreP:
+    case Op_StoreN:
     {
       const Type *adr_type = phase->type(n->in(MemNode::Address));
+      if (adr_type->isa_narrowoop()) {
+        adr_type = adr_type->is_narrowoop()->make_oopptr();
+      }
       if (adr_type->isa_oopptr()) {
         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
       } else {
@@ -1870,8 +1887,12 @@
     }
     case Op_StorePConditional:
     case Op_CompareAndSwapP:
+    case Op_CompareAndSwapN:
     {
       const Type *adr_type = phase->type(n->in(MemNode::Address));
+      if (adr_type->isa_narrowoop()) {
+        adr_type = adr_type->is_narrowoop()->make_oopptr();
+      }
       if (adr_type->isa_oopptr()) {
         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
       } else {
@@ -1927,6 +1948,8 @@
     }
     case Op_CastPP:
     case Op_CheckCastPP:
+    case Op_EncodeP:
+    case Op_DecodeN:
     {
       int ti = n->in(1)->_idx;
       if (_nodes->adr_at(ti)->node_type() == PointsToNode::JavaObject) {
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1328,7 +1328,7 @@
   if (require_atomic_access && bt == T_LONG) {
     ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
   } else {
-    ld = LoadNode::make(C, ctl, mem, adr, adr_type, t, bt);
+    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
   }
   return _gvn.transform(ld);
 }
@@ -1344,7 +1344,7 @@
   if (require_atomic_access && bt == T_LONG) {
     st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
   } else {
-    st = StoreNode::make(C, ctl, mem, adr, adr_type, val, bt);
+    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
   }
   st = _gvn.transform(st);
   set_memory(st, adr_idx);
--- a/hotspot/src/share/vm/opto/idealKit.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/idealKit.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -345,7 +345,7 @@
   if (require_atomic_access && bt == T_LONG) {
     ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
   } else {
-    ld = LoadNode::make(C, ctl, mem, adr, adr_type, t, bt);
+    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
   }
   return transform(ld);
 }
@@ -361,7 +361,7 @@
   if (require_atomic_access && bt == T_LONG) {
     st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
   } else {
-    st = StoreNode::make(C, ctl, mem, adr, adr_type, val, bt);
+    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
   }
   st = transform(st);
   set_memory(st, adr_idx);
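
For illustration only: LoadNode::make and StoreNode::make now take the PhaseGVN rather
than just the Compile object because, under compressed oops, an object access expands
into two nodes (a LoadN feeding a DecodeN, or an EncodeP feeding a StoreN) and the inner
node has to be transformed on the spot.  Call sites keep the same shape:

  //   Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, type, T_OBJECT);
  //   ld = _gvn.transform(ld);    // 'ld' may be a DecodeN whose input is the LoadN
  //   Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, T_OBJECT);
  //   st = _gvn.transform(st);    // 'st' may be a StoreN of an EncodeP of 'val'
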
--- a/hotspot/src/share/vm/opto/lcm.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -110,6 +110,7 @@
     case Op_LoadI:
     case Op_LoadL:
     case Op_LoadP:
+    case Op_LoadN:
     case Op_LoadS:
     case Op_LoadKlass:
     case Op_LoadRange:
@@ -124,6 +125,7 @@
     case Op_StoreI:
     case Op_StoreL:
     case Op_StoreP:
+    case Op_StoreN:
       was_store = true;         // Memory op is a store op
       // Stores will have their address in slot 2 (memory in slot 1).
       // If the value being nul-checked is in another slot, it means we
--- a/hotspot/src/share/vm/opto/library_call.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1847,7 +1847,7 @@
 
     // See if it is a narrow oop array.
     if (adr_type->isa_aryptr()) {
-      if (adr_type->offset() >= objArrayOopDesc::header_size() * wordSize) {
+      if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes(type)) {
         const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
         if (elem_type != NULL) {
           sharpened_klass = elem_type->klass();
@@ -2164,10 +2164,19 @@
     cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
     break;
   case T_OBJECT:
-    // reference stores need a store barrier.
+     // reference stores need a store barrier.
     // (They don't if CAS fails, but it isn't worth checking.)
     pre_barrier(control(), base, adr, alias_idx, newval, value_type, T_OBJECT);
-    cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
+#ifdef _LP64
+    if (adr->bottom_type()->is_narrow()) {
+      cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr,
+                                                           EncodePNode::encode(&_gvn, newval),
+                                                           EncodePNode::encode(&_gvn, oldval)));
+    } else
+#endif
+      {
+        cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
+      }
     post_barrier(control(), cas, base, adr, alias_idx, newval, T_OBJECT, true);
     break;
   default:
@@ -3824,7 +3833,15 @@
     Node* size = _gvn.transform(alloc_siz);
 
     // Exclude the header.
-    int base_off = sizeof(oopDesc);
+    int base_off = instanceOopDesc::base_offset_in_bytes();
+    if (UseCompressedOops) {
+      // copy the header gap though.
+      Node* sptr = basic_plus_adr(src,  base_off);
+      Node* dptr = basic_plus_adr(dest, base_off);
+      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, raw_adr_type);
+      store_to_memory(control(), dptr, sval, T_INT, raw_adr_type);
+      base_off += sizeof(int);
+    }
     src  = basic_plus_adr(src,  base_off);
     dest = basic_plus_adr(dest, base_off);
     end  = basic_plus_adr(end,  size);
@@ -4389,7 +4406,7 @@
     // Let's see if we need card marks:
     if (alloc != NULL && use_ReduceInitialCardMarks()) {
       // If we do not need card marks, copy using the jint or jlong stub.
-      copy_type = LP64_ONLY(T_LONG) NOT_LP64(T_INT);
+      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
              "sizes agree");
     }
@@ -4715,23 +4732,25 @@
       int to_clear = (bump_bit | clear_low);
       // Align up mod 8, then store a jint zero unconditionally
       // just before the mod-8 boundary.
-      // This would only fail if the first array element were immediately
-      // after the length field, and were also at an even offset mod 8.
-      assert(((abase + bump_bit) & ~to_clear) - BytesPerInt
-             >= arrayOopDesc::length_offset_in_bytes() + BytesPerInt,
-             "store must not trash length field");
-
-      // Bump 'start' up to (or past) the next jint boundary:
-      start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) );
+      if (((abase + bump_bit) & ~to_clear) - bump_bit
+          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
+        bump_bit = 0;
+        assert((abase & to_clear) == 0, "array base must be long-aligned");
+      } else {
+        // Bump 'start' up to (or past) the next jint boundary:
+        start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) );
+        assert((abase & clear_low) == 0, "array base must be int-aligned");
+      }
       // Round bumped 'start' down to jlong boundary in body of array.
       start = _gvn.transform( new(C,3) AndXNode(start, MakeConX(~to_clear)) );
-      // Store a zero to the immediately preceding jint:
-      Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-BytesPerInt)) );
-      Node* p1 = basic_plus_adr(dest, x1);
-      mem = StoreNode::make(C, control(), mem, p1, adr_type, intcon(0), T_INT);
-      mem = _gvn.transform(mem);
+      if (bump_bit != 0) {
+        // Store a zero to the immediately preceding jint:
+        Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-bump_bit)) );
+        Node* p1 = basic_plus_adr(dest, x1);
+        mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
+        mem = _gvn.transform(mem);
+      }
     }
-
     Node* end = dest_size; // pre-rounded
     mem = ClearArrayNode::clear_memory(control(), mem, dest,
                                        start, end, &_gvn);
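
For illustration only: the clone change above assumes the LP64 compressed-oops layout,
where the bulk copy has to start on a word boundary but the 4 bytes after the narrow
klass may already hold the first instance field:

  //   offset  0..7    mark word         (set up by the allocation, not copied)
  //   offset  8..11   compressed klass  (set up by the allocation, not copied)
  //   offset 12..15   klass gap         (copied as a jint by the code above)
  //   offset 16..     remaining fields  (word copy starting at base_off + sizeof(int))
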
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1513,7 +1513,8 @@
              (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
              (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
              (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
-             (bol->in(1)->Opcode() == Op_CompareAndSwapP )))
+             (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
+             (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
           return;               // Allocation loops RARELY take backedge
         // Find the OTHER exit path from the IF
         Node* ex = iff->proj_out(1-test_con);
--- a/hotspot/src/share/vm/opto/machnode.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/machnode.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -263,6 +263,13 @@
     // See if it adds up to a base + offset.
     if (index != NULL) {
       if (!index->is_Con()) {
+        const TypeNarrowOop* narrowoop = index->bottom_type()->isa_narrowoop();
+        if (narrowoop != NULL) {
+          // Memory references through narrow oops have a
+          // funny base so grab the type from the index.
+          adr_type = narrowoop->make_oopptr();
+          return NULL;
+        }
         disp = Type::OffsetBot;
       } else if (disp != Type::OffsetBot) {
         const TypeX* ti = index->bottom_type()->isa_intptr_t();
--- a/hotspot/src/share/vm/opto/macro.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/macro.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -819,7 +819,7 @@
 Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
   Node* adr = basic_plus_adr(base, offset);
   const TypePtr* adr_type = TypeRawPtr::BOTTOM;
-  Node* value = LoadNode::make(C, ctl, mem, adr, adr_type, value_type, bt);
+  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt);
   transform_later(value);
   return value;
 }
@@ -827,7 +827,7 @@
 
 Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
   Node* adr = basic_plus_adr(base, offset);
-  mem = StoreNode::make(C, ctl, mem, adr, NULL, value, bt);
+  mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt);
   transform_later(mem);
   return mem;
 }
@@ -1270,6 +1270,13 @@
     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
   }
   rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
+
+  if (UseCompressedOops) {
+    Node *zeronode = makecon(TypeInt::ZERO);
+    // store an uncompressed zero into the klass gap to clear it.  The gap
+    // may be used for primitive fields and has to be zeroed.
+    rawmem = make_store(control, rawmem, object, oopDesc::klass_gap_offset_in_bytes(), zeronode, T_INT);
+  }
   rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT);
   int header_size = alloc->minimum_header_size();  // conservatively small
 
@@ -1277,7 +1284,7 @@
   if (length != NULL) {         // Arrays need length field
     rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
     // conservatively small header size:
-    header_size = sizeof(arrayOopDesc);
+    header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
     ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
     if (k->is_array_klass())    // we know the exact header size in most cases:
       header_size = Klass::layout_helper_header_size(k->layout_helper());
@@ -1306,7 +1313,6 @@
       rawmem = init->complete_stores(control, rawmem, object,
                                      header_size, size_in_bytes, &_igvn);
     }
-
     // We have no more use for this link, since the AllocateNode goes away:
     init->set_req(InitializeNode::RawAddress, top());
     // (If we keep the link, it just confuses the register allocator,
@@ -1705,6 +1711,8 @@
     assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
     if (C->failing())  return true;
   }
+
+  _igvn.set_delay_transform(false);
   _igvn.optimize();
   return false;
 }
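
For illustration only: with UseCompressedOops the allocation expansion above initializes
the raw object header with three stores (offsets assume the LP64 layout sketched earlier):

  //   store T_ADDRESS prototype mark @ mark_offset_in_bytes()       // offset 0
  //   store T_INT     0              @ klass_gap_offset_in_bytes()  // offset 12, clears the gap
  //   store T_OBJECT  klass_node     @ klass_offset_in_bytes()      // offset 8, narrowed to
  //                                                                 // 32 bits by StoreNode::make
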
--- a/hotspot/src/share/vm/opto/macro.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/macro.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -110,7 +110,9 @@
                             Node* length);
 
 public:
-  PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) {}
+  PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) {
+    _igvn.set_delay_transform(true);
+  }
   bool expand_macro_nodes();
 
 };
--- a/hotspot/src/share/vm/opto/matcher.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -30,7 +30,7 @@
 
 
 const int Matcher::base2reg[Type::lastype] = {
-  Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0,
+  Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0, Op_RegN,
   Node::NotAMachineReg, Node::NotAMachineReg, /* tuple, array */
   Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, /* the pointers */
   0, 0/*abio*/,
@@ -70,12 +70,14 @@
   C->set_matcher(this);
 
   idealreg2spillmask[Op_RegI] = NULL;
+  idealreg2spillmask[Op_RegN] = NULL;
   idealreg2spillmask[Op_RegL] = NULL;
   idealreg2spillmask[Op_RegF] = NULL;
   idealreg2spillmask[Op_RegD] = NULL;
   idealreg2spillmask[Op_RegP] = NULL;
 
   idealreg2debugmask[Op_RegI] = NULL;
+  idealreg2debugmask[Op_RegN] = NULL;
   idealreg2debugmask[Op_RegL] = NULL;
   idealreg2debugmask[Op_RegF] = NULL;
   idealreg2debugmask[Op_RegD] = NULL;
@@ -366,17 +368,19 @@
 void Matcher::init_first_stack_mask() {
 
   // Allocate storage for spill masks as masks for the appropriate load type.
-  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask)*10);
-  idealreg2spillmask[Op_RegI] = &rms[0];
-  idealreg2spillmask[Op_RegL] = &rms[1];
-  idealreg2spillmask[Op_RegF] = &rms[2];
-  idealreg2spillmask[Op_RegD] = &rms[3];
-  idealreg2spillmask[Op_RegP] = &rms[4];
-  idealreg2debugmask[Op_RegI] = &rms[5];
-  idealreg2debugmask[Op_RegL] = &rms[6];
-  idealreg2debugmask[Op_RegF] = &rms[7];
-  idealreg2debugmask[Op_RegD] = &rms[8];
-  idealreg2debugmask[Op_RegP] = &rms[9];
+  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask)*12);
+  idealreg2spillmask[Op_RegN] = &rms[0];
+  idealreg2spillmask[Op_RegI] = &rms[1];
+  idealreg2spillmask[Op_RegL] = &rms[2];
+  idealreg2spillmask[Op_RegF] = &rms[3];
+  idealreg2spillmask[Op_RegD] = &rms[4];
+  idealreg2spillmask[Op_RegP] = &rms[5];
+  idealreg2debugmask[Op_RegN] = &rms[6];
+  idealreg2debugmask[Op_RegI] = &rms[7];
+  idealreg2debugmask[Op_RegL] = &rms[8];
+  idealreg2debugmask[Op_RegF] = &rms[9];
+  idealreg2debugmask[Op_RegD] = &rms[10];
+  idealreg2debugmask[Op_RegP] = &rms[11];
 
   OptoReg::Name i;
 
@@ -399,6 +403,10 @@
   C->FIRST_STACK_mask().set_AllStack();
 
   // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
+#ifdef _LP64
+  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
+   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
+#endif
   *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
    idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
   *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
@@ -413,6 +421,7 @@
   // Make up debug masks.  Any spill slot plus callee-save registers.
   // Caller-save registers are assumed to be trashable by the various
   // inline-cache fixup routines.
+  *idealreg2debugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
   *idealreg2debugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
   *idealreg2debugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
   *idealreg2debugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
@@ -428,6 +437,7 @@
     if( _register_save_policy[i] == 'C' ||
         _register_save_policy[i] == 'A' ||
         (_register_save_policy[i] == 'E' && exclude_soe) ) {
+      idealreg2debugmask[Op_RegN]->Remove(i);
       idealreg2debugmask[Op_RegI]->Remove(i); // Exclude save-on-call
       idealreg2debugmask[Op_RegL]->Remove(i); // registers from debug
       idealreg2debugmask[Op_RegF]->Remove(i); // masks
@@ -661,6 +671,9 @@
   set_shared(fp);
 
   // Compute generic short-offset Loads
+#ifdef _LP64
+  MachNode *spillCP = match_tree(new (C, 3) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
+#endif
   MachNode *spillI  = match_tree(new (C, 3) LoadINode(NULL,mem,fp,atp));
   MachNode *spillL  = match_tree(new (C, 3) LoadLNode(NULL,mem,fp,atp));
   MachNode *spillF  = match_tree(new (C, 3) LoadFNode(NULL,mem,fp,atp));
@@ -670,6 +683,9 @@
          spillD != NULL && spillP != NULL, "");
 
   // Get the ADLC notion of the right regmask, for each basic type.
+#ifdef _LP64
+  idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
+#endif
   idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
   idealreg2regmask[Op_RegL] = &spillL->out_RegMask();
   idealreg2regmask[Op_RegF] = &spillF->out_RegMask();
@@ -1227,6 +1243,13 @@
       if( j == max_scan )       // No post-domination before scan end?
         return true;            // Then break the match tree up
     }
+
+    if (m->Opcode() == Op_DecodeN && m->outcnt() == 2) {
+      // These are commonly used in address expressions and can
+      // efficiently fold into them in some cases but because they are
+      // consumed by AddP they commonly have two users.
+      if (m->raw_out(0) == m->raw_out(1) && m->raw_out(0)->Opcode() == Op_AddP) return false;
+    }
   }
 
   // Not forceably cloning.  If shared, put it into a register.
@@ -1714,6 +1737,7 @@
       case Op_StoreI:
       case Op_StoreL:
       case Op_StoreP:
+      case Op_StoreN:
       case Op_Store16B:
       case Op_Store8B:
       case Op_Store4B:
@@ -1739,6 +1763,7 @@
       case Op_LoadL:
       case Op_LoadS:
       case Op_LoadP:
+      case Op_LoadN:
       case Op_LoadRange:
       case Op_LoadD_unaligned:
       case Op_LoadL_unaligned:
@@ -1853,7 +1878,8 @@
       case Op_StoreLConditional:
       case Op_CompareAndSwapI:
       case Op_CompareAndSwapL:
-      case Op_CompareAndSwapP: {   // Convert trinary to binary-tree
+      case Op_CompareAndSwapP:
+      case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
         Node *newval = n->in(MemNode::ValueIn );
         Node *oldval  = n->in(LoadStoreNode::ExpectedIn);
         Node *pair = new (C, 3) BinaryNode( oldval, newval );
@@ -1905,22 +1931,25 @@
     // During matching If's have Bool & Cmp side-by-side
     BoolNode *b = iff->in(1)->as_Bool();
     Node *cmp = iff->in(2);
-    if( cmp->Opcode() == Op_CmpP ) {
-      if( cmp->in(2)->bottom_type() == TypePtr::NULL_PTR ) {
+    int opc = cmp->Opcode();
+    if (opc != Op_CmpP && opc != Op_CmpN) return;
+
+    const Type* ct = cmp->in(2)->bottom_type();
+    if (ct == TypePtr::NULL_PTR ||
+        (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
 
-        if( proj->Opcode() == Op_IfTrue ) {
-          extern int all_null_checks_found;
-          all_null_checks_found++;
-          if( b->_test._test == BoolTest::ne ) {
-            _null_check_tests.push(proj);
-            _null_check_tests.push(cmp->in(1));
-          }
-        } else {
-          assert( proj->Opcode() == Op_IfFalse, "" );
-          if( b->_test._test == BoolTest::eq ) {
-            _null_check_tests.push(proj);
-            _null_check_tests.push(cmp->in(1));
-          }
+      if( proj->Opcode() == Op_IfTrue ) {
+        extern int all_null_checks_found;
+        all_null_checks_found++;
+        if( b->_test._test == BoolTest::ne ) {
+          _null_check_tests.push(proj);
+          _null_check_tests.push(cmp->in(1));
+        }
+      } else {
+        assert( proj->Opcode() == Op_IfFalse, "" );
+        if( b->_test._test == BoolTest::eq ) {
+          _null_check_tests.push(proj);
+          _null_check_tests.push(cmp->in(1));
         }
       }
     }
@@ -2038,6 +2067,7 @@
         xop == Op_FastLock ||
         xop == Op_CompareAndSwapL ||
         xop == Op_CompareAndSwapP ||
+        xop == Op_CompareAndSwapN ||
         xop == Op_CompareAndSwapI)
       return true;
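
For illustration only: the null-check matcher above now accepts CmpN because, under
compressed oops, a test such as "if (p == null)" is compiled against the narrow form of
the reference.  The shape that feeds implicit-null-check selection becomes roughly:

  //   LoadN  mem, adr                  // 32-bit compressed reference
  //   CmpN   loadN, ConN(narrow null)  // compare against TypeNarrowOop::NULL_PTR
  //   Bool   ne/eq
  //   If     ...
  // The projection and the tested value are pushed on _null_check_tests just as in the
  // CmpP case.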
 
--- a/hotspot/src/share/vm/opto/memnode.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -549,6 +549,10 @@
         adr = adr->in(AddPNode::Base);
         continue;
 
+      case Op_DecodeN:         // No change to NULL-ness, so peek through
+        adr = adr->in(1);
+        continue;
+
       case Op_CastPP:
         // If the CastPP is useless, just peek on through it.
         if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
@@ -605,6 +609,7 @@
       case Op_CastX2P:          // no null checks on native pointers
       case Op_Parm:             // 'this' pointer is not null
       case Op_LoadP:            // Loading from within a klass
+      case Op_LoadN:            // Loading from within a klass
       case Op_LoadKlass:        // Loading from within a klass
       case Op_ConP:             // Loading from a klass
       case Op_CreateEx:         // Sucking up the guts of an exception oop
@@ -669,7 +674,9 @@
 
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
-LoadNode *LoadNode::make( Compile *C, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
+Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
+  Compile* C = gvn.C;
+
   // sanity check the alias category against the created node type
   assert(!(adr_type->isa_oopptr() &&
            adr_type->offset() == oopDesc::klass_offset_in_bytes()),
@@ -687,7 +694,25 @@
   case T_FLOAT:   return new (C, 3) LoadFNode(ctl, mem, adr, adr_type, rt              );
   case T_DOUBLE:  return new (C, 3) LoadDNode(ctl, mem, adr, adr_type, rt              );
   case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr()    );
-  case T_OBJECT:  return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
+  case T_OBJECT:
+#ifdef _LP64
+    if (adr->bottom_type()->is_narrow()) {
+      const TypeNarrowOop* narrowtype;
+      if (rt->isa_narrowoop()) {
+        narrowtype = rt->is_narrowoop();
+        rt = narrowtype->make_oopptr();
+      } else {
+        narrowtype = rt->is_oopptr()->make_narrowoop();
+      }
+      Node* load  = gvn.transform(new (C, 3) LoadNNode(ctl, mem, adr, adr_type, narrowtype));
+
+      return new (C, 2) DecodeNNode(load, rt);
+    } else
+#endif
+      {
+        assert(!adr->bottom_type()->is_narrow(), "should have got back a narrow oop");
+        return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
+      }
   }
   ShouldNotReachHere();
   return (LoadNode*)NULL;
@@ -1743,7 +1768,9 @@
 //=============================================================================
 //---------------------------StoreNode::make-----------------------------------
 // Polymorphic factory method:
-StoreNode* StoreNode::make( Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
+StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
+  Compile* C = gvn.C;
+
   switch (bt) {
   case T_BOOLEAN:
   case T_BYTE:    return new (C, 4) StoreBNode(ctl, mem, adr, adr_type, val);
@@ -1754,7 +1781,27 @@
   case T_FLOAT:   return new (C, 4) StoreFNode(ctl, mem, adr, adr_type, val);
   case T_DOUBLE:  return new (C, 4) StoreDNode(ctl, mem, adr, adr_type, val);
   case T_ADDRESS:
-  case T_OBJECT:  return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val);
+  case T_OBJECT:
+#ifdef _LP64
+    if (adr->bottom_type()->is_narrow() ||
+        (UseCompressedOops && val->bottom_type()->isa_klassptr() &&
+         adr->bottom_type()->isa_rawptr())) {
+      const TypePtr* type = val->bottom_type()->is_ptr();
+      Node* cp;
+      if (type->isa_oopptr()) {
+        const TypeNarrowOop* etype = type->is_oopptr()->make_narrowoop();
+        cp = gvn.transform(new (C, 2) EncodePNode(val, etype));
+      } else if (type == TypePtr::NULL_PTR) {
+        cp = gvn.transform(new (C, 1) ConNNode(TypeNarrowOop::NULL_PTR));
+      } else {
+        ShouldNotReachHere();
+      }
+      return new (C, 4) StoreNNode(ctl, mem, adr, adr_type, cp);
+    } else
+#endif
+      {
+        return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val);
+      }
   }
   ShouldNotReachHere();
   return (StoreNode*)NULL;
@@ -2136,7 +2183,7 @@
     Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(C, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
     mem = phase->transform(mem);
     offset += BytesPerInt;
   }
@@ -2199,7 +2246,7 @@
     Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(done_offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(C, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
     mem = phase->transform(mem);
     done_offset += BytesPerInt;
   }
@@ -2556,9 +2603,7 @@
   assert(allocation() != NULL, "must be present");
 
   // no negatives, no header fields:
-  if (start < (intptr_t) sizeof(oopDesc))  return FAIL;
-  if (start < (intptr_t) sizeof(arrayOopDesc) &&
-      start < (intptr_t) allocation()->minimum_header_size())  return FAIL;
+  if (start < (intptr_t) allocation()->minimum_header_size())  return FAIL;
 
   // after a certain size, we bail out on tracking all the stores:
   intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
@@ -2895,14 +2940,14 @@
     if (!split) {
       ++new_long;
       off[nst] = offset;
-      st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp,
+      st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                   phase->longcon(con), T_LONG);
     } else {
       // Omit either if it is a zero.
       if (con0 != 0) {
         ++new_int;
         off[nst]  = offset;
-        st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp,
+        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                     phase->intcon(con0), T_INT);
       }
       if (con1 != 0) {
@@ -2910,7 +2955,7 @@
         offset += BytesPerInt;
         adr = make_raw_address(offset, phase);
         off[nst]  = offset;
-        st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp,
+        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                     phase->intcon(con1), T_INT);
       }
     }
@@ -3018,9 +3063,10 @@
   Node* zmem = zero_memory();   // initially zero memory state
   Node* inits = zmem;           // accumulating a linearized chain of inits
   #ifdef ASSERT
-  intptr_t last_init_off = sizeof(oopDesc);  // previous init offset
-  intptr_t last_init_end = sizeof(oopDesc);  // previous init offset+size
-  intptr_t last_tile_end = sizeof(oopDesc);  // previous tile offset+size
+  intptr_t first_offset = allocation()->minimum_header_size();
+  intptr_t last_init_off = first_offset;  // previous init offset
+  intptr_t last_init_end = first_offset;  // previous init offset+size
+  intptr_t last_tile_end = first_offset;  // previous tile offset+size
   #endif
   intptr_t zeroes_done = header_size;
 
@@ -3155,7 +3201,8 @@
 bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
   if (is_complete())
     return true;                // stores could be anything at this point
-  intptr_t last_off = sizeof(oopDesc);
+  assert(allocation() != NULL, "must be present");
+  intptr_t last_off = allocation()->minimum_header_size();
   for (uint i = InitializeNode::RawStores; i < req(); i++) {
     Node* st = in(i);
     intptr_t st_off = get_store_offset(st, phase);
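
For illustration only: the factory's return type widens from LoadNode* to Node* (see the
memnode.hpp hunk below) because under compressed oops the node handed back is the DecodeN
wrapper; only the inner LoadN is actually a LoadNode:

  //   Node* load = gvn.transform(new (C, 3) LoadNNode(ctl, mem, adr, adr_type, narrowtype));
  //   return new (C, 2) DecodeNNode(load, rt);   // callers see the decode, not the load
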
--- a/hotspot/src/share/vm/opto/memnode.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -137,7 +137,8 @@
   }
 
   // Polymorphic factory method:
-  static LoadNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, BasicType bt );
+  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
+                     const TypePtr* at, const Type *rt, BasicType bt );
 
   virtual uint hash()   const;  // Check the type
 
@@ -330,6 +331,29 @@
   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 };
 
+
+//------------------------------LoadNNode--------------------------------------
+// Load a narrow oop from memory (either object or array)
+class LoadNNode : public LoadNode {
+public:
+  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
+    : LoadNode(c,mem,adr,at,t) {}
+  virtual int Opcode() const;
+  virtual uint ideal_reg() const { return Op_RegN; }
+  virtual int store_Opcode() const { return Op_StoreN; }
+  virtual BasicType memory_type() const { return T_NARROWOOP; }
+  // depends_only_on_test is almost always true, and needs to be almost always
+  // true to enable key hoisting & commoning optimizations.  However, for the
+  // special case of RawPtr loads from TLS top & end, the control edge carries
+  // the dependence preventing hoisting past a Safepoint instead of the memory
+  // edge.  (An unfortunate consequence of having Safepoints not set Raw
+  // Memory; itself an unfortunate consequence of having Nodes which produce
+  // results (new raw memory state) inside of loops preventing all manner of
+  // other optimizations).  Basically, it's ugly but so is the alternative.
+  // See comment in macro.cpp, around line 125 expand_allocate_common().
+  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
+};
+
 //------------------------------LoadKlassNode----------------------------------
 // Load a Klass from an object
 class LoadKlassNode : public LoadPNode {
@@ -376,7 +400,8 @@
   }
 
   // Polymorphic factory method:
-  static StoreNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, BasicType bt );
+  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
+                          const TypePtr* at, Node *val, BasicType bt );
 
   virtual uint hash() const;    // Check the type
 
@@ -488,6 +513,15 @@
   virtual BasicType memory_type() const { return T_ADDRESS; }
 };
 
+//------------------------------StoreNNode-------------------------------------
+// Store narrow oop to memory
+class StoreNNode : public StoreNode {
+public:
+  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
+  virtual int Opcode() const;
+  virtual BasicType memory_type() const { return T_NARROWOOP; }
+};
+
 //------------------------------StoreCMNode-----------------------------------
 // Store card-mark byte to memory for CM
 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
@@ -600,6 +634,13 @@
   virtual int Opcode() const;
 };
 
+//------------------------------CompareAndSwapNNode---------------------------
+class CompareAndSwapNNode : public LoadStoreNode {
+public:
+  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
+  virtual int Opcode() const;
+};
+
 //------------------------------ClearArray-------------------------------------
 class ClearArrayNode: public Node {
 public:
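
The LoadNNode/StoreNNode (and CompareAndSwapNNode) classes added above model 32-bit compressed references in the ideal graph; the 64-bit address only reappears when the value is decoded. The following stand-alone sketch is not HotSpot code — the heap base and shift values are assumptions — but it shows the encode/decode arithmetic such a narrow load/store pair implies.

    // Illustrative sketch only: encode a 64-bit heap address into a 32-bit
    // "narrow oop" by subtracting an assumed heap base and shifting out the
    // three low bits implied by 8-byte object alignment, then decode it back.
    #include <cassert>
    #include <cstdint>
    #include <iostream>

    const uint64_t kHeapBase   = 0x0000000100000000ULL;  // assumed heap base
    const unsigned kAlignShift = 3;                       // 8-byte alignment

    uint32_t encode(uint64_t addr) {                      // what a StoreN keeps
      assert(addr == 0 || (addr >= kHeapBase && (addr & 7) == 0));
      return addr == 0 ? 0u : (uint32_t)((addr - kHeapBase) >> kAlignShift);
    }

    uint64_t decode(uint32_t narrow) {                    // what a LoadN yields
      return narrow == 0 ? 0 : kHeapBase + ((uint64_t)narrow << kAlignShift);
    }

    int main() {
      uint64_t oop = kHeapBase + 0x1234560;               // aligned heap address
      uint32_t n   = encode(oop);
      assert(decode(n) == oop);                           // round-trips exactly
      std::cout << std::hex << decode(n) << "\n";
      return 0;
    }
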
--- a/hotspot/src/share/vm/opto/node.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/node.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1169,6 +1169,12 @@
   return ((ConPNode*)this)->type()->is_ptr()->get_con();
 }
 
+// Get a narrow oop constant from a ConNNode.
+intptr_t Node::get_narrowcon() const {
+  assert( Opcode() == Op_ConN, "" );
+  return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
+}
+
 // Get a long constant from a ConNode.
 // Return a default value if there is no apparent constant here.
 const TypeLong* Node::find_long_type() const {
--- a/hotspot/src/share/vm/opto/node.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/node.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -917,6 +917,7 @@
 
   // These guys are called by code generated by ADLC:
   intptr_t get_ptr() const;
+  intptr_t get_narrowcon() const;
   jdouble getd() const;
   jfloat getf() const;
 
--- a/hotspot/src/share/vm/opto/opcodes.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/opcodes.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -29,6 +29,7 @@
 const char *NodeClassNames[] = {
   "Node",
   "Set",
+  "RegN",
   "RegI",
   "RegP",
   "RegF",
--- a/hotspot/src/share/vm/opto/opcodes.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/opcodes.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -27,6 +27,7 @@
 enum Opcodes {
   Op_Node = 0,
   macro(Set)                    // Instruction selection match rule
+  macro(RegN)                   // Machine narrow oop register
   macro(RegI)                   // Machine integer register
   macro(RegP)                   // Machine pointer register
   macro(RegF)                   // Machine float   register
--- a/hotspot/src/share/vm/opto/parse2.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -67,12 +67,16 @@
   const Type*       elemtype = arytype->elem();
 
   if (UseUniqueSubclasses && result2 != NULL) {
-    const TypeInstPtr* toop = elemtype->isa_instptr();
+    const Type* el = elemtype;
+    if (elemtype->isa_narrowoop()) {
+      el = elemtype->is_narrowoop()->make_oopptr();
+    }
+    const TypeInstPtr* toop = el->isa_instptr();
     if (toop) {
       if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
         // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
         const Type* subklass = Type::get_const_type(toop->klass());
-        elemtype = subklass->join(elemtype);
+        elemtype = subklass->join(el);
       }
     }
   }
--- a/hotspot/src/share/vm/opto/parse3.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/parse3.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -365,7 +365,7 @@
     const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
     for (jint i = 0; i < length_con; i++) {
       Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1);
-      intptr_t offset = header + ((intptr_t)i << LogBytesPerWord);
+      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
       Node*    eaddr  = basic_plus_adr(array, offset);
       store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
     }
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -744,20 +744,23 @@
 //=============================================================================
 //------------------------------PhaseIterGVN-----------------------------------
 // Initialize hash table to fresh and clean for +VerifyOpto
-PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ) {
+PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
+                                                                      _delay_transform(false) {
 }
 
 //------------------------------PhaseIterGVN-----------------------------------
 // Initialize with previous PhaseIterGVN info; used by PhaseCCP
 PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn),
-  _worklist( igvn->_worklist )
+                                                   _worklist( igvn->_worklist ),
+                                                   _delay_transform(igvn->_delay_transform)
 {
 }
 
 //------------------------------PhaseIterGVN-----------------------------------
 // Initialize with previous PhaseGVN info from Parser
 PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
-  _worklist(*C->for_igvn())
+                                              _worklist(*C->for_igvn()),
+                                              _delay_transform(false)
 {
   uint max;
 
@@ -953,6 +956,12 @@
 //------------------------------transform--------------------------------------
 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
 Node *PhaseIterGVN::transform( Node *n ) {
+  if (_delay_transform) {
+    // Register the node but don't optimize for now
+    register_new_node_with_optimizer(n);
+    return n;
+  }
+
   // If brand new node, make space in type array, and give it a type.
   ensure_type_or_null(n);
   if (type_or_null(n) == NULL) {
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -383,6 +383,10 @@
 // Phase for iteratively performing local, pessimistic GVN-style optimizations.
 // and ideal transformations on the graph.
 class PhaseIterGVN : public PhaseGVN {
+ private:
+  bool _delay_transform;  // When true, simply register the node when calling transform
+                          // instead of actually optimizing it
+
   // Idealize old Node 'n' with respect to its inputs and its value
   virtual Node *transform_old( Node *a_node );
 protected:
@@ -446,6 +450,10 @@
     subsume_node(old, nn);
   }
 
+  void set_delay_transform(bool delay) {
+    _delay_transform = delay;
+  }
+
 #ifndef PRODUCT
 protected:
   // Sub-quadratic implementation of VerifyIterativeGVN.
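
The new _delay_transform flag makes PhaseIterGVN::transform register a node on the worklist instead of optimizing it immediately. A minimal sketch of that guard pattern, using invented names rather than the real phase classes:

    // Minimal sketch (assumed names, not the real PhaseIterGVN): when delay
    // is set, transform() only records the node for later; otherwise it would
    // run the usual idealize/value-number steps.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct Node { int id; };

    class Phase {
      bool delay_transform_ = false;
      std::vector<Node*> worklist_;
    public:
      void set_delay_transform(bool d) { delay_transform_ = d; }

      Node* transform(Node* n) {
        if (delay_transform_) {
          worklist_.push_back(n);   // just register; optimize later
          return n;
        }
        // ... real idealization / hash-consing would happen here ...
        return n;
      }

      std::size_t pending() const { return worklist_.size(); }
    };

    int main() {
      Phase p;
      Node a{1};
      p.set_delay_transform(true);
      p.transform(&a);                      // deferred, not optimized
      std::cout << p.pending() << "\n";     // prints 1
      return 0;
    }
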
--- a/hotspot/src/share/vm/opto/subnode.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/subnode.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -736,6 +736,75 @@
 }
 
 //=============================================================================
+//------------------------------sub--------------------------------------------
+// Simplify a CmpN (compare 2 narrow oops) node, based on local information.
+// If both inputs are constants, compare them.
+const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
+  const TypePtr *r0 = t1->is_narrowoop()->make_oopptr(); // Handy access
+  const TypePtr *r1 = t2->is_narrowoop()->make_oopptr();
+
+  // Undefined inputs make for an undefined result
+  if( TypePtr::above_centerline(r0->_ptr) ||
+      TypePtr::above_centerline(r1->_ptr) )
+    return Type::TOP;
+
+  if (r0 == r1 && r0->singleton()) {
+    // Equal pointer constants (klasses, nulls, etc.)
+    return TypeInt::CC_EQ;
+  }
+
+  // See if it is 2 unrelated classes.
+  const TypeOopPtr* p0 = r0->isa_oopptr();
+  const TypeOopPtr* p1 = r1->isa_oopptr();
+  if (p0 && p1) {
+    ciKlass* klass0 = p0->klass();
+    bool    xklass0 = p0->klass_is_exact();
+    ciKlass* klass1 = p1->klass();
+    bool    xklass1 = p1->klass_is_exact();
+    int kps = (p0->isa_klassptr()?1:0) + (p1->isa_klassptr()?1:0);
+    if (klass0 && klass1 &&
+        kps != 1 &&             // both or neither are klass pointers
+        !klass0->is_interface() && // do not trust interfaces
+        !klass1->is_interface()) {
+      // See if neither subclasses the other, or if the class on top
+      // is precise.  In either of these cases, the compare must fail.
+      if (klass0->equals(klass1)   ||   // if types are unequal but klasses are
+          !klass0->is_java_klass() ||   // types not part of Java language?
+          !klass1->is_java_klass()) {   // types not part of Java language?
+        // Do nothing; we know nothing for imprecise types
+      } else if (klass0->is_subtype_of(klass1)) {
+        // If klass1's type is PRECISE, then we can fail.
+        if (xklass1)  return TypeInt::CC_GT;
+      } else if (klass1->is_subtype_of(klass0)) {
+        // If klass0's type is PRECISE, then we can fail.
+        if (xklass0)  return TypeInt::CC_GT;
+      } else {                  // Neither subtypes the other
+        return TypeInt::CC_GT;  // ...so always fail
+      }
+    }
+  }
+
+  // Known constants can be compared exactly
+  // Null can be distinguished from any NotNull pointers
+  // Unknown inputs make an unknown result
+  if( r0->singleton() ) {
+    intptr_t bits0 = r0->get_con();
+    if( r1->singleton() )
+      return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT;
+    return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC;
+  } else if( r1->singleton() ) {
+    intptr_t bits1 = r1->get_con();
+    return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
+  } else
+    return TypeInt::CC;
+}
+
+//------------------------------Ideal------------------------------------------
+Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
+  return NULL;
+}
+
+//=============================================================================
 //------------------------------Value------------------------------------------
 // Simplify an CmpF (compare 2 floats ) node, based on local information.
 // If both inputs are constants, compare them.
--- a/hotspot/src/share/vm/opto/subnode.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/subnode.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -163,6 +163,16 @@
   virtual const Type *sub( const Type *, const Type * ) const;
 };
 
+//------------------------------CmpNNode--------------------------------------
+// Compare 2 narrow oop values, returning condition codes (-1, 0 or 1).
+class CmpNNode : public CmpNode {
+public:
+  CmpNNode( Node *in1, Node *in2 ) : CmpNode(in1,in2) {}
+  virtual int Opcode() const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *sub( const Type *, const Type * ) const;
+};
+
 //------------------------------CmpLNode---------------------------------------
 // Compare 2 long values, returning condition codes (-1, 0 or 1).
 class CmpLNode : public CmpNode {
--- a/hotspot/src/share/vm/opto/superword.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/superword.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1424,6 +1424,7 @@
 //---------------------------container_type---------------------------
 // Smallest type containing range of values
 const Type* SuperWord::container_type(const Type* t) {
+  if (t->isa_narrowoop()) t = t->is_narrowoop()->make_oopptr();
   if (t->isa_aryptr()) {
     t = t->is_aryptr()->elem();
   }
--- a/hotspot/src/share/vm/opto/type.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/type.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -40,6 +40,7 @@
   T_INT,        // Int
   T_LONG,       // Long
   T_VOID,       // Half
+  T_NARROWOOP,  // NarrowOop
 
   T_ILLEGAL,    // Tuple
   T_ARRAY,      // Array
@@ -279,15 +280,6 @@
   TypeRawPtr::BOTTOM = TypeRawPtr::make( TypePtr::BotPTR );
   TypeRawPtr::NOTNULL= TypeRawPtr::make( TypePtr::NotNull );
 
-  mreg2type[Op_Node] = Type::BOTTOM;
-  mreg2type[Op_Set ] = 0;
-  mreg2type[Op_RegI] = TypeInt::INT;
-  mreg2type[Op_RegP] = TypePtr::BOTTOM;
-  mreg2type[Op_RegF] = Type::FLOAT;
-  mreg2type[Op_RegD] = Type::DOUBLE;
-  mreg2type[Op_RegL] = TypeLong::LONG;
-  mreg2type[Op_RegFlags] = TypeInt::CC;
-
   const Type **fmembar = TypeTuple::fields(0);
   TypeTuple::MEMBAR = TypeTuple::make(TypeFunc::Parms+0, fmembar);
 
@@ -305,6 +297,19 @@
                                            false, 0, oopDesc::klass_offset_in_bytes());
   TypeOopPtr::BOTTOM  = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot);
 
+  TypeNarrowOop::NULL_PTR = TypeNarrowOop::make( TypePtr::NULL_PTR );
+  TypeNarrowOop::BOTTOM   = TypeNarrowOop::make( TypeInstPtr::BOTTOM );
+
+  mreg2type[Op_Node] = Type::BOTTOM;
+  mreg2type[Op_Set ] = 0;
+  mreg2type[Op_RegN] = TypeNarrowOop::BOTTOM;
+  mreg2type[Op_RegI] = TypeInt::INT;
+  mreg2type[Op_RegP] = TypePtr::BOTTOM;
+  mreg2type[Op_RegF] = Type::FLOAT;
+  mreg2type[Op_RegD] = Type::DOUBLE;
+  mreg2type[Op_RegL] = TypeLong::LONG;
+  mreg2type[Op_RegFlags] = TypeInt::CC;
+
   TypeAryPtr::RANGE   = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), current->env()->Object_klass(), false, arrayOopDesc::length_offset_in_bytes());
   // There is no shared klass for Object[].  See note in TypeAryPtr::klass().
   TypeAryPtr::OOPS    = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/,  false,  Type::OffsetBot);
@@ -316,6 +321,7 @@
   TypeAryPtr::FLOATS  = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::FLOAT        ,TypeInt::POS), ciTypeArrayKlass::make(T_FLOAT),  true,  Type::OffsetBot);
   TypeAryPtr::DOUBLES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::DOUBLE       ,TypeInt::POS), ciTypeArrayKlass::make(T_DOUBLE), true,  Type::OffsetBot);
 
+  TypeAryPtr::_array_body_type[T_NARROWOOP] = NULL; // what should this be?
   TypeAryPtr::_array_body_type[T_OBJECT]  = TypeAryPtr::OOPS;
   TypeAryPtr::_array_body_type[T_ARRAY]   = TypeAryPtr::OOPS;   // arrays are stored in oop arrays
   TypeAryPtr::_array_body_type[T_BYTE]    = TypeAryPtr::BYTES;
@@ -345,6 +351,7 @@
   longpair[1] = TypeLong::LONG;
   TypeTuple::LONG_PAIR = TypeTuple::make(2, longpair);
 
+  _const_basic_type[T_NARROWOOP] = TypeNarrowOop::BOTTOM;
   _const_basic_type[T_BOOLEAN] = TypeInt::BOOL;
   _const_basic_type[T_CHAR]    = TypeInt::CHAR;
   _const_basic_type[T_BYTE]    = TypeInt::BYTE;
@@ -359,6 +366,7 @@
   _const_basic_type[T_ADDRESS] = TypeRawPtr::BOTTOM;  // both interpreter return addresses & random raw ptrs
   _const_basic_type[T_CONFLICT]= Type::BOTTOM;        // why not?
 
+  _zero_type[T_NARROWOOP] = TypeNarrowOop::NULL_PTR;
   _zero_type[T_BOOLEAN] = TypeInt::ZERO;     // false == 0
   _zero_type[T_CHAR]    = TypeInt::ZERO;     // '\0' == 0
   _zero_type[T_BYTE]    = TypeInt::ZERO;     // 0x00 == 0
@@ -400,6 +408,10 @@
     Type* t = (Type*)i._value;
     tdic->Insert(t,t);  // New Type, insert into Type table
   }
+
+#ifdef ASSERT
+  verify_lastype();
+#endif
 }
 
 //------------------------------hashcons---------------------------------------
@@ -467,7 +479,19 @@
 // Compute the MEET of two types.  NOT virtual.  It enforces that meet is
 // commutative and the lattice is symmetric.
 const Type *Type::meet( const Type *t ) const {
+  if (isa_narrowoop() && t->isa_narrowoop()) {
+    const Type* result = is_narrowoop()->make_oopptr()->meet(t->is_narrowoop()->make_oopptr());
+    if (result->isa_oopptr()) {
+      return result->isa_oopptr()->make_narrowoop();
+    } else if (result == TypePtr::NULL_PTR) {
+      return TypeNarrowOop::NULL_PTR;
+    } else {
+      return result;
+    }
+  }
+
   const Type *mt = xmeet(t);
+  if (isa_narrowoop() || t->isa_narrowoop()) return mt;
 #ifdef ASSERT
   assert( mt == t->xmeet(this), "meet not commutative" );
   const Type* dual_join = mt->_dual;
@@ -556,6 +580,9 @@
   case AryPtr:
     return t->xmeet(this);
 
+  case NarrowOop:
+    return t->xmeet(this);
+
   case Bad:                     // Type check
   default:                      // Bogus type not in lattice
     typerr(t);
@@ -613,6 +640,7 @@
   Bad,          // Int - handled in v-call
   Bad,          // Long - handled in v-call
   Half,         // Half
+  Bad,          // NarrowOop - handled in v-call
 
   Bad,          // Tuple - handled in v-call
   Bad,          // Array - handled in v-call
@@ -668,11 +696,14 @@
   ResourceMark rm;
   Dict d(cmpkey,hashkey);       // Stop recursive type dumping
   dump2(d,1, st);
+  if (isa_ptr() && is_ptr()->is_narrow()) {
+    st->print(" [narrow]");
+  }
 }
 
 //------------------------------data-------------------------------------------
 const char * const Type::msg[Type::lastype] = {
-  "bad","control","top","int:","long:","half",
+  "bad","control","top","int:","long:","half", "narrowoop:",
   "tuple:", "aryptr",
   "anyptr:", "rawptr:", "java:", "inst:", "ary:", "klass:",
   "func", "abIO", "return_address", "memory",
@@ -735,7 +766,7 @@
 //------------------------------isa_oop_ptr------------------------------------
 // Return true if type is an oop pointer type.  False for raw pointers.
 static char isa_oop_ptr_tbl[Type::lastype] = {
-  0,0,0,0,0,0,0/*tuple*/, 0/*ary*/,
+  0,0,0,0,0,0,0/*narrowoop*/,0/*tuple*/, 0/*ary*/,
   0/*anyptr*/,0/*rawptr*/,1/*OopPtr*/,1/*InstPtr*/,1/*AryPtr*/,1/*KlassPtr*/,
   0/*func*/,0,0/*return_address*/,0,
   /*floats*/0,0,0, /*doubles*/0,0,0,
@@ -1051,6 +1082,7 @@
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case NarrowOop:
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   default:                      // All else is a mistake
@@ -1718,6 +1750,9 @@
 
 //------------------------------make-------------------------------------------
 const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) {
+  if (UseCompressedOops && elem->isa_oopptr()) {
+    elem = elem->is_oopptr()->make_narrowoop();
+  }
   size = normalize_array_size(size);
   return (TypeAry*)(new TypeAry(elem,size))->hashcons();
 }
@@ -1800,14 +1835,28 @@
   // In such cases, an array built on this ary must have no subclasses.
   if (_elem == BOTTOM)      return false;  // general array not exact
   if (_elem == TOP   )      return false;  // inverted general array not exact
-  const TypeOopPtr*  toop = _elem->isa_oopptr();
+  const TypeOopPtr*  toop = NULL;
+  if (UseCompressedOops) {
+    const TypeNarrowOop* noop = _elem->isa_narrowoop();
+    if (noop) toop = noop->make_oopptr()->isa_oopptr();
+  } else {
+    toop = _elem->isa_oopptr();
+  }
   if (!toop)                return true;   // a primitive type, like int
   ciKlass* tklass = toop->klass();
   if (tklass == NULL)       return false;  // unloaded class
   if (!tklass->is_loaded()) return false;  // unloaded class
-  const TypeInstPtr* tinst = _elem->isa_instptr();
+  const TypeInstPtr* tinst;
+  if (_elem->isa_narrowoop())
+    tinst = _elem->is_narrowoop()->make_oopptr()->isa_instptr();
+  else
+    tinst = _elem->isa_instptr();
   if (tinst)                return tklass->as_instance_klass()->is_final();
-  const TypeAryPtr*  tap = _elem->isa_aryptr();
+  const TypeAryPtr*  tap;
+  if (_elem->isa_narrowoop())
+    tap = _elem->is_narrowoop()->make_oopptr()->isa_aryptr();
+  else
+    tap = _elem->isa_aryptr();
   if (tap)                  return tap->ary()->ary_must_be_exact();
   return false;
 }
@@ -1864,6 +1913,7 @@
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case NarrowOop:
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   case Top:
@@ -2455,6 +2505,10 @@
   return make( _ptr, xadd_offset(offset) );
 }
 
+const TypeNarrowOop* TypeOopPtr::make_narrowoop() const {
+  return TypeNarrowOop::make(this);
+}
+
 int TypeOopPtr::meet_instance(int iid) const {
   if (iid == 0) {
     return (_instance_id < 0)  ? _instance_id : UNKNOWN_INSTANCE;
@@ -2607,6 +2661,7 @@
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case NarrowOop:
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   case Top:
@@ -3021,6 +3076,9 @@
   jint res = cache;
   if (res == 0) {
     switch (etype) {
+    case T_NARROWOOP:
+      etype = T_OBJECT;
+      break;
     case T_CONFLICT:
     case T_ILLEGAL:
     case T_VOID:
@@ -3093,6 +3151,7 @@
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case NarrowOop:
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   case Top:
@@ -3293,6 +3352,124 @@
 
 
 //=============================================================================
+const TypeNarrowOop *TypeNarrowOop::BOTTOM;
+const TypeNarrowOop *TypeNarrowOop::NULL_PTR;
+
+
+const TypeNarrowOop* TypeNarrowOop::make(const TypePtr* type) {
+  return (const TypeNarrowOop*)(new TypeNarrowOop(type))->hashcons();
+}
+
+//------------------------------hash-------------------------------------------
+// Type-specific hashing function.
+int TypeNarrowOop::hash(void) const {
+  return _ooptype->hash() + 7;
+}
+
+
+bool TypeNarrowOop::eq( const Type *t ) const {
+  const TypeNarrowOop* tc = t->isa_narrowoop();
+  if (tc != NULL) {
+    if (_ooptype->base() != tc->_ooptype->base()) {
+      return false;
+    }
+    return tc->_ooptype->eq(_ooptype);
+  }
+  return false;
+}
+
+bool TypeNarrowOop::singleton(void) const {    // TRUE if type is a singleton
+  return _ooptype->singleton();
+}
+
+bool TypeNarrowOop::empty(void) const {
+  return _ooptype->empty();
+}
+
+//------------------------------meet-------------------------------------------
+// Compute the MEET of two types.  It returns a new Type object.
+const Type *TypeNarrowOop::xmeet( const Type *t ) const {
+  // Perform a fast test for common case; meeting the same types together.
+  if( this == t ) return this;  // Meeting same type-rep?
+
+  // Current "this->_base" is NarrowOop
+  switch (t->base()) {          // switch on original type
+
+  case Int:                     // Mixing ints & oops happens when javac
+  case Long:                    // reuses local variables
+  case FloatTop:
+  case FloatCon:
+  case FloatBot:
+  case DoubleTop:
+  case DoubleCon:
+  case DoubleBot:
+  case Bottom:                  // Ye Olde Default
+    return Type::BOTTOM;
+  case Top:
+    return this;
+
+  case NarrowOop: {
+    const Type* result = _ooptype->xmeet(t->is_narrowoop()->make_oopptr());
+    if (result->isa_ptr()) {
+      return TypeNarrowOop::make(result->is_ptr());
+    }
+    return result;
+  }
+
+  default:                      // All else is a mistake
+    typerr(t);
+
+  case RawPtr:
+  case AnyPtr:
+  case OopPtr:
+  case InstPtr:
+  case KlassPtr:
+  case AryPtr:
+    typerr(t);
+    return Type::BOTTOM;
+
+  } // End of switch
+}
+
+const Type *TypeNarrowOop::xdual() const {    // Compute dual right now.
+  const TypePtr* odual = _ooptype->dual()->is_ptr();
+  return new TypeNarrowOop(odual);
+}
+
+const Type *TypeNarrowOop::filter( const Type *kills ) const {
+  if (kills->isa_narrowoop()) {
+    const Type* ft =_ooptype->filter(kills->is_narrowoop()->_ooptype);
+    if (ft->empty())
+      return Type::TOP;           // Canonical empty value
+    if (ft->isa_ptr()) {
+      return make(ft->isa_ptr());
+    }
+    return ft;
+  } else if (kills->isa_ptr()) {
+    const Type* ft = _ooptype->join(kills);
+    if (ft->empty())
+      return Type::TOP;           // Canonical empty value
+    return ft;
+  } else {
+    return Type::TOP;
+  }
+}
+
+
+intptr_t TypeNarrowOop::get_con() const {
+  return _ooptype->get_con();
+}
+
+#ifndef PRODUCT
+void TypeNarrowOop::dump2( Dict & d, uint depth, outputStream *st ) const {
+  st->print("narrowoop: ");
+  _ooptype->dump2(d, depth, st);
+}
+#endif
+
+
+//=============================================================================
 // Convenience common pre-built types.
 
 // Not-null object klass or below
@@ -3341,28 +3518,33 @@
   ciKlass* k_ary = NULL;
   const TypeInstPtr *tinst;
   const TypeAryPtr *tary;
+  const Type* el = elem();
+  if (el->isa_narrowoop()) {
+    el = el->is_narrowoop()->make_oopptr();
+  }
+
   // Get element klass
-  if ((tinst = elem()->isa_instptr()) != NULL) {
+  if ((tinst = el->isa_instptr()) != NULL) {
     // Compute array klass from element klass
     k_ary = ciObjArrayKlass::make(tinst->klass());
-  } else if ((tary = elem()->isa_aryptr()) != NULL) {
+  } else if ((tary = el->isa_aryptr()) != NULL) {
     // Compute array klass from element klass
     ciKlass* k_elem = tary->klass();
     // If element type is something like bottom[], k_elem will be null.
     if (k_elem != NULL)
       k_ary = ciObjArrayKlass::make(k_elem);
-  } else if ((elem()->base() == Type::Top) ||
-             (elem()->base() == Type::Bottom)) {
+  } else if ((el->base() == Type::Top) ||
+             (el->base() == Type::Bottom)) {
     // element type of Bottom occurs from meet of basic type
     // and object; Top occurs when doing join on Bottom.
     // Leave k_ary at NULL.
   } else {
     // Cannot compute array klass directly from basic type,
     // since subtypes of TypeInt all have basic type T_INT.
-    assert(!elem()->isa_int(),
+    assert(!el->isa_int(),
            "integral arrays must be pre-equipped with a class");
     // Compute array klass directly from basic type
-    k_ary = ciTypeArrayKlass::make(elem()->basic_type());
+    k_ary = ciTypeArrayKlass::make(el->basic_type());
   }
 
   if( this != TypeAryPtr::OOPS )
@@ -3710,7 +3892,7 @@
 //------------------------------print_flattened--------------------------------
 // Print a 'flattened' signature
 static const char * const flat_type_msg[Type::lastype] = {
-  "bad","control","top","int","long","_",
+  "bad","control","top","int","long","_", "narrowoop",
   "tuple:", "array:",
   "ptr", "rawptr", "ptr", "ptr", "ptr", "ptr",
   "func", "abIO", "return_address", "mem",
--- a/hotspot/src/share/vm/opto/type.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/opto/type.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -41,6 +41,7 @@
 class   TypeF;
 class   TypeInt;
 class   TypeLong;
+class   TypeNarrowOop;
 class   TypeAry;
 class   TypeTuple;
 class   TypePtr;
@@ -64,6 +65,7 @@
     Int,                        // Integer range (lo-hi)
     Long,                       // Long integer range (lo-hi)
     Half,                       // Placeholder half of doubleword
+    NarrowOop,                  // Compressed oop pointer
 
     Tuple,                      // Method signature or object layout
     Array,                      // Array types
@@ -188,6 +190,11 @@
   // Currently, it also works around limitations involving interface types.
   virtual const Type *filter( const Type *kills ) const;
 
+  // Returns true if this pointer points at memory which contains
+  // compressed oop references.  In 32-bit builds it's non-virtual
+  // since we don't support compressed oops at all in that mode.
+  LP64_ONLY(virtual) bool is_narrow() const { return false; }
+
   // Convenience access
   float getf() const;
   double getd() const;
@@ -204,15 +211,18 @@
   const TypeAry    *is_ary() const;              // Array, NOT array pointer
   const TypePtr    *is_ptr() const;              // Asserts it is a ptr type
   const TypePtr    *isa_ptr() const;             // Returns NULL if not ptr type
-  const TypeRawPtr *is_rawptr() const;           // NOT Java oop
-  const TypeOopPtr *isa_oopptr() const;          // Returns NULL if not ptr type
-  const TypeKlassPtr *isa_klassptr() const; // Returns NULL if not KlassPtr
-  const TypeKlassPtr *is_klassptr() const; // assert if not KlassPtr
-  const TypeOopPtr  *is_oopptr() const;          // Java-style GC'd pointer
-  const TypeInstPtr *isa_instptr() const;        // Returns NULL if not InstPtr
-  const TypeInstPtr *is_instptr() const;         // Instance
-  const TypeAryPtr *isa_aryptr() const;          // Returns NULL if not AryPtr
-  const TypeAryPtr *is_aryptr() const;           // Array oop
+  const TypeRawPtr *isa_rawptr() const;          // NOT Java oop
+  const TypeRawPtr *is_rawptr() const;           // Asserts is rawptr
+  const TypeNarrowOop  *is_narrowoop() const;        // Java-style GC'd pointer
+  const TypeNarrowOop  *isa_narrowoop() const;       // Returns NULL if not oop ptr type
+  const TypeOopPtr   *isa_oopptr() const;        // Returns NULL if not oop ptr type
+  const TypeOopPtr   *is_oopptr() const;         // Java-style GC'd pointer
+  const TypeKlassPtr *isa_klassptr() const;      // Returns NULL if not KlassPtr
+  const TypeKlassPtr *is_klassptr() const;       // assert if not KlassPtr
+  const TypeInstPtr  *isa_instptr() const;       // Returns NULL if not InstPtr
+  const TypeInstPtr  *is_instptr() const;        // Instance
+  const TypeAryPtr   *isa_aryptr() const;        // Returns NULL if not AryPtr
+  const TypeAryPtr   *is_aryptr() const;         // Array oop
   virtual bool      is_finite() const;           // Has a finite value
   virtual bool      is_nan()    const;           // Is not a number (NaN)
 
@@ -540,6 +550,7 @@
 // Otherwise the _base will indicate which subset of pointers is affected,
 // and the class will be inherited from.
 class TypePtr : public Type {
+  friend class TypeNarrowOop;
 public:
   enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR };
 protected:
@@ -701,6 +712,15 @@
 
   virtual const TypePtr *add_offset( int offset ) const;
 
+  // returns the equivalent compressed version of this pointer type
+  virtual const TypeNarrowOop* make_narrowoop() const;
+
+#ifdef _LP64
+  virtual bool is_narrow() const {
+    return (UseCompressedOops && _offset != 0);
+  }
+#endif
+
   virtual const Type *xmeet( const Type *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
 
@@ -822,6 +842,12 @@
   virtual const Type *xmeet( const Type *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
 
+#ifdef _LP64
+  virtual bool is_narrow() const {
+    return (UseCompressedOops && klass() != NULL && _offset != 0);
+  }
+#endif
+
   // Convenience common pre-built types.
   static const TypeAryPtr *RANGE;
   static const TypeAryPtr *OOPS;
@@ -874,6 +900,18 @@
   virtual const Type    *xmeet( const Type *t ) const;
   virtual const Type    *xdual() const;      // Compute dual right now.
 
+#ifdef _LP64
+  // Perm objects don't use compressed references, except for static fields
+  // which are currently compressed
+  virtual bool is_narrow() const {
+    if (UseCompressedOops && _offset != 0 && _klass->is_instance_klass()) {
+      ciInstanceKlass* ik = _klass->as_instance_klass();
+      return ik != NULL && ik->get_field_by_offset(_offset, true) != NULL;
+    }
+    return false;
+  }
+#endif
+
   // Convenience common pre-built types.
   static const TypeKlassPtr* OBJECT; // Not-null object klass or below
   static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
@@ -882,6 +920,56 @@
 #endif
 };
 
+//------------------------------TypeNarrowOop----------------------------------------
+// A compressed reference to some kind of Oop.  This type wraps around
+// a preexisting TypeOopPtr and forwards most of its operations to
+// the underlying type.  Its only real purpose is to track the
+// oopness of the compressed oop value when we expose the conversion
+// between the normal and the compressed form.
+class TypeNarrowOop : public Type {
+protected:
+  const TypePtr* _ooptype;
+
+  TypeNarrowOop( const TypePtr* ooptype): Type(NarrowOop),
+    _ooptype(ooptype) {
+    assert(ooptype->offset() == 0 ||
+           ooptype->offset() == OffsetBot ||
+           ooptype->offset() == OffsetTop, "no real offsets");
+  }
+public:
+  virtual bool eq( const Type *t ) const;
+  virtual int  hash() const;             // Type specific hashing
+  virtual bool singleton(void) const;    // TRUE if type is a singleton
+
+  virtual const Type *xmeet( const Type *t ) const;
+  virtual const Type *xdual() const;    // Compute dual right now.
+
+  virtual intptr_t get_con() const;
+
+  // Do not allow interface-vs.-noninterface joins to collapse to top.
+  virtual const Type *filter( const Type *kills ) const;
+
+  virtual bool empty(void) const;        // TRUE if type is vacuous
+
+  static const TypeNarrowOop *make( const TypePtr* type);
+
+  static const TypeNarrowOop* make_from_constant(ciObject* con) {
+    return make(TypeOopPtr::make_from_constant(con));
+  }
+
+  // returns the equivalent oopptr type for this compressed pointer
+  virtual const TypePtr *make_oopptr() const {
+    return _ooptype;
+  }
+
+  static const TypeNarrowOop *BOTTOM;
+  static const TypeNarrowOop *NULL_PTR;
+
+#ifndef PRODUCT
+  virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
+#endif
+};
+
 //------------------------------TypeFunc---------------------------------------
 // Class of Array Types
 class TypeFunc : public Type {
@@ -1002,6 +1090,10 @@
   return (_base >= OopPtr && _base <= KlassPtr) ? (TypeOopPtr*)this : NULL;
 }
 
+inline const TypeRawPtr *Type::isa_rawptr() const {
+  return (_base == RawPtr) ? (TypeRawPtr*)this : NULL;
+}
+
 inline const TypeRawPtr *Type::is_rawptr() const {
   assert( _base == RawPtr, "Not a raw pointer" );
   return (TypeRawPtr*)this;
@@ -1025,6 +1117,17 @@
   return (TypeAryPtr*)this;
 }
 
+inline const TypeNarrowOop *Type::is_narrowoop() const {
+  // OopPtr is the first and KlassPtr the last, with no non-oops between.
+  assert(_base == NarrowOop, "Not a narrow oop" ) ;
+  return (TypeNarrowOop*)this;
+}
+
+inline const TypeNarrowOop *Type::isa_narrowoop() const {
+  // OopPtr is the first and KlassPtr the last, with no non-oops between.
+  return (_base == NarrowOop) ? (TypeNarrowOop*)this : NULL;
+}
+
 inline const TypeKlassPtr *Type::isa_klassptr() const {
   return (_base == KlassPtr) ? (TypeKlassPtr*)this : NULL;
 }
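
TypeNarrowOop above is a thin wrapper that keeps the underlying TypePtr and forwards queries to it. A small sketch of the same wrap-and-forward idea, with invented types that are not part of the VM's type lattice:

    // Sketch of the wrap-and-forward pattern behind TypeNarrowOop (assumed
    // names): the narrow type stores the full-width type, delegates queries,
    // and exposes a widening accessor analogous to make_oopptr().
    #include <iostream>
    #include <string>

    struct OopType {
      std::string klass;
      bool is_null() const { return klass.empty(); }
      std::string to_string() const { return is_null() ? "null" : klass; }
    };

    struct NarrowOopType {
      const OopType* wide;                               // underlying type
      explicit NarrowOopType(const OopType* t) : wide(t) {}
      bool is_null() const { return wide->is_null(); }   // forwarded query
      const OopType* make_oopptr() const { return wide; }
      std::string to_string() const { return "narrowoop:" + wide->to_string(); }
    };

    int main() {
      OopType s{"java/lang/String"};
      NarrowOopType n(&s);
      std::cout << n.to_string() << "\n";   // narrowoop:java/lang/String
      return 0;
    }
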
--- a/hotspot/src/share/vm/prims/jni.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/prims/jni.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -135,7 +135,10 @@
   if (offset <= small_offset_mask) {
     klassOop field_klass = k;
     klassOop super_klass = Klass::cast(field_klass)->super();
-    while (instanceKlass::cast(super_klass)->contains_field_offset(offset)) {
+    // With compressed oops, the topmost superclass with nonstatic fields
+    // would be the owner of fields embedded in the header.
+    while (instanceKlass::cast(super_klass)->has_nonstatic_fields() &&
+           instanceKlass::cast(super_klass)->contains_field_offset(offset)) {
       field_klass = super_klass;   // super contains the field also
       super_klass = Klass::cast(field_klass)->super();
     }
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -2662,6 +2662,7 @@
     _continue = CallbackInvoker::report_simple_root(kind, o);
 
   }
+  virtual void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
 };
 
 // A supporting closure used to process JNI locals
@@ -2704,6 +2705,7 @@
     // invoke the callback
     _continue = CallbackInvoker::report_jni_local_root(_thread_tag, _tid, _depth, _method, o);
   }
+  virtual void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
 };
 
 
@@ -2878,9 +2880,11 @@
 }
 
 // verify that a static oop field is in range
-static inline bool verify_static_oop(instanceKlass* ik, oop* obj_p) {
-  oop* start = ik->start_of_static_fields();
-  oop* end = start + ik->static_oop_field_size();
+static inline bool verify_static_oop(instanceKlass* ik,
+                                     klassOop k, int offset) {
+  address obj_p = (address)k + offset;
+  address start = (address)ik->start_of_static_fields();
+  address end = start + (ik->static_oop_field_size() * heapOopSize);
   assert(end >= start, "sanity check");
 
   if (obj_p >= start && obj_p < end) {
@@ -2981,10 +2985,8 @@
       ClassFieldDescriptor* field = field_map->field_at(i);
       char type = field->field_type();
       if (!is_primitive_field_type(type)) {
-        address addr = (address)k + field->field_offset();
-        oop* f = (oop*)addr;
-        assert(verify_static_oop(ik, f), "sanity check");
-        oop fld_o = *f;
+        oop fld_o = k->obj_field(field->field_offset());
+        assert(verify_static_oop(ik, k, field->field_offset()), "sanity check");
         if (fld_o != NULL) {
           int slot = field->field_index();
           if (!CallbackInvoker::report_static_field_reference(mirror, fld_o, slot)) {
@@ -3026,9 +3028,7 @@
     ClassFieldDescriptor* field = field_map->field_at(i);
     char type = field->field_type();
     if (!is_primitive_field_type(type)) {
-      address addr = (address)o + field->field_offset();
-      oop* f = (oop*)addr;
-      oop fld_o = *f;
+      oop fld_o = o->obj_field(field->field_offset());
       if (fld_o != NULL) {
         // reflection code may have a reference to a klassOop.
         // - see sun.reflect.UnsafeStaticFieldAccessorImpl and sun.misc.Unsafe
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -100,7 +100,7 @@
     assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
     if (byte_offset == (jint)byte_offset) {
       void* ptr_plus_disp = (address)p + byte_offset;
-      assert((void*)p->obj_field_addr((jint)byte_offset) == ptr_plus_disp,
+      assert((void*)p->obj_field_addr<oop>((jint)byte_offset) == ptr_plus_disp,
              "raw [ptr+disp] must be consistent with oop::field_base");
     }
   }
@@ -146,13 +146,36 @@
   *(volatile type_name*)index_oop_from_field_offset_long(p, offset) = x; \
   OrderAccess::fence();
 
+// Macros for oops that check UseCompressedOops
+
+#define GET_OOP_FIELD(obj, offset, v) \
+  oop p = JNIHandles::resolve(obj);   \
+  oop v;                              \
+  if (UseCompressedOops) {            \
+    narrowOop n = *(narrowOop*)index_oop_from_field_offset_long(p, offset); \
+    v = oopDesc::decode_heap_oop(n);                                \
+  } else {                            \
+    v = *(oop*)index_oop_from_field_offset_long(p, offset);                 \
+  }
+
+#define GET_OOP_FIELD_VOLATILE(obj, offset, v) \
+  oop p = JNIHandles::resolve(obj);   \
+  volatile oop v;                     \
+  if (UseCompressedOops) {            \
+    volatile narrowOop n = *(volatile narrowOop*)index_oop_from_field_offset_long(p, offset); \
+    v = oopDesc::decode_heap_oop(n);                               \
+  } else {                            \
+    v = *(volatile oop*)index_oop_from_field_offset_long(p, offset);       \
+  }
+
+
 // Get/SetObject must be special-cased, since it works with handles.
 
 // The xxx140 variants for backward compatibility do not allow a full-width offset.
 UNSAFE_ENTRY(jobject, Unsafe_GetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset))
   UnsafeWrapper("Unsafe_GetObject");
   if (obj == NULL)  THROW_0(vmSymbols::java_lang_NullPointerException());
-  GET_FIELD(obj, offset, oop, v);
+  GET_OOP_FIELD(obj, offset, v)
   return JNIHandles::make_local(env, v);
 UNSAFE_END
 
@@ -162,11 +185,21 @@
   oop x = JNIHandles::resolve(x_h);
   //SET_FIELD(obj, offset, oop, x);
   oop p = JNIHandles::resolve(obj);
-  if (x != NULL) {
-    // If there is a heap base pointer, we are obliged to emit a store barrier.
-    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+  if (UseCompressedOops) {
+    if (x != NULL) {
+      // If there is a heap base pointer, we are obliged to emit a store barrier.
+      oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+    } else {
+      narrowOop n = oopDesc::encode_heap_oop_not_null(x);
+      *(narrowOop*)index_oop_from_field_offset_long(p, offset) = n;
+    }
   } else {
-    *(oop*)index_oop_from_field_offset_long(p, offset) = x;
+    if (x != NULL) {
+      // If there is a heap base pointer, we are obliged to emit a store barrier.
+      oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+    } else {
+      *(oop*)index_oop_from_field_offset_long(p, offset) = x;
+    }
   }
 UNSAFE_END
 
@@ -175,7 +208,7 @@
 // That is, it should be in the range [0, MAX_OBJECT_SIZE].
 UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
   UnsafeWrapper("Unsafe_GetObject");
-  GET_FIELD(obj, offset, oop, v);
+  GET_OOP_FIELD(obj, offset, v)
   return JNIHandles::make_local(env, v);
 UNSAFE_END
 
@@ -183,12 +216,16 @@
   UnsafeWrapper("Unsafe_SetObject");
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
-  oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+  if (UseCompressedOops) {
+    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+  } else {
+    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+  }
 UNSAFE_END
 
 UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
   UnsafeWrapper("Unsafe_GetObjectVolatile");
-  GET_FIELD_VOLATILE(obj, offset, oop, v);
+  GET_OOP_FIELD_VOLATILE(obj, offset, v)
   return JNIHandles::make_local(env, v);
 UNSAFE_END
 
@@ -196,7 +233,11 @@
   UnsafeWrapper("Unsafe_SetObjectVolatile");
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
-  oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+  if (UseCompressedOops) {
+    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+  } else {
+    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+  }
   OrderAccess::fence();
 UNSAFE_END
 
@@ -311,7 +352,11 @@
   UnsafeWrapper("Unsafe_SetOrderedObject");
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
-  oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+  if (UseCompressedOops) {
+    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+  } else {
+    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+  }
   OrderAccess::fence();
 UNSAFE_END
 
@@ -647,7 +692,7 @@
     THROW(vmSymbols::java_lang_InvalidClassException());
   } else if (k->klass_part()->oop_is_objArray()) {
     base  = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
-    scale = oopSize;
+    scale = heapOopSize;
   } else if (k->klass_part()->oop_is_typeArray()) {
     typeArrayKlass* tak = typeArrayKlass::cast(k);
     base  = tak->array_header_in_bytes();
@@ -845,11 +890,11 @@
   oop x = JNIHandles::resolve(x_h);
   oop e = JNIHandles::resolve(e_h);
   oop p = JNIHandles::resolve(obj);
-  intptr_t* addr = (intptr_t *)index_oop_from_field_offset_long(p, offset);
-  intptr_t res = Atomic::cmpxchg_ptr((intptr_t)x, addr, (intptr_t)e);
-  jboolean success  = (res == (intptr_t)e);
+  HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
+  oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e);
+  jboolean success  = (res == e);
   if (success)
-    update_barrier_set((oop*)addr, x);
+    update_barrier_set((void*)addr, x);
   return success;
 UNSAFE_END
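
The GET_OOP_FIELD macros and the Unsafe_SetObject paths above dispatch on UseCompressedOops: the field slot holds either a 32-bit narrow oop that must be decoded or a full-width oop. A sketch of that dispatch written as a plain function; the helper names and heap-base value here are assumptions, not VM APIs.

    // Sketch only: read an object field that is either a 32-bit narrow oop or
    // a full 64-bit oop depending on the compressed-oops mode, decoding the
    // narrow form back to an address.
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    bool     use_compressed_oops = true;           // assumed runtime mode
    uint64_t heap_base           = 0x100000000ULL; // assumed heap base
    const unsigned align_shift   = 3;

    uint64_t decode(uint32_t n) {
      return n == 0 ? 0 : heap_base + ((uint64_t)n << align_shift);
    }

    uint64_t get_oop_field(const unsigned char* obj, long offset) {
      if (use_compressed_oops) {
        uint32_t n;
        std::memcpy(&n, obj + offset, sizeof(n));  // 32-bit field slot
        return decode(n);
      }
      uint64_t v;
      std::memcpy(&v, obj + offset, sizeof(v));    // full-width field slot
      return v;
    }

    int main() {
      unsigned char fake_object[16] = {};
      uint32_t narrow = 0x20;                      // encodes heap_base + 0x100
      std::memcpy(fake_object + 8, &narrow, sizeof(narrow));
      std::cout << std::hex << get_oop_field(fake_object, 8) << "\n";
      return 0;
    }
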
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1163,6 +1163,31 @@
       no_shared_spaces();
     }
   }
+
+#ifdef _LP64
+  // Compressed Headers do not work with CMS, which uses a bit in the klass
+  // field offset to determine free list chunk markers.
+  // Check that UseCompressedOops can be set with the max heap size allocated
+  // by ergonomics.
+  if (!UseConcMarkSweepGC && MaxHeapSize <= (32*G - os::vm_page_size())) {
+    if (FLAG_IS_DEFAULT(UseCompressedOops)) {
+      FLAG_SET_ERGO(bool, UseCompressedOops, true);
+    }
+  } else {
+    if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
+      // If specified, give a warning
+      if (UseConcMarkSweepGC){
+        warning("Compressed Oops does not work with CMS");
+      } else {
+        warning(
+          "Max heap size too large for Compressed Oops");
+      }
+      FLAG_SET_DEFAULT(UseCompressedOops, false);
+    }
+  }
+  // The vm_version initialization code also checks for certain machines
+  // that are slower with compressed oops.
+#endif // _LP64
 }
 
 void Arguments::set_parallel_gc_flags() {
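
The ergonomics check above caps compressed oops at roughly 32 GB because a narrow oop has 32 bits and, with the default 8-byte object alignment, the decode shift multiplies that range by 8. The arithmetic, as a tiny stand-alone sketch:

    // Why the 32 GB ceiling: 2^32 distinct narrow-oop values, each addressing
    // an 8-byte-aligned position, cover 2^32 * 8 bytes of heap.
    #include <cstdint>
    #include <iostream>

    int main() {
      const uint64_t narrow_values = 1ULL << 32;  // values a 32-bit oop can hold
      const uint64_t alignment     = 8;           // default object alignment
      const uint64_t max_heap      = narrow_values * alignment;
      std::cout << (max_heap >> 30) << " GB\n";   // prints 32 GB
      return 0;
    }
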
--- a/hotspot/src/share/vm/runtime/atomic.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/atomic.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -44,3 +44,15 @@
   }
   return cur_as_bytes[offset];
 }
+
+unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
+  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
+  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+unsigned Atomic::cmpxchg(unsigned int exchange_value,
+                         volatile unsigned int* dest, unsigned int compare_value) {
+  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
+  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
+                                       (jint)compare_value);
+}
--- a/hotspot/src/share/vm/runtime/atomic.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/atomic.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -55,7 +55,10 @@
   static void dec_ptr(volatile void*     dest);
 
   // Performs atomic exchange of *dest with exchange_value.  Returns old prior value of *dest.
-  static jint     xchg    (jint     exchange_value, volatile jint*     dest);
+  static jint         xchg(jint     exchange_value, volatile jint*     dest);
+  static unsigned int xchg(unsigned int exchange_value,
+                           volatile unsigned int* dest);
+
   static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
   static void*    xchg_ptr(void*    exchange_value, volatile void*   dest);
 
@@ -65,6 +68,11 @@
   static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value);
   static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value);
   static jlong    cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
+
+  static unsigned int cmpxchg(unsigned int exchange_value,
+                              volatile unsigned int* dest,
+                              unsigned int compare_value);
+
   static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
   static void*    cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value);
 };
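
The unsigned Atomic::xchg/cmpxchg overloads above simply cast through to the existing jint primitives, asserting that both types have the same size. A sketch of the same forwarding idea using std::atomic purely for illustration (the VM uses its own platform primitives, so the names here are assumptions):

    // Sketch: implement an unsigned compare-and-swap on top of a signed one by
    // reinterpreting the operands, assuming the two types have the same size.
    #include <atomic>
    #include <iostream>

    static unsigned int cmpxchg_unsigned(unsigned int exchange_value,
                                         std::atomic<int>* dest,
                                         unsigned int compare_value) {
      static_assert(sizeof(unsigned int) == sizeof(int), "more work to do");
      int expected = (int)compare_value;
      dest->compare_exchange_strong(expected, (int)exchange_value);
      return (unsigned int)expected;   // old value, matching cmpxchg semantics
    }

    int main() {
      std::atomic<int> slot(5);
      unsigned int old = cmpxchg_unsigned(7u, &slot, 5u);
      std::cout << old << " -> " << slot.load() << "\n";   // 5 -> 7
      return 0;
    }
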
--- a/hotspot/src/share/vm/runtime/frame.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -1153,9 +1153,8 @@
     // If it is passed in a register, it got spilled in the stub frame.
     return (oop *)reg_map->location(reg);
   } else {
-    int sp_offset_in_stack_slots = reg->reg2stack();
-    int sp_offset = sp_offset_in_stack_slots >> (LogBytesPerWord - LogBytesPerInt);
-    return (oop *)&unextended_sp()[sp_offset];
+    int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size;
+    return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes);
   }
 }
 
@@ -1331,8 +1330,7 @@
   ResourceMark rm(thread);
   assert(_cb != NULL, "sanity check");
   if (_cb->oop_maps() != NULL) {
-    OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop,
-                      &_check_value, &_zap_dead);
+    OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value);
   }
 }
 
--- a/hotspot/src/share/vm/runtime/frame.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/frame.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -250,7 +250,7 @@
   oop interpreter_callee_receiver(symbolHandle signature)     { return *interpreter_callee_receiver_addr(signature); }
 
 
-  oop *interpreter_callee_receiver_addr(symbolHandle signature);
+  oop* interpreter_callee_receiver_addr(symbolHandle signature);
 
 
   // expression stack (may go up or down, direction == 1 or -1)
@@ -402,19 +402,25 @@
 # ifdef ENABLE_ZAP_DEAD_LOCALS
  private:
   class CheckValueClosure: public OopClosure {
-  public: void do_oop(oop* p);
+   public:
+    void do_oop(oop* p);
+    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   };
   static CheckValueClosure _check_value;
 
   class CheckOopClosure: public OopClosure {
-  public: void do_oop(oop* p);
+   public:
+    void do_oop(oop* p);
+    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   };
   static CheckOopClosure _check_oop;
 
   static void check_derived_oop(oop* base, oop* derived);
 
   class ZapDeadClosure: public OopClosure {
-  public: void do_oop(oop* p);
+   public:
+    void do_oop(oop* p);
+    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   };
   static ZapDeadClosure _zap_dead;
 
--- a/hotspot/src/share/vm/runtime/globals.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -29,7 +29,8 @@
 RUNTIME_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
               MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
               MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG, \
-              MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG)
+              MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG, \
+              MATERIALIZE_LP64_PRODUCT_FLAG)
 
 RUNTIME_OS_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
                  MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
@@ -137,6 +138,12 @@
   #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{notproduct}", DEFAULT },
 #endif
 
+#ifdef _LP64
+  #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{lp64_product}", DEFAULT },
+#else
+  #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+#endif // _LP64
+
 #define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1 product}", DEFAULT },
 #define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C1 pd product}", DEFAULT },
 #ifdef PRODUCT
@@ -165,7 +172,7 @@
 
 
 static Flag flagTable[] = {
- RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
+ RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT, RUNTIME_LP64_PRODUCT_FLAG_STRUCT)
  RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT)
 #ifdef COMPILER1
  C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -237,7 +237,6 @@
 #define falseInTiered true
 #endif
 
-
 // develop flags are settable / visible only during development and are constant in the PRODUCT version
 // product flags are always settable / visible
 // notproduct flags are settable / visible only during development and are not declared in the PRODUCT version
@@ -286,7 +285,11 @@
 // Note that when there is a need to support develop flags to be writeable,
 // it can be done in the same way as product_rw.
 
-#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, manageable, product_rw) \
+#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, manageable, product_rw, lp64_product) \
+                                                                            \
+  lp64_product(bool, UseCompressedOops, false,                              \
+            "Use 32-bit object references in 64-bit VM. "                   \
+            "lp64_product means flag is always constant in 32 bit VM")      \
                                                                             \
   /* UseMembar is theoretically a temp flag used for memory barrier         \
    * removal testing.  It was supposed to be removed before FCS but has     \
@@ -3209,6 +3212,12 @@
 #define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)      extern "C" type name;
 #define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)  extern "C" type name;
 #endif
+// Special LP64 flags, product only needed for now.
+#ifdef _LP64
+#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
+#else
+#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value;
+#endif // _LP64
 
 // Implementation macros
 #define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc)   type name = value;
@@ -3225,7 +3234,12 @@
 #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)     type name = pd_##name;
 #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
 #endif
+#ifdef _LP64
+#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc)   type name = value;
+#else
+#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
+#endif // _LP64
 
-RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
+RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG, DECLARE_LP64_PRODUCT_FLAG)
 
 RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -41,6 +41,11 @@
   #define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc)      FLAG_MEMBER(name),
   #define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
 #endif
+#ifdef _LP64
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
+#else
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc)    /* flag is constant */
+#endif // _LP64
 
 #define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
@@ -71,7 +76,9 @@
 typedef enum {
  RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER,
                RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER,
-               RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
+               RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER,
+               RUNTIME_PRODUCT_RW_FLAG_MEMBER,
+               RUNTIME_LP64_PRODUCT_FLAG_MEMBER)
  RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER,
                RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER,
                RUNTIME_NOTPRODUCT_FLAG_MEMBER)
@@ -116,6 +123,11 @@
   #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
   #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
 #endif
+#ifdef _LP64
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
+#else
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    /* flag is constant */
+#endif // _LP64
 
 
 #define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
@@ -137,7 +149,8 @@
                RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
                RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE,
                RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE,
-               RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE)
+               RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE,
+               RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE)
 RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
                RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
                RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
--- a/hotspot/src/share/vm/runtime/hpi.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/hpi.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -27,7 +27,8 @@
 
 extern "C" {
   static void unimplemented_panic(const char *fmt, ...) {
-    Unimplemented();
+    // mitigate testing damage from bug 6626677
+    warning("hpi::unimplemented_panic called");
   }
 
   static void unimplemented_monitorRegister(sys_mon_t *mid, char *info_str) {
--- a/hotspot/src/share/vm/runtime/init.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/init.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -27,7 +27,6 @@
 
 // Initialization done by VM thread in vm_init_globals()
 void check_ThreadShadow();
-void check_basic_types();
 void eventlog_init();
 void mutex_init();
 void chunkpool_init();
@@ -73,7 +72,7 @@
 
 void vm_init_globals() {
   check_ThreadShadow();
-  check_basic_types();
+  basic_types_init();
   eventlog_init();
   mutex_init();
   chunkpool_init();
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -206,9 +206,10 @@
   int _count;
 public:
   CountHandleClosure(): _count(0) {}
-  void do_oop(oop* unused) {
+  virtual void do_oop(oop* unused) {
     _count++;
   }
+  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
   int count() { return _count; }
 };
 
@@ -230,9 +231,10 @@
 
 class VerifyHandleClosure: public OopClosure {
 public:
-  void do_oop(oop* root) {
+  virtual void do_oop(oop* root) {
     (*root)->verify();
   }
+  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
 };
 
 void JNIHandles::verify() {
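The OopClosure interface now carries two virtual do_oop overloads, one for oop* and one for narrowOop*, so the same closure can be driven over full-width and compressed reference slots; closures that can never encounter compressed slots, such as the JNI handle walkers here, stub the narrowOop* overload out with ShouldNotReachHere(). A minimal sketch of the pattern with simplified stand-in types (not the real oopDesc/OopClosure declarations):

    // Dual-overload closure pattern, sketched with stand-in types.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef void*    oop;        // full-width reference (stand-in)
    typedef uint32_t narrowOop;  // 32-bit compressed reference (stand-in)

    class OopClosure {
     public:
      virtual void do_oop(oop* p) = 0;
      virtual void do_oop(narrowOop* p) = 0;
      virtual ~OopClosure() {}
    };

    // A closure that only ever visits full-width slots, e.g. JNI handles.
    class CountHandleClosure : public OopClosure {
      int _count;
     public:
      CountHandleClosure() : _count(0) {}
      virtual void do_oop(oop* /*unused*/)       { _count++; }
      virtual void do_oop(narrowOop* /*unused*/) { assert(false && "should not reach here"); }
      int count() const { return _count; }
    };

    int main() {
      CountHandleClosure cl;
      oop slots[3] = {0, 0, 0};
      for (int i = 0; i < 3; i++) cl.do_oop(&slots[i]);
      std::printf("visited %d handles\n", cl.count());
      return 0;
    }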
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -71,7 +71,8 @@
   /******************************************************************/                                                               \
                                                                                                                                      \
   volatile_nonstatic_field(oopDesc,            _mark,                                         markOop)                               \
-  nonstatic_field(oopDesc,                     _klass,                                        klassOop)                              \
+  volatile_nonstatic_field(oopDesc,            _metadata._klass,                              wideKlassOop)                          \
+  volatile_nonstatic_field(oopDesc,            _metadata._compressed_klass,                   narrowOop)                             \
      static_field(oopDesc,                     _bs,                                           BarrierSet*)                           \
   nonstatic_field(arrayKlass,                  _dimension,                                    int)                                   \
   nonstatic_field(arrayKlass,                  _higher_dimension,                             klassOop)                              \
@@ -79,13 +80,14 @@
   nonstatic_field(arrayKlass,                  _vtable_len,                                   int)                                   \
   nonstatic_field(arrayKlass,                  _alloc_size,                                   juint)                                 \
   nonstatic_field(arrayKlass,                  _component_mirror,                             oop)                                   \
-  nonstatic_field(arrayOopDesc,                _length,                                       int)                                   \
   nonstatic_field(compiledICHolderKlass,       _alloc_size,                                   juint)                                 \
   nonstatic_field(compiledICHolderOopDesc,     _holder_method,                                methodOop)                             \
   nonstatic_field(compiledICHolderOopDesc,     _holder_klass,                                 klassOop)                              \
   nonstatic_field(constantPoolOopDesc,         _tags,                                         typeArrayOop)                          \
   nonstatic_field(constantPoolOopDesc,         _cache,                                        constantPoolCacheOop)                  \
   nonstatic_field(constantPoolOopDesc,         _pool_holder,                                  klassOop)                              \
+  nonstatic_field(constantPoolOopDesc,         _length,                                       int)                                   \
+  nonstatic_field(constantPoolCacheOopDesc,    _length,                                       int)                                   \
   nonstatic_field(constantPoolCacheOopDesc,    _constant_pool,                                constantPoolOop)                       \
   nonstatic_field(instanceKlass,               _array_klasses,                                klassOop)                              \
   nonstatic_field(instanceKlass,               _methods,                                      objArrayOop)                           \
@@ -261,6 +263,7 @@
      static_field(Universe,                    _bootstrapping,                                bool)                                  \
      static_field(Universe,                    _fully_initialized,                            bool)                                  \
      static_field(Universe,                    _verify_count,                                 int)                                   \
+     static_field(Universe,                    _heap_base,                                    address)                                   \
                                                                                                                                      \
   /**********************************************************************************/                                               \
   /* Generation and Space hierarchies                                               */                                               \
@@ -305,8 +308,6 @@
   nonstatic_field(SharedHeap,                  _perm_gen,                                     PermGen*)                              \
   nonstatic_field(CollectedHeap,               _barrier_set,                                  BarrierSet*)                           \
   nonstatic_field(CollectedHeap,               _is_gc_active,                                 bool)                                  \
-  nonstatic_field(CollectedHeap,               _max_heap_capacity,                            size_t)                                \
-                                                                                                                                     \
   nonstatic_field(CompactibleSpace,            _compaction_top,                               HeapWord*)                             \
   nonstatic_field(CompactibleSpace,            _first_dead,                                   HeapWord*)                             \
   nonstatic_field(CompactibleSpace,            _end_of_live,                                  HeapWord*)                             \
@@ -912,12 +913,12 @@
            declare_type(arrayKlass, Klass)                                \
            declare_type(arrayKlassKlass, klassKlass)                      \
            declare_type(arrayOopDesc, oopDesc)                            \
-   declare_type(compiledICHolderKlass, Klass)                     \
-   declare_type(compiledICHolderOopDesc, oopDesc)                 \
-           declare_type(constantPoolKlass, arrayKlass)                    \
-           declare_type(constantPoolOopDesc, arrayOopDesc)                \
-           declare_type(constantPoolCacheKlass, arrayKlass)               \
-           declare_type(constantPoolCacheOopDesc, arrayOopDesc)           \
+   declare_type(compiledICHolderKlass, Klass)                             \
+   declare_type(compiledICHolderOopDesc, oopDesc)                         \
+           declare_type(constantPoolKlass, Klass)                         \
+           declare_type(constantPoolOopDesc, oopDesc)                     \
+           declare_type(constantPoolCacheKlass, Klass)                    \
+           declare_type(constantPoolCacheOopDesc, oopDesc)                \
            declare_type(instanceKlass, Klass)                             \
            declare_type(instanceKlassKlass, klassKlass)                   \
            declare_type(instanceOopDesc, oopDesc)                         \
@@ -949,9 +950,11 @@
   declare_oop_type(klassOop)                                              \
   declare_oop_type(markOop)                                               \
   declare_oop_type(methodOop)                                             \
-  declare_oop_type(methodDataOop)                                 \
+  declare_oop_type(methodDataOop)                                         \
   declare_oop_type(objArrayOop)                                           \
   declare_oop_type(oop)                                                   \
+  declare_oop_type(narrowOop)                                             \
+  declare_oop_type(wideKlassOop)                                          \
   declare_oop_type(constMethodOop)                                        \
   declare_oop_type(symbolOop)                                             \
   declare_oop_type(typeArrayOop)                                          \
@@ -1307,6 +1310,7 @@
   /* Object sizes */                                                      \
   /****************/                                                      \
                                                                           \
+  declare_constant(oopSize)                                               \
   declare_constant(LogBytesPerWord)                                       \
   declare_constant(BytesPerLong)                                          \
                                                                           \
@@ -1314,7 +1318,9 @@
   /* Object alignment */                                                  \
   /********************/                                                  \
                                                                           \
+  declare_constant(MinObjAlignment)                                       \
   declare_constant(MinObjAlignmentInBytes)                                \
+  declare_constant(LogMinObjAlignmentInBytes)                             \
                                                                           \
   /********************************************/                          \
   /* Generation and Space Hierarchy Constants */                          \
@@ -1361,7 +1367,6 @@
                                                                           \
   declare_constant(HeapWordSize)                                          \
   declare_constant(LogHeapWordSize)                                       \
-  declare_constant(HeapWordsPerOop)                                       \
                                                                           \
   /* constants from PermGen::Name enum */                                 \
                                                                           \
@@ -1610,7 +1615,7 @@
   declare_constant(OopMapValue::unused_value)                             \
   declare_constant(OopMapValue::oop_value)                                \
   declare_constant(OopMapValue::value_value)                              \
-  declare_constant(OopMapValue::dead_value)                               \
+  declare_constant(OopMapValue::narrowoop_value)                          \
   declare_constant(OopMapValue::callee_saved_value)                       \
   declare_constant(OopMapValue::derived_oop_value)                        \
                                                                           \
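The new oopDesc entries above (_metadata._klass and _metadata._compressed_klass) reflect the changed object header: the klass reference now sits in a union so it can be stored either full-width or as a 32-bit compressed value. A simplified sketch of the layout those fields imply, with stand-in typedefs instead of the real markOop/klassOop/narrowOop types (the real oopDesc lives in the oops/ sources, not shown here):

    // Header layout implied by the vmStructs entries above (sketch only).
    #include <cstdint>
    #include <cstdio>

    typedef intptr_t markOop;       // stand-in for the mark word
    typedef void*    wideKlassOop;  // full-width klass pointer
    typedef uint32_t narrowOop;     // 32-bit compressed klass value

    class oopDesc {
      volatile markOop _mark;              // lock state, hash, GC age
      union {
        wideKlassOop _klass;               // used when UseCompressedOops is off
        narrowOop    _compressed_klass;    // used when UseCompressedOops is on
      } _metadata;
    };

    int main() {
      std::printf("sizeof(oopDesc) = %zu bytes\n", sizeof(oopDesc));
      return 0;
    }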
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -670,8 +670,12 @@
   switch (type) {
     case JVM_SIGNATURE_CLASS :
     case JVM_SIGNATURE_ARRAY : {
-      oop* f = (oop*)addr;
-      oop o = *f;
+      oop o;
+      if (UseCompressedOops) {
+        o = oopDesc::load_decode_heap_oop((narrowOop*)addr);
+      } else {
+        o = oopDesc::load_decode_heap_oop((oop*)addr);
+      }
 
       // reflection and sun.misc.Unsafe classes may have a reference to a
       // klassOop so filter it out.
@@ -1077,6 +1081,7 @@
  public:
   SymbolTableDumper(DumpWriter* writer)     { _writer = writer; }
   void do_oop(oop* obj_p);
+  void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
 };
 
 void SymbolTableDumper::do_oop(oop* obj_p) {
@@ -1106,6 +1111,7 @@
     _thread_serial_num = thread_serial_num;
   }
   void do_oop(oop* obj_p);
+  void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
 };
 
 
@@ -1133,6 +1139,7 @@
     _writer = writer;
   }
   void do_oop(oop* obj_p);
+  void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
 };
 
 void JNIGlobalsDumper::do_oop(oop* obj_p) {
@@ -1164,6 +1171,7 @@
     writer()->write_u1(HPROF_GC_ROOT_MONITOR_USED);
     writer()->write_objectID(*obj_p);
   }
+  void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
 };
 
 
@@ -1178,6 +1186,7 @@
     _writer = writer;
   }
   void do_oop(oop* obj_p);
+  void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
 };
 
 void StickyClassDumper::do_oop(oop* obj_p) {
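The heap dumper now loads references through oopDesc::load_decode_heap_oop(), reading either a full oop* or a 32-bit narrowOop* depending on UseCompressedOops. Sketched with stand-in values (the real helpers are defined in the oop sources, not shown here), the decode amounts to treating a narrowOop as an offset from Universe::_heap_base in units of the minimum object alignment, which is why a 32-bit value can cover 2^32 * 8 bytes = 32 GB of heap:

    // Compressed-oop decode, sketched with stand-in types and a fake heap.
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowOop;
    typedef void*    oop;

    static char*     heap_base = 0;                  // stand-in for Universe::_heap_base
    static const int LogMinObjAlignmentInBytes = 3;  // 8-byte object alignment on LP64

    static oop decode_heap_oop(narrowOop v) {
      if (v == 0) return 0;                          // compressed null stays null
      return (oop)(heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes));
    }

    int main() {
      char fake_heap[64];
      heap_base = fake_heap;
      narrowOop n = 2;                               // second 8-byte-aligned slot
      std::printf("decoded offset: %ld bytes\n",
                  (long)((char*)decode_heap_oop(n) - heap_base));
      return 0;
    }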
--- a/hotspot/src/share/vm/utilities/copy.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/utilities/copy.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -148,11 +148,19 @@
 
   // oops,                  conjoint, atomic on each oop
   static void conjoint_oops_atomic(oop* from, oop* to, size_t count) {
-    assert_params_ok(from, to, LogBytesPerOop);
+    assert_params_ok(from, to, LogBytesPerHeapOop);
     assert_non_zero(count);
     pd_conjoint_oops_atomic(from, to, count);
   }
 
+  // overloaded for UseCompressedOops
+  static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) {
+    assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong");
+    assert_params_ok(from, to, LogBytesPerInt);
+    assert_non_zero(count);
+    pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
+  }
+
   // Copy a span of memory.  If the span is an integral number of aligned
   // longs, words, or ints, copy those units atomically.
   // The largest atomic transfer unit is 8 bytes, or the largest power
@@ -188,7 +196,7 @@
 
   // oops,                  conjoint array, atomic on each oop
   static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
-    assert_params_ok(from, to, LogBytesPerOop);
+    assert_params_ok(from, to, LogBytesPerHeapOop);
     assert_non_zero(count);
     pd_arrayof_conjoint_oops(from, to, count);
   }
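The new narrowOop overload of conjoint_oops_atomic can delegate to the jint copy routine because a narrowOop is exactly 32 bits, which the assert guards. A stand-alone sketch of that delegation (simplified: the stand-in copy loop ignores overlap direction and per-element atomicity, which the real pd_conjoint_jints_atomic handles):

    // Why a narrowOop array copy can reuse the jint copy path: same 32-bit size.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowOop;
    typedef int32_t  jint;

    static void conjoint_jints_atomic(jint* from, jint* to, size_t count) {
      for (size_t i = 0; i < count; i++) to[i] = from[i];   // simplified stand-in
    }

    static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) {
      assert(sizeof(narrowOop) == sizeof(jint) && "this cast is wrong");
      conjoint_jints_atomic((jint*)from, (jint*)to, count);
    }

    int main() {
      narrowOop src[3] = {1, 2, 3}, dst[3] = {0, 0, 0};
      conjoint_oops_atomic(src, dst, 3);
      std::printf("%u %u %u\n", dst[0], dst[1], dst[2]);
      return 0;
    }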
--- a/hotspot/src/share/vm/utilities/debug.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/utilities/debug.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -669,6 +669,7 @@
       tty->print_cr("0x%08x", o);
     }
   }
+  void do_oop(narrowOop* o) { ShouldNotReachHere(); }
 };
 
 
--- a/hotspot/src/share/vm/utilities/globalDefinitions.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -24,18 +24,23 @@
 
 # include "incls/_precompiled.incl"
 # include "incls/_globalDefinitions.cpp.incl"
-
+// Basic error support
 
-// Basic error support
+// Info for oops within a java object.  Defaults are zero so
+// things will break badly if incorrectly initialized.
+int heapOopSize        = 0;
+int LogBytesPerHeapOop = 0;
+int LogBitsPerHeapOop  = 0;
+int BytesPerHeapOop    = 0;
+int BitsPerHeapOop     = 0;
 
 void basic_fatal(const char* msg) {
   fatal(msg);
 }
 
-
 // Something to help porters sleep at night
 
-void check_basic_types() {
+void basic_types_init() {
 #ifdef ASSERT
 #ifdef _LP64
   assert(min_intx ==  (intx)CONST64(0x8000000000000000), "correct constant");
@@ -92,6 +97,7 @@
       case T_LONG:
       case T_OBJECT:
       case T_ADDRESS:   // random raw pointer
+      case T_NARROWOOP: // compressed pointer
       case T_CONFLICT:  // might as well support a bottom type
       case T_VOID:      // padding or other unaddressed word
         // layout type must map to itself
@@ -134,11 +140,30 @@
     os::java_to_os_priority[9] = JavaPriority9_To_OSPriority;
   if(JavaPriority10_To_OSPriority != -1 )
     os::java_to_os_priority[10] = JavaPriority10_To_OSPriority;
+
+  // Set the size of basic types here (after argument parsing but before
+  // stub generation).
+  if (UseCompressedOops) {
+    // Size info for oops within java objects is fixed
+    heapOopSize        = jintSize;
+    LogBytesPerHeapOop = LogBytesPerInt;
+    LogBitsPerHeapOop  = LogBitsPerInt;
+    BytesPerHeapOop    = BytesPerInt;
+    BitsPerHeapOop     = BitsPerInt;
+  } else {
+    heapOopSize        = oopSize;
+    LogBytesPerHeapOop = LogBytesPerWord;
+    LogBitsPerHeapOop  = LogBitsPerWord;
+    BytesPerHeapOop    = BytesPerWord;
+    BitsPerHeapOop     = BitsPerWord;
+  }
+  _type2aelembytes[T_OBJECT] = heapOopSize;
+  _type2aelembytes[T_ARRAY]  = heapOopSize;
 }
 
 
 // Map BasicType to signature character
-char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0};
+char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0, 0};
 
 // Map BasicType to Java type name
 const char* type2name_tab[T_CONFLICT+1] = {
@@ -155,6 +180,7 @@
   "array",
   "void",
   "*address*",
+  "*narrowoop*",
   "*conflict*"
 };
 
@@ -170,7 +196,7 @@
 
 
 // Map BasicType to size in words
-int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1};
+int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 1, -1};
 
 BasicType type2field[T_CONFLICT+1] = {
   (BasicType)0,            // 0,
@@ -189,7 +215,8 @@
   T_OBJECT,                // T_ARRAY    = 13,
   T_VOID,                  // T_VOID     = 14,
   T_ADDRESS,               // T_ADDRESS  = 15,
-  T_CONFLICT               // T_CONFLICT = 16,
+  T_NARROWOOP,             // T_NARROWOOP= 16,
+  T_CONFLICT               // T_CONFLICT = 17,
 };
 
 
@@ -210,7 +237,8 @@
   T_OBJECT,  // T_ARRAY    = 13,
   T_VOID,    // T_VOID     = 14,
   T_ADDRESS, // T_ADDRESS  = 15,
-  T_CONFLICT // T_CONFLICT = 16,
+  T_NARROWOOP, // T_NARROWOOP  = 16,
+  T_CONFLICT // T_CONFLICT = 17,
 };
 
 
@@ -231,7 +259,8 @@
   T_ARRAY_aelem_bytes,    // T_ARRAY    = 13,
   0,                      // T_VOID     = 14,
   T_OBJECT_aelem_bytes,   // T_ADDRESS  = 15,
-  0                       // T_CONFLICT = 16,
+  T_NARROWOOP_aelem_bytes,// T_NARROWOOP= 16,
+  0                       // T_CONFLICT = 17,
 };
 
 #ifdef ASSERT
@@ -245,7 +274,7 @@
 
 // The following code is mostly taken from JVM typedefs_md.h and system_md.c
 
-static const jlong  high_bit  = (jlong)1 << (jlong)63;
+static const jlong high_bit   = (jlong)1 << (jlong)63;
 static const jlong other_bits = ~high_bit;
 
 jlong float2long(jfloat f) {
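basic_types_init() above sizes in-heap references at VM start: with UseCompressedOops a heap oop is jint-sized (4 bytes), otherwise a full machine word, and the T_OBJECT/T_ARRAY array-element sizes follow. A tiny worked illustration of the space effect on a 64-bit VM (numbers here are illustrative, not measured):

    // An object with N reference fields spends N * heapOopSize bytes on them.
    #include <cstdio>

    int main() {
      const int oop_fields = 4;              // hypothetical object with 4 reference fields
      const int wordSize   = 8;              // LP64 machine word
      const int heapOopSize_compressed = 4;  // jintSize when UseCompressedOops
      const int heapOopSize_wide       = wordSize;

      std::printf("wide oops:       %d bytes of reference fields\n",
                  oop_fields * heapOopSize_wide);        // 32
      std::printf("compressed oops: %d bytes of reference fields\n",
                  oop_fields * heapOopSize_compressed);  // 16
      return 0;
    }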
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -59,23 +59,26 @@
 
 const int WordsPerLong       = 2;       // Number of stack entries for longs
 
-const int oopSize            = sizeof(char*);
+const int oopSize            = sizeof(char*); // Full-width oop
+extern int heapOopSize;                       // Oop within a java object
 const int wordSize           = sizeof(char*);
 const int longSize           = sizeof(jlong);
 const int jintSize           = sizeof(jint);
 const int size_tSize         = sizeof(size_t);
 
-// Size of a char[] needed to represent a jint as a string in decimal.
-const int jintAsStringSize = 12;
+const int BytesPerOop        = BytesPerWord;  // Full-width oop
 
-const int LogBytesPerOop     = LogBytesPerWord;
-const int LogBitsPerOop      = LogBitsPerWord;
-const int BytesPerOop        = 1 << LogBytesPerOop;
-const int BitsPerOop         = 1 << LogBitsPerOop;
+extern int LogBytesPerHeapOop;                // Oop within a java object
+extern int LogBitsPerHeapOop;
+extern int BytesPerHeapOop;
+extern int BitsPerHeapOop;
 
 const int BitsPerJavaInteger = 32;
 const int BitsPerSize_t      = size_tSize * BitsPerByte;
 
+// Size of a char[] needed to represent a jint as a string in decimal.
+const int jintAsStringSize = 12;
+
 // In fact this should be
 // log2_intptr(sizeof(class JavaThread)) - log2_intptr(64);
 // see os::set_memory_serialize_page()
@@ -99,14 +102,14 @@
 };
 
 // HeapWordSize must be 2^LogHeapWordSize.
-const int HeapWordSize     = sizeof(HeapWord);
+const int HeapWordSize        = sizeof(HeapWord);
 #ifdef _LP64
-const int LogHeapWordSize  = 3;
+const int LogHeapWordSize     = 3;
 #else
-const int LogHeapWordSize  = 2;
+const int LogHeapWordSize     = 2;
 #endif
-const int HeapWordsPerOop  = oopSize      / HeapWordSize;
-const int HeapWordsPerLong = BytesPerLong / HeapWordSize;
+const int HeapWordsPerLong    = BytesPerLong / HeapWordSize;
+const int LogHeapWordsPerLong = LogBytesPerLong - LogHeapWordSize;
 
 // The larger HeapWordSize for 64bit requires larger heaps
 // for the same application running in 64bit.  See bug 4967770.
@@ -284,6 +287,9 @@
 const int MinObjAlignmentInBytes     = MinObjAlignment * HeapWordSize;
 const int MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
 
+const int LogMinObjAlignment         = LogHeapWordsPerLong;
+const int LogMinObjAlignmentInBytes  = LogMinObjAlignment + LogHeapWordSize;
+
 // Machine dependent stuff
 
 #include "incls/_globalDefinitions_pd.hpp.incl"
@@ -371,7 +377,7 @@
   jlong long_value;
 };
 
-void check_basic_types(); // cannot define here; uses assert
+void basic_types_init(); // cannot define here; uses assert
 
 
 // NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/runtime/BasicType.java
@@ -388,7 +394,8 @@
   T_ARRAY    = 13,
   T_VOID     = 14,
   T_ADDRESS  = 15,
-  T_CONFLICT = 16, // for stack value type with conflicting contents
+  T_NARROWOOP= 16,
+  T_CONFLICT = 17, // for stack value type with conflicting contents
   T_ILLEGAL  = 99
 };
 
@@ -438,6 +445,7 @@
   T_LONG_size    = 2,
   T_OBJECT_size  = 1,
   T_ARRAY_size   = 1,
+  T_NARROWOOP_size = 1,
   T_VOID_size    = 0
 };
 
@@ -465,6 +473,7 @@
   T_OBJECT_aelem_bytes  = 4,
   T_ARRAY_aelem_bytes   = 4,
 #endif
+  T_NARROWOOP_aelem_bytes = 4,
   T_VOID_aelem_bytes    = 0
 };
 
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp	Sun Apr 13 17:43:42 2008 -0400
@@ -490,7 +490,31 @@
 typedef GenericTaskQueue<Task>         OopTaskQueue;
 typedef GenericTaskQueueSet<Task>      OopTaskQueueSet;
 
-typedef oop* StarTask;
+
+#define COMPRESSED_OOP_MASK  1
+
+// This is a container class for either an oop* or a narrowOop*.
+// Both are pushed onto a task queue and the consumer will test is_narrow()
+// to determine which should be processed.
+class StarTask {
+  void*  _holder;        // either an oop* or a narrowOop*
+ public:
+  StarTask(narrowOop *p) { _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK); }
+  StarTask(oop *p)       { _holder = (void*)p; }
+  StarTask()             { _holder = NULL; }
+  operator oop*()        { return (oop*)_holder; }
+  operator narrowOop*()  {
+    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
+  }
+
+  // Operators to preserve const/volatile in assignments required by gcc
+  void operator=(const volatile StarTask& t) volatile { _holder = t._holder; }
+
+  bool is_narrow() const {
+    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
+  }
+};
+
 typedef GenericTaskQueue<StarTask>     OopStarTaskQueue;
 typedef GenericTaskQueueSet<StarTask>  OopStarTaskQueueSet;
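StarTask replaces the plain oop* queue element with a tagged pointer: a narrowOop* is stored with its low bit set (COMPRESSED_OOP_MASK), which is free to use because the slot pointers are at least 4-byte aligned, and the consumer dispatches on is_narrow(). A stand-alone sketch of the tagging and of how a consumer would use it (stand-in types; the real queues are the GenericTaskQueue templates above):

    // Minimal stand-alone version of the StarTask tagging scheme.
    #include <cstdint>
    #include <cstdio>

    typedef void*    oop;
    typedef uint32_t narrowOop;

    #define COMPRESSED_OOP_MASK 1

    class StarTask {
      void* _holder;                     // oop*, or narrowOop* with the low bit set
     public:
      StarTask(narrowOop* p) { _holder = (void*)((uintptr_t)p | COMPRESSED_OOP_MASK); }
      StarTask(oop* p)       { _holder = (void*)p; }
      StarTask()             { _holder = 0; }
      operator oop*()        { return (oop*)_holder; }
      operator narrowOop*()  { return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK); }
      bool is_narrow() const { return ((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0; }
    };

    static void process(StarTask t) {
      if (t.is_narrow()) {
        narrowOop* p = t;                // untag and treat as a compressed slot
        std::printf("narrow slot, value %u\n", (unsigned)*p);
      } else {
        oop* p = t;                      // full-width slot
        std::printf("wide slot, value %p\n", *p);
      }
    }

    int main() {
      oop       wide_slot   = (oop)0;
      narrowOop narrow_slot = 42;
      process(StarTask(&wide_slot));
      process(StarTask(&narrow_slot));
      return 0;
    }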
 
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Fri Apr 11 09:56:35 2008 -0400
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Sun Apr 13 17:43:42 2008 -0400
@@ -332,11 +332,12 @@
 
      // VM version
      st->print_cr("#");
-     st->print_cr("# Java VM: %s (%s %s %s)",
+     st->print_cr("# Java VM: %s (%s %s %s %s)",
                    Abstract_VM_Version::vm_name(),
                    Abstract_VM_Version::vm_release(),
                    Abstract_VM_Version::vm_info_string(),
-                   Abstract_VM_Version::vm_platform_string()
+                   Abstract_VM_Version::vm_platform_string(),
+                   UseCompressedOops ? "compressed oops" : ""
                  );
 
   STEP(60, "(printing problematic frame)")