Merge
author amurillo
Fri, 20 Sep 2013 11:09:25 -0700
changeset 20023 e446e24611f9
parent 19949 74d742bd9cdb (current diff)
parent 20022 e6e3f5ff6d73 (diff)
child 20024 5c23a187610b
Merge
hotspot/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java
hotspot/test/runtime/6878713/Test6878713.sh
hotspot/test/runtime/6878713/testcase.jar
hotspot/test/runtime/7020373/Test7020373.sh
hotspot/test/runtime/7020373/testcase.jar
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Fri Sep 20 11:09:25 2013 -0700
@@ -1213,6 +1213,7 @@
                 }
                 HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
                 if (t.countTokens() == 1) {
+                    String name = t.nextToken();
                     out.println("intConstant " + name + " " + db.lookupIntConstant(name));
                 } else if (t.countTokens() == 0) {
                     Iterator i = db.getIntConstants();
@@ -1235,6 +1236,7 @@
                 }
                 HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
                 if (t.countTokens() == 1) {
+                    String name = t.nextToken();
                     out.println("longConstant " + name + " " + db.lookupLongConstant(name));
                 } else if (t.countTokens() == 0) {
                     Iterator i = db.getLongConstants();
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Fri Sep 20 11:09:25 2013 -0700
@@ -81,7 +81,7 @@
 
     public Address getCompKlassAddressAt(long offset)
             throws UnalignedAddressException, UnmappedAddressException {
-        return debugger.readCompOopAddress(addr + offset);
+        return debugger.readCompKlassAddress(addr + offset);
     }
 
     //
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Sep 20 11:09:25 2013 -0700
@@ -792,7 +792,7 @@
 
   public boolean isCompressedKlassPointersEnabled() {
     if (compressedKlassPointersEnabled == null) {
-        Flag flag = getCommandLineFlag("UseCompressedKlassPointers");
+        Flag flag = getCommandLineFlag("UseCompressedClassPointers");
         compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE:
              (flag.getBool()? Boolean.TRUE: Boolean.FALSE);
     }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Fri Sep 20 11:09:25 2013 -0700
@@ -66,18 +66,18 @@
       printGCAlgorithm(flagMap);
       System.out.println();
       System.out.println("Heap Configuration:");
-      printValue("MinHeapFreeRatio   = ", getFlagValue("MinHeapFreeRatio", flagMap));
-      printValue("MaxHeapFreeRatio   = ", getFlagValue("MaxHeapFreeRatio", flagMap));
-      printValMB("MaxHeapSize        = ", getFlagValue("MaxHeapSize", flagMap));
-      printValMB("NewSize            = ", getFlagValue("NewSize", flagMap));
-      printValMB("MaxNewSize         = ", getFlagValue("MaxNewSize", flagMap));
-      printValMB("OldSize            = ", getFlagValue("OldSize", flagMap));
-      printValue("NewRatio           = ", getFlagValue("NewRatio", flagMap));
-      printValue("SurvivorRatio      = ", getFlagValue("SurvivorRatio", flagMap));
-      printValMB("MetaspaceSize      = ", getFlagValue("MetaspaceSize", flagMap));
-      printValMB("ClassMetaspaceSize = ", getFlagValue("ClassMetaspaceSize", flagMap));
-      printValMB("MaxMetaspaceSize   = ", getFlagValue("MaxMetaspaceSize", flagMap));
-      printValMB("G1HeapRegionSize   = ", HeapRegion.grainBytes());
+      printValue("MinHeapFreeRatio         = ", getFlagValue("MinHeapFreeRatio", flagMap));
+      printValue("MaxHeapFreeRatio         = ", getFlagValue("MaxHeapFreeRatio", flagMap));
+      printValMB("MaxHeapSize              = ", getFlagValue("MaxHeapSize", flagMap));
+      printValMB("NewSize                  = ", getFlagValue("NewSize", flagMap));
+      printValMB("MaxNewSize               = ", getFlagValue("MaxNewSize", flagMap));
+      printValMB("OldSize                  = ", getFlagValue("OldSize", flagMap));
+      printValue("NewRatio                 = ", getFlagValue("NewRatio", flagMap));
+      printValue("SurvivorRatio            = ", getFlagValue("SurvivorRatio", flagMap));
+      printValMB("MetaspaceSize            = ", getFlagValue("MetaspaceSize", flagMap));
+      printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap));
+      printValMB("MaxMetaspaceSize         = ", getFlagValue("MaxMetaspaceSize", flagMap));
+      printValMB("G1HeapRegionSize         = ", HeapRegion.grainBytes());
 
       System.out.println();
       System.out.println("Heap Usage:");
--- a/hotspot/make/bsd/makefiles/gcc.make	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/make/bsd/makefiles/gcc.make	Fri Sep 20 11:09:25 2013 -0700
@@ -80,7 +80,7 @@
     HOSTCC  = $(CC)
   endif
 
-  AS   = $(CC) -c -x assembler-with-cpp
+  AS   = $(CC) -c 
 endif
 
 
@@ -347,6 +347,13 @@
   LDFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN)
 endif
 
+
+#------------------------------------------------------------------------
+# Assembler flags
+
+# Enforce preprocessing of .s files
+ASFLAGS += -x assembler-with-cpp
+
 #------------------------------------------------------------------------
 # Linker flags
 
--- a/hotspot/make/excludeSrc.make	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/make/excludeSrc.make	Fri Sep 20 11:09:25 2013 -0700
@@ -99,7 +99,7 @@
 	psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
 	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
 	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
-	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
+	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp hSpaceCounters.cpp
 endif
 
 ifeq ($(INCLUDE_NMT), false)
--- a/hotspot/make/hotspot_version	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/make/hotspot_version	Fri Sep 20 11:09:25 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=50
+HS_BUILD_NUMBER=51
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -105,7 +105,7 @@
         if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
       }
 
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
             src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
       }
@@ -963,7 +963,7 @@
       case T_METADATA:  __ ld_ptr(base, offset, to_reg->as_register()); break;
       case T_ADDRESS:
 #ifdef _LP64
-        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) {
+        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
           __ lduw(base, offset, to_reg->as_register());
           __ decode_klass_not_null(to_reg->as_register());
         } else
@@ -2208,7 +2208,7 @@
     // We don't know the array types are compatible
     if (basic_type != T_OBJECT) {
       // Simple test for basic type arrays
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         // We don't need decode because we just need to compare
         __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
         __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
@@ -2342,7 +2342,7 @@
     // but not necessarily exactly of type default_type.
     Label known_ok, halt;
     metadata2reg(op->expected_type()->constant_encoding(), tmp);
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       // tmp holds the default type. It currently comes uncompressed after the
       // load of a constant, so encode it.
       __ encode_klass_not_null(tmp);
--- a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -186,7 +186,7 @@
     set((intx)markOopDesc::prototype(), t1);
   }
   st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Save klass
     mov(klass, t1);
     encode_klass_not_null(t1);
@@ -196,7 +196,7 @@
   }
   if (len->is_valid()) {
     st(len, obj, arrayOopDesc::length_offset_in_bytes());
-  } else if (UseCompressedKlassPointers) {
+  } else if (UseCompressedClassPointers) {
     // otherwise length is in the class gap
     store_klass_gap(G0, obj);
   }
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -3911,7 +3911,7 @@
   // The number of bytes in this code is used by
   // MachCallDynamicJavaNode::ret_addr_offset()
   // if this changes, change that.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
     decode_klass_not_null(klass);
   } else {
@@ -3920,7 +3920,7 @@
 }
 
 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(dst_oop != klass, "not enough registers");
     encode_klass_not_null(klass);
     st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
@@ -3930,7 +3930,7 @@
 }
 
 void MacroAssembler::store_klass_gap(Register s, Register d) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(s != d, "not enough registers");
     st(s, d, oopDesc::klass_gap_offset_in_bytes());
   }
@@ -4089,7 +4089,7 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert (UseCompressedKlassPointers, "must be compressed");
+  assert (UseCompressedClassPointers, "must be compressed");
   assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   assert(r != G6_heapbase, "bad register choice");
   set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@@ -4105,7 +4105,7 @@
   if (src == dst) {
     encode_klass_not_null(src);
   } else {
-    assert (UseCompressedKlassPointers, "must be compressed");
+    assert (UseCompressedClassPointers, "must be compressed");
     assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
     set((intptr_t)Universe::narrow_klass_base(), dst);
     sub(src, dst, dst);
@@ -4119,7 +4119,7 @@
 // generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
 // the instructions they generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   // set + add + set
   int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
     insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
@@ -4135,7 +4135,7 @@
 void  MacroAssembler::decode_klass_not_null(Register r) {
   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   // pd_code_size_limit.
-  assert (UseCompressedKlassPointers, "must be compressed");
+  assert (UseCompressedClassPointers, "must be compressed");
   assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   assert(r != G6_heapbase, "bad register choice");
   set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@@ -4151,7 +4151,7 @@
   } else {
     // Do not add assert code to this unless you change vtableStubs_sparc.cpp
     // pd_code_size_limit.
-    assert (UseCompressedKlassPointers, "must be compressed");
+    assert (UseCompressedClassPointers, "must be compressed");
     assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
     if (Universe::narrow_klass_shift() != 0) {
       assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
@@ -4167,7 +4167,7 @@
 }
 
 void MacroAssembler::reinit_heapbase() {
-  if (UseCompressedOops || UseCompressedKlassPointers) {
+  if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
       set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
     } else {
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Sep 20 11:09:25 2013 -0700
@@ -557,7 +557,7 @@
     int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
     int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
     int klass_load_size;
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       assert(Universe::heap() != NULL, "java heap should be initialized");
       klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
     } else {
@@ -1657,7 +1657,7 @@
 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   st->print_cr("\nUEP:");
 #ifdef    _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
     st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
@@ -1897,7 +1897,7 @@
 
 bool Matcher::narrow_klass_use_complex_address() {
   NOT_LP64(ShouldNotCallThis());
-  assert(UseCompressedKlassPointers, "only for compressed klass code");
+  assert(UseCompressedClassPointers, "only for compressed klass code");
   return false;
 }
 
@@ -2561,7 +2561,7 @@
       int off = __ offset();
       __ load_klass(O0, G3_scratch);
       int klass_load_size;
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         assert(Universe::heap() != NULL, "java heap should be initialized");
         klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
       } else {
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -2945,7 +2945,7 @@
 
     BLOCK_COMMENT("arraycopy argument klass checks");
     //  get src->klass()
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ delayed()->nop(); // ??? not good
       __ load_klass(src, G3_src_klass);
     } else {
@@ -2980,7 +2980,7 @@
     // Load 32-bits signed value. Use br() instruction with it to check icc.
     __ lduw(G3_src_klass, lh_offset, G5_lh);
 
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ load_klass(dst, G4_dst_klass);
     }
     // Handle objArrays completely differently...
@@ -2988,7 +2988,7 @@
     __ set(objArray_lh, O5_temp);
     __ cmp(G5_lh,       O5_temp);
     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ delayed()->nop();
     } else {
       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -218,13 +218,13 @@
       // ld;ld;ld,jmp,nop
       const int basic = 5*BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedKlassPointers ?
+                        (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return basic + slop;
     } else {
       const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedKlassPointers ?
+                        (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return (basic + slop);
     }
--- a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -148,7 +148,7 @@
 
   static int adjust_reg_range(int range) {
     // Reduce the number of available regs (to free r12) in case of compressed oops
-    if (UseCompressedOops || UseCompressedKlassPointers) return range - 1;
+    if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
     return range;
   }
 
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -341,7 +341,7 @@
   Register receiver = FrameMap::receiver_opr->as_register();
   Register ic_klass = IC_Klass;
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-  const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
+  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
   if (!do_post_padding) {
     // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
     while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
@@ -1263,7 +1263,7 @@
       break;
 
     case T_ADDRESS:
-      if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
         __ movl(dest->as_register(), from_addr);
       } else {
         __ movptr(dest->as_register(), from_addr);
@@ -1371,7 +1371,7 @@
     __ verify_oop(dest->as_register());
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ decode_klass_not_null(dest->as_register());
     }
 #endif
@@ -1716,7 +1716,7 @@
   } else if (obj == klass_RInfo) {
     klass_RInfo = dst;
   }
-  if (k->is_loaded() && !UseCompressedKlassPointers) {
+  if (k->is_loaded() && !UseCompressedClassPointers) {
     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
   } else {
     Rtmp1 = op->tmp3()->as_register();
@@ -1724,14 +1724,6 @@
   }
 
   assert_different_registers(obj, k_RInfo, klass_RInfo);
-  if (!k->is_loaded()) {
-    klass2reg_with_patching(k_RInfo, op->info_for_patch());
-  } else {
-#ifdef _LP64
-    __ mov_metadata(k_RInfo, k->constant_encoding());
-#endif // _LP64
-  }
-  assert(obj != k_RInfo, "must be different");
 
   __ cmpptr(obj, (int32_t)NULL_WORD);
   if (op->should_profile()) {
@@ -1748,13 +1740,21 @@
   } else {
     __ jcc(Assembler::equal, *obj_is_null);
   }
+
+  if (!k->is_loaded()) {
+    klass2reg_with_patching(k_RInfo, op->info_for_patch());
+  } else {
+#ifdef _LP64
+    __ mov_metadata(k_RInfo, k->constant_encoding());
+#endif // _LP64
+  }
   __ verify_oop(obj);
 
   if (op->fast_check()) {
     // get object class
     // not a safepoint as obj null check happens earlier
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ load_klass(Rtmp1, obj);
       __ cmpptr(k_RInfo, Rtmp1);
     } else {
@@ -3294,7 +3294,7 @@
     // We don't know the array types are compatible
     if (basic_type != T_OBJECT) {
       // Simple test for basic type arrays
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         __ movl(tmp, src_klass_addr);
         __ cmpl(tmp, dst_klass_addr);
       } else {
@@ -3456,21 +3456,21 @@
     Label known_ok, halt;
     __ mov_metadata(tmp, default_type->constant_encoding());
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ encode_klass_not_null(tmp);
     }
 #endif
 
     if (basic_type != T_OBJECT) {
 
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
       else                   __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::notEqual, halt);
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, src_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, src_klass_addr);
       else                   __ cmpptr(tmp, src_klass_addr);
       __ jcc(Assembler::equal, known_ok);
     } else {
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
       else                   __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::equal, known_ok);
       __ cmpptr(src, dst);
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1239,7 +1239,7 @@
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
     tmp3 = new_register(objectType);
   }
   __ checkcast(reg, obj.result(), x->klass(),
@@ -1261,7 +1261,7 @@
   }
   obj.load_item();
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
     tmp3 = new_register(objectType);
   }
   __ instanceof(reg, obj.result(), x->klass(),
--- a/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -157,7 +157,7 @@
     movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
   }
 #ifdef _LP64
-  if (UseCompressedKlassPointers) { // Take care not to kill klass
+  if (UseCompressedClassPointers) { // Take care not to kill klass
     movptr(t1, klass);
     encode_klass_not_null(t1);
     movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
@@ -171,7 +171,7 @@
     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   }
 #ifdef _LP64
-  else if (UseCompressedKlassPointers) {
+  else if (UseCompressedClassPointers) {
     xorptr(t1, t1);
     store_klass_gap(obj, t1);
   }
@@ -334,7 +334,7 @@
   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   int start_offset = offset();
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     load_klass(rscratch1, receiver);
     cmpptr(rscratch1, iCache);
   } else {
@@ -345,7 +345,7 @@
   jump_cc(Assembler::notEqual,
           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-  assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
+  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 }
 
 
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1635,7 +1635,7 @@
 #ifdef ASSERT
   // TraceBytecodes does not use r12 but saves it over the call, so don't verify
   // r12 is the heapbase.
-  LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
+  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
 #endif // ASSERT
 
   assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
@@ -4802,7 +4802,7 @@
 
 void MacroAssembler::load_klass(Register dst, Register src) {
 #ifdef _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
     decode_klass_not_null(dst);
   } else
@@ -4817,7 +4817,7 @@
 
 void MacroAssembler::store_klass(Register dst, Register src) {
 #ifdef _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     encode_klass_not_null(src);
     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
   } else
@@ -4892,7 +4892,7 @@
 
 #ifdef _LP64
 void MacroAssembler::store_klass_gap(Register dst, Register src) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Store to klass gap in destination
     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
   }
@@ -5075,7 +5075,7 @@
 // when (Universe::heap() != NULL).  Hence, if the instructions they
 // generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
   return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
 }
@@ -5085,7 +5085,7 @@
 void  MacroAssembler::decode_klass_not_null(Register r) {
   // Note: it will change flags
   assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert(r != r12_heapbase, "Decoding a klass in r12");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
@@ -5103,7 +5103,7 @@
 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
   // Note: it will change flags
   assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   if (dst == src) {
     decode_klass_not_null(dst);
   } else {
@@ -5141,7 +5141,7 @@
 }
 
 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5149,7 +5149,7 @@
 }
 
 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5175,7 +5175,7 @@
 }
 
 void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5183,7 +5183,7 @@
 }
 
 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5191,7 +5191,7 @@
 }
 
 void MacroAssembler::reinit_heapbase() {
-  if (UseCompressedOops || UseCompressedKlassPointers) {
+  if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
       if (Universe::narrow_oop_base() == NULL) {
         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -34,9 +34,9 @@
   // Run with +PrintInterpreter to get the VM to print out the size.
   // Max size with JVMTI
 #ifdef AMD64
-  const static int InterpreterCodeSize = 200 * 1024;
+  const static int InterpreterCodeSize = 208 * 1024;
 #else
-  const static int InterpreterCodeSize = 168 * 1024;
+  const static int InterpreterCodeSize = 176 * 1024;
 #endif // AMD64
 
 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -211,11 +211,11 @@
   if (is_vtable_stub) {
     // Vtable stub size
     return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   } else {
     // Itable stub size
     return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   }
   // In order to tune these parameters, run the JVM with VM options
   // +PrintMiscellaneous and +WizardMode to see information about
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Fri Sep 20 11:09:25 2013 -0700
@@ -1391,7 +1391,7 @@
 #ifndef PRODUCT
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
     st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     st->print_cr("\tcmpq    rax, rscratch1\t # Inline cache check");
@@ -1408,7 +1408,7 @@
 {
   MacroAssembler masm(&cbuf);
   uint insts_size = cbuf.insts_size();
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     masm.load_klass(rscratch1, j_rarg0);
     masm.cmpptr(rax, rscratch1);
   } else {
@@ -1557,7 +1557,7 @@
 }
 
 bool Matcher::narrow_klass_use_complex_address() {
-  assert(UseCompressedKlassPointers, "only for compressed klass code");
+  assert(UseCompressedClassPointers, "only for compressed klass code");
   return (LogKlassAlignmentInBytes <= 3);
 }
 
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -3589,8 +3589,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -131,6 +131,7 @@
 bool os::Linux::_supports_fast_thread_cpu_time = false;
 const char * os::Linux::_glibc_version = NULL;
 const char * os::Linux::_libpthread_version = NULL;
+pthread_condattr_t os::Linux::_condattr[1];
 
 static jlong initial_time_count=0;
 
@@ -1399,12 +1400,15 @@
           clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
         // yes, monotonic clock is supported
         _clock_gettime = clock_gettime_func;
+        return;
       } else {
         // close librt if there is no monotonic clock
         dlclose(handle);
       }
     }
   }
+  warning("No monotonic clock was available - timed services may " \
+          "be adversely affected if the time-of-day clock changes");
 }
 
 #ifndef SYS_clock_getres
@@ -2165,23 +2169,49 @@
 }
 
 // Try to identify popular distros.
-// Most Linux distributions have /etc/XXX-release file, which contains
-// the OS version string. Some have more than one /etc/XXX-release file
-// (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
-// so the order is important.
+// Most Linux distributions have a /etc/XXX-release file, which contains
+// the OS version string. Newer Linux distributions have a /etc/lsb-release
+// file that also contains the OS version string. Some have more than one
+// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
+// /etc/redhat-release.), so the order is important.
+// Any Linux that is based on Redhat (i.e. Oracle, Mandrake, Sun JDS...) have
+// their own specific XXX-release file as well as a redhat-release file.
+// Because of this the XXX-release file needs to be searched for before the
+// redhat-release file.
+// Since Red Hat has a lsb-release file that is not very descriptive the
+// search for redhat-release needs to be before lsb-release.
+// Since the lsb-release file is the new standard it needs to be searched
+// before the older style release files.
+// Searching system-release (Red Hat) and os-release (other Linuxes) are a
+// next to last resort.  The os-release file is a new standard that contains
+// distribution information and the system-release file seems to be an old
+// standard that has been replaced by the lsb-release and os-release files.
+// Searching for the debian_version file is the last resort.  It contains
+// an informative string like "6.0.6" or "wheezy/sid". Because of this
+// "Debian " is printed before the contents of the debian_version file.
 void os::Linux::print_distro_info(outputStream* st) {
-  if (!_print_ascii_file("/etc/mandrake-release", st) &&
-      !_print_ascii_file("/etc/sun-release", st) &&
-      !_print_ascii_file("/etc/redhat-release", st) &&
-      !_print_ascii_file("/etc/SuSE-release", st) &&
-      !_print_ascii_file("/etc/turbolinux-release", st) &&
-      !_print_ascii_file("/etc/gentoo-release", st) &&
-      !_print_ascii_file("/etc/debian_version", st) &&
-      !_print_ascii_file("/etc/ltib-release", st) &&
-      !_print_ascii_file("/etc/angstrom-version", st)) {
-      st->print("Linux");
-  }
-  st->cr();
+   if (!_print_ascii_file("/etc/oracle-release", st) &&
+       !_print_ascii_file("/etc/mandriva-release", st) &&
+       !_print_ascii_file("/etc/mandrake-release", st) &&
+       !_print_ascii_file("/etc/sun-release", st) &&
+       !_print_ascii_file("/etc/redhat-release", st) &&
+       !_print_ascii_file("/etc/lsb-release", st) &&
+       !_print_ascii_file("/etc/SuSE-release", st) &&
+       !_print_ascii_file("/etc/turbolinux-release", st) &&
+       !_print_ascii_file("/etc/gentoo-release", st) &&
+       !_print_ascii_file("/etc/ltib-release", st) &&
+       !_print_ascii_file("/etc/angstrom-version", st) &&
+       !_print_ascii_file("/etc/system-release", st) &&
+       !_print_ascii_file("/etc/os-release", st)) {
+
+       if (file_exists("/etc/debian_version")) {
+         st->print("Debian ");
+         _print_ascii_file("/etc/debian_version", st);
+       } else {
+         st->print("Linux");
+       }
+   }
+   st->cr();
 }
 
 void os::Linux::print_libversion_info(outputStream* st) {
@@ -4709,6 +4739,26 @@
 
   Linux::clock_init();
   initial_time_count = os::elapsed_counter();
+
+  // pthread_condattr initialization for monotonic clock
+  int status;
+  pthread_condattr_t* _condattr = os::Linux::condAttr();
+  if ((status = pthread_condattr_init(_condattr)) != 0) {
+    fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
+  }
+  // Only set the clock if CLOCK_MONOTONIC is available
+  if (Linux::supports_monotonic_clock()) {
+    if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
+      if (status == EINVAL) {
+        warning("Unable to use monotonic clock with relative timed-waits" \
+                " - changes to the time-of-day clock may have adverse effects");
+      } else {
+        fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
+      }
+    }
+  }
+  // else it defaults to CLOCK_REALTIME
+
   pthread_mutex_init(&dl_mutex, NULL);
 
   // If the pagesize of the VM is greater than 8K determine the appropriate
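The block above initializes a process-wide pthread_condattr_t and, where the platform supports it, binds it to CLOCK_MONOTONIC so that relative timed waits no longer track the wall clock. A minimal standalone sketch of that POSIX pattern (not the HotSpot code; names are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_condattr_t demo_condattr;   // plays the role of os::Linux::_condattr
static pthread_cond_t     demo_cond;

// Bind the condvar's timed waits to CLOCK_MONOTONIC when the platform allows it;
// on EINVAL keep the default CLOCK_REALTIME, which is the same fallback the patch takes.
static void demo_cond_init(void) {
  int status = pthread_condattr_init(&demo_condattr);
  if (status != 0) {
    fprintf(stderr, "pthread_condattr_init failed: %d\n", status);
    return;
  }
  status = pthread_condattr_setclock(&demo_condattr, CLOCK_MONOTONIC);
  if (status == EINVAL) {
    fprintf(stderr, "monotonic clock not usable for condvar timed waits\n");
  }
  pthread_cond_init(&demo_cond, &demo_condattr);
}
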
@@ -4755,8 +4805,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
@@ -5519,21 +5567,36 @@
 
 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
   if (millis < 0)  millis = 0;
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
+
   jlong seconds = millis / 1000;
   millis %= 1000;
   if (seconds > 50000000) { // see man cond_timedwait(3T)
     seconds = 50000000;
   }
-  abstime->tv_sec = now.tv_sec  + seconds;
-  long       usec = now.tv_usec + millis * 1000;
-  if (usec >= 1000000) {
-    abstime->tv_sec += 1;
-    usec -= 1000000;
-  }
-  abstime->tv_nsec = usec * 1000;
+
+  if (os::Linux::supports_monotonic_clock()) {
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
+    if (nanos >= NANOSECS_PER_SEC) {
+      abstime->tv_sec += 1;
+      nanos -= NANOSECS_PER_SEC;
+    }
+    abstime->tv_nsec = nanos;
+  } else {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long usec = now.tv_usec + millis * 1000;
+    if (usec >= 1000000) {
+      abstime->tv_sec += 1;
+      usec -= 1000000;
+    }
+    abstime->tv_nsec = usec * 1000;
+  }
   return abstime;
 }
 
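For reference, the monotonic branch of compute_abstime() above amounts to the following deadline arithmetic. This is a hedged standalone sketch assuming CLOCK_MONOTONIC is available and the condvar was created with a matching condattr; the helper name is illustrative:

#include <time.h>

#define DEMO_NANOS_PER_MILLI 1000000L
#define DEMO_NANOS_PER_SEC   1000000000L

// Turn "millis from now" into an absolute timespec on CLOCK_MONOTONIC,
// carrying any nanosecond overflow into the seconds field.
static void demo_deadline(struct timespec* abstime, long millis) {
  struct timespec now;
  clock_gettime(CLOCK_MONOTONIC, &now);
  abstime->tv_sec = now.tv_sec + millis / 1000;
  long nanos = now.tv_nsec + (millis % 1000) * DEMO_NANOS_PER_MILLI;
  if (nanos >= DEMO_NANOS_PER_SEC) {
    abstime->tv_sec += 1;
    nanos -= DEMO_NANOS_PER_SEC;
  }
  abstime->tv_nsec = nanos;
}

Handing such a deadline to pthread_cond_timedwait() on a condvar bound to CLOCK_MONOTONIC means that stepping the time-of-day clock can neither shorten nor lengthen the wait.
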
@@ -5625,7 +5688,7 @@
     status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       pthread_cond_destroy (_cond);
-      pthread_cond_init (_cond, NULL) ;
+      pthread_cond_init (_cond, os::Linux::condAttr()) ;
     }
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
@@ -5726,32 +5789,50 @@
 
 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
   assert (time > 0, "convertTime");
-
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
-
-  time_t max_secs = now.tv_sec + MAX_SECS;
-
-  if (isAbsolute) {
-    jlong secs = time / 1000;
-    if (secs > max_secs) {
-      absTime->tv_sec = max_secs;
+  time_t max_secs = 0;
+
+  if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+
+    max_secs = now.tv_sec + MAX_SECS;
+
+    if (isAbsolute) {
+      jlong secs = time / 1000;
+      if (secs > max_secs) {
+        absTime->tv_sec = max_secs;
+      } else {
+        absTime->tv_sec = secs;
+      }
+      absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
+    } else {
+      jlong secs = time / NANOSECS_PER_SEC;
+      if (secs >= MAX_SECS) {
+        absTime->tv_sec = max_secs;
+        absTime->tv_nsec = 0;
+      } else {
+        absTime->tv_sec = now.tv_sec + secs;
+        absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+        if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
+          absTime->tv_nsec -= NANOSECS_PER_SEC;
+          ++absTime->tv_sec; // note: this must be <= max_secs
+        }
+      }
     }
-    else {
-      absTime->tv_sec = secs;
-    }
-    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
-  }
-  else {
+  } else {
+    // must be relative using monotonic clock
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    max_secs = now.tv_sec + MAX_SECS;
     jlong secs = time / NANOSECS_PER_SEC;
     if (secs >= MAX_SECS) {
       absTime->tv_sec = max_secs;
       absTime->tv_nsec = 0;
-    }
-    else {
+    } else {
       absTime->tv_sec = now.tv_sec + secs;
-      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
         absTime->tv_nsec -= NANOSECS_PER_SEC;
         ++absTime->tv_sec; // note: this must be <= max_secs
@@ -5831,15 +5912,19 @@
   jt->set_suspend_equivalent();
   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
 
+  assert(_cur_index == -1, "invariant");
   if (time == 0) {
-    status = pthread_cond_wait (_cond, _mutex) ;
+    _cur_index = REL_INDEX; // arbitrary choice when not timed
+    status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
   } else {
-    status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
+    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
+    status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
-      pthread_cond_destroy (_cond) ;
-      pthread_cond_init    (_cond, NULL);
+      pthread_cond_destroy (&_cond[_cur_index]) ;
+      pthread_cond_init    (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
     }
   }
+  _cur_index = -1;
   assert_status(status == 0 || status == EINTR ||
                 status == ETIME || status == ETIMEDOUT,
                 status, "cond_timedwait");
@@ -5868,17 +5953,24 @@
   s = _counter;
   _counter = 1;
   if (s < 1) {
-     if (WorkAroundNPTLTimedWaitHang) {
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
+    // thread might be parked
+    if (_cur_index != -1) {
+      // thread is definitely parked
+      if (WorkAroundNPTLTimedWaitHang) {
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-     } else {
+        assert (status == 0, "invariant");
+      } else {
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
-     }
+        assert (status == 0, "invariant");
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
+      }
+    } else {
+      pthread_mutex_unlock(_mutex);
+      assert (status == 0, "invariant") ;
+    }
   } else {
     pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -221,6 +221,13 @@
 
   static jlong fast_thread_cpu_time(clockid_t clockid);
 
+  // pthread_cond clock support
+  private:
+  static pthread_condattr_t _condattr[1];
+
+  public:
+  static pthread_condattr_t* condAttr() { return _condattr; }
+
   // Stack repair handling
 
   // none present
@@ -295,7 +302,7 @@
   public:
     PlatformEvent() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
+      status = pthread_cond_init (_cond, os::Linux::condAttr());
       assert_status(status == 0, status, "cond_init");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
@@ -310,14 +317,19 @@
     void park () ;
     void unpark () ;
     int  TryPark () ;
-    int  park (jlong millis) ;
+    int  park (jlong millis) ; // relative timed-wait only
     void SetAssociation (Thread * a) { _Assoc = a ; }
 } ;
 
 class PlatformParker : public CHeapObj<mtInternal> {
   protected:
+    enum {
+        REL_INDEX = 0,
+        ABS_INDEX = 1
+    };
+    int _cur_index;  // which cond is in use: -1, 0, 1
     pthread_mutex_t _mutex [1] ;
-    pthread_cond_t  _cond  [1] ;
+    pthread_cond_t  _cond  [2] ; // one for relative times and one for abs.
 
   public:       // TODO-FIXME: make dtor private
     ~PlatformParker() { guarantee (0, "invariant") ; }
@@ -325,10 +337,13 @@
   public:
     PlatformParker() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
-      assert_status(status == 0, status, "cond_init");
+      status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
+      assert_status(status == 0, status, "cond_init rel");
+      status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
+      assert_status(status == 0, status, "cond_init abs");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
+      _cur_index = -1; // mark as unused
     }
 };
 
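The PlatformParker change above keeps two condition variables because a condvar's clock is fixed when it is initialized: relative park timeouts are best measured on CLOCK_MONOTONIC, while absolute park deadlines are specified against the wall clock and so need CLOCK_REALTIME. _cur_index records which condvar a parked thread is blocked on so that unpark() can signal the right one. A rough sketch of that selection idea (illustrative only, not the actual Parker; error handling omitted):

#include <pthread.h>
#include <time.h>

// Assumes cond[REL] was initialized with a CLOCK_MONOTONIC condattr and cond[ABS] with the default.
struct TwoClockParker {
  enum { REL = 0, ABS = 1 };
  pthread_mutex_t mutex;
  pthread_cond_t  cond[2];
  int             cur_index;   // -1 when no thread is parked

  void timed_park(const timespec& deadline, bool is_absolute) {
    pthread_mutex_lock(&mutex);
    cur_index = is_absolute ? ABS : REL;                // remember which condvar we block on
    pthread_cond_timedwait(&cond[cur_index], &mutex, &deadline);
    cur_index = -1;
    pthread_mutex_unlock(&mutex);
  }

  void wake() {
    pthread_mutex_lock(&mutex);
    if (cur_index != -1) {
      pthread_cond_signal(&cond[cur_index]);            // signal the condvar actually in use
    }
    pthread_mutex_unlock(&mutex);
  }
};
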
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -5178,9 +5178,7 @@
     if(Verbose && PrintMiscellaneous)
       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
 #endif
-}
-
-  os::large_page_init();
+  }
 
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
--- a/hotspot/src/os/windows/vm/decoder_windows.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os/windows/vm/decoder_windows.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -32,7 +32,11 @@
   _can_decode_in_vm = false;
   _pfnSymGetSymFromAddr64 = NULL;
   _pfnUndecorateSymbolName = NULL;
-
+#ifdef AMD64
+  _pfnStackWalk64 = NULL;
+  _pfnSymFunctionTableAccess64 = NULL;
+  _pfnSymGetModuleBase64 = NULL;
+#endif
   _decoder_status = no_error;
   initialize();
 }
@@ -53,14 +57,24 @@
     _pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName");
 
     if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) {
-      _pfnSymGetSymFromAddr64 = NULL;
-      _pfnUndecorateSymbolName = NULL;
-      ::FreeLibrary(handle);
-      _dbghelp_handle = NULL;
+      uninitialize();
       _decoder_status = helper_func_error;
       return;
     }
 
+#ifdef AMD64
+    _pfnStackWalk64 = (pfn_StackWalk64)::GetProcAddress(handle, "StackWalk64");
+    _pfnSymFunctionTableAccess64 = (pfn_SymFunctionTableAccess64)::GetProcAddress(handle, "SymFunctionTableAccess64");
+    _pfnSymGetModuleBase64 = (pfn_SymGetModuleBase64)::GetProcAddress(handle, "SymGetModuleBase64");
+    if (_pfnStackWalk64 == NULL || _pfnSymFunctionTableAccess64 == NULL || _pfnSymGetModuleBase64 == NULL) {
+      // We can't call StackWalk64 to walk the stack, but we are still
+      // able to decode the symbols. Let's limp on.
+      _pfnStackWalk64 = NULL;
+      _pfnSymFunctionTableAccess64 = NULL;
+      _pfnSymGetModuleBase64 = NULL;
+    }
+#endif
+
     HANDLE hProcess = ::GetCurrentProcess();
     _pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
     if (!_pfnSymInitialize(hProcess, NULL, TRUE)) {
@@ -156,6 +170,11 @@
 void WindowsDecoder::uninitialize() {
   _pfnSymGetSymFromAddr64 = NULL;
   _pfnUndecorateSymbolName = NULL;
+#ifdef AMD64
+  _pfnStackWalk64 = NULL;
+  _pfnSymFunctionTableAccess64 = NULL;
+  _pfnSymGetModuleBase64 = NULL;
+#endif
   if (_dbghelp_handle != NULL) {
     ::FreeLibrary(_dbghelp_handle);
   }
@@ -195,3 +214,65 @@
          _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
 }
 
+#ifdef AMD64
+BOOL WindowsDbgHelp::StackWalk64(DWORD MachineType,
+                                 HANDLE hProcess,
+                                 HANDLE hThread,
+                                 LPSTACKFRAME64 StackFrame,
+                                 PVOID ContextRecord,
+                                 PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                                 PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                                 PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                                 PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress) {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error() && wd->_pfnStackWalk64) {
+    return wd->_pfnStackWalk64(MachineType,
+                               hProcess,
+                               hThread,
+                               StackFrame,
+                               ContextRecord,
+                               ReadMemoryRoutine,
+                               FunctionTableAccessRoutine,
+                               GetModuleBaseRoutine,
+                               TranslateAddress);
+  } else {
+    return false;
+  }
+}
+
+PVOID WindowsDbgHelp::SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error() && wd->_pfnSymFunctionTableAccess64) {
+    return wd->_pfnSymFunctionTableAccess64(hProcess, AddrBase);
+  } else {
+    return NULL;
+  }
+}
+
+pfn_SymFunctionTableAccess64 WindowsDbgHelp::pfnSymFunctionTableAccess64() {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error()) {
+    return wd->_pfnSymFunctionTableAccess64;
+  } else {
+    return NULL;
+  }
+}
+
+pfn_SymGetModuleBase64 WindowsDbgHelp::pfnSymGetModuleBase64() {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error()) {
+    return wd->_pfnSymGetModuleBase64;
+  } else {
+    return NULL;
+  }
+}
+
+#endif // AMD64
--- a/hotspot/src/os/windows/vm/decoder_windows.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os/windows/vm/decoder_windows.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -38,6 +38,20 @@
 typedef BOOL  (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR);
 typedef BOOL  (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int);
 
+#ifdef AMD64
+typedef BOOL  (WINAPI *pfn_StackWalk64)(DWORD MachineType,
+                                        HANDLE hProcess,
+                                        HANDLE hThread,
+                                        LPSTACKFRAME64 StackFrame,
+                                        PVOID ContextRecord,
+                                        PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                                        PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                                        PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                                        PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef PVOID (WINAPI *pfn_SymFunctionTableAccess64)(HANDLE hProcess, DWORD64 AddrBase);
+typedef DWORD64 (WINAPI *pfn_SymGetModuleBase64)(HANDLE hProcess, DWORD64 dwAddr);
+#endif
+
 class WindowsDecoder : public AbstractDecoder {
 
 public:
@@ -61,7 +75,34 @@
   bool                      _can_decode_in_vm;
   pfn_SymGetSymFromAddr64   _pfnSymGetSymFromAddr64;
   pfn_UndecorateSymbolName  _pfnUndecorateSymbolName;
+#ifdef AMD64
+  pfn_StackWalk64              _pfnStackWalk64;
+  pfn_SymFunctionTableAccess64 _pfnSymFunctionTableAccess64;
+  pfn_SymGetModuleBase64       _pfnSymGetModuleBase64;
+
+  friend class WindowsDbgHelp;
+#endif
 };
 
+#ifdef AMD64
+// TODO: refactor and move the handling of dbghelp.dll outside of Decoder
+class WindowsDbgHelp : public Decoder {
+public:
+  static BOOL StackWalk64(DWORD MachineType,
+                          HANDLE hProcess,
+                          HANDLE hThread,
+                          LPSTACKFRAME64 StackFrame,
+                          PVOID ContextRecord,
+                          PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                          PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                          PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                          PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+  static PVOID SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase);
+
+  static pfn_SymFunctionTableAccess64 pfnSymFunctionTableAccess64();
+  static pfn_SymGetModuleBase64       pfnSymGetModuleBase64();
+};
+#endif
+
 #endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
 
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -3189,9 +3189,12 @@
     return p_buf;
 
   } else {
+    if (TracePageSizes && Verbose) {
+       tty->print_cr("Reserving large pages in a single large chunk.");
+    }
     // normal policy just allocate it all at once
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
-    char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
+    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
     if (res != NULL) {
       address pc = CALLER_PC;
       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
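The one-line change above passes the caller's requested base address to VirtualAlloc instead of always NULL, so a large-page reservation can be attempted at a specific location (the new Windows test later in this changeset exercises exactly that path). A minimal Win32 sketch of the pattern, assuming the large-page privilege is already enabled for the process (not the HotSpot function itself):

#include <windows.h>

// Try to reserve and commit 'bytes' of large pages at 'preferred'; pass NULL to let the OS choose.
// Returns NULL on failure, e.g. when the requested range is unavailable or not suitably aligned.
static char* demo_reserve_large_pages(void* preferred, SIZE_T bytes) {
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
  return (char*)VirtualAlloc(preferred, bytes, flags, PAGE_READWRITE);
}
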
@@ -3917,8 +3920,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // Setup Windows Exceptions
 
   // for debugging float code generation bugs
@@ -5429,7 +5430,7 @@
       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
         lib_name = ++start;
       } else {
-        // Need to check for C:
+        // Need to check for drive prefix
         if ((start = strchr(lib_name, ':')) != NULL) {
           lib_name = ++start;
         }
@@ -5714,7 +5715,66 @@
 #endif
 
 #ifndef PRODUCT
+
+// test the code path in reserve_memory_special() that tries to allocate memory in a single
+// contiguous memory block at a particular address.
+// The test first tries to find a good approximate address to allocate at by using the same
+// method to allocate some memory at any address. The test then tries to allocate memory in
+// the vicinity (not directly after it to avoid possible by-chance use of that location)
+// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
+// the previously allocated memory is available for allocation. The only actual failure
+// that is reported is when the test tries to allocate at a particular location but gets a
+// different valid one. A NULL return value at this point is not considered an error but may
+// be legitimate.
+// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
 void TestReserveMemorySpecial_test() {
-  // No tests available for this platform
-}
-#endif
+  if (!UseLargePages) {
+    if (VerboseInternalVMTests) {
+      gclog_or_tty->print("Skipping test because large pages are disabled");
+    }
+    return;
+  }
+  // save current value of globals
+  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
+  bool old_use_numa_interleaving = UseNUMAInterleaving;
+
+  // set globals to make sure we hit the correct code path
+  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
+
+  // do an allocation at an address selected by the OS to get a good one.
+  const size_t large_allocation_size = os::large_page_size() * 4;
+  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
+  if (result == NULL) {
+    if (VerboseInternalVMTests) {
+      gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
+        large_allocation_size);
+    }
+  } else {
+    os::release_memory_special(result, large_allocation_size);
+
+    // allocate another page within the recently allocated memory area which seems to be a good location. At least
+    // we managed to get it once.
+    const size_t expected_allocation_size = os::large_page_size();
+    char* expected_location = result + os::large_page_size();
+    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
+    if (actual_location == NULL) {
+      if (VerboseInternalVMTests) {
+        gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
+          expected_location, expected_allocation_size);
+      }
+    } else {
+      // release memory
+      os::release_memory_special(actual_location, expected_allocation_size);
+      // Only check now, after releasing the memory, to avoid any leaks.
+      assert(actual_location == expected_location,
+        err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
+          expected_location, expected_allocation_size, actual_location));
+    }
+  }
+
+  // restore globals
+  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
+  UseNUMAInterleaving = old_use_numa_interleaving;
+}
+#endif // PRODUCT
+
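For orientation, the reserve-at-address path that this test exercises reduces to roughly the
sketch below. This is an illustration only, not part of the changeset; it reuses just the os::
entry points visible above, and the four-page probe size is an arbitrary choice.

  const size_t lp = os::large_page_size();
  // Probe: let the OS pick an address for a large-page reservation, then release it.
  char* probe = os::reserve_memory_special(4 * lp, lp, NULL, false);
  if (probe != NULL) {
    os::release_memory_special(probe, 4 * lp);
    // Re-reserve a single large page in the vicinity of the probe.
    char* wanted = probe + lp;
    char* got = os::reserve_memory_special(lp, lp, wanted, false);
    if (got != NULL) {
      // On success the OS honored the requested address; a NULL result would also be acceptable.
      os::release_memory_special(got, lp);
    }
  }
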
--- a/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -35,7 +35,9 @@
 
 // Used on 64 bit platforms for UseCompressedOops base address
 #ifdef _LP64
-define_pd_global(uintx, HeapBaseMinAddress,      CONST64(4)*G);
+// Use 6G as the default base address because, by default, the OS maps the application
+// at 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
+define_pd_global(uintx, HeapBaseMinAddress,      CONST64(6)*G);
 #else
 define_pd_global(uintx, HeapBaseMinAddress,      2*G);
 #endif
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -29,6 +29,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
+#include "decoder_windows.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_windows.h"
 #include "memory/allocation.inline.hpp"
@@ -327,6 +328,94 @@
 
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 
+#ifdef AMD64
+/*
+ * Windows/x64 does not use stack frames the way expected by Java:
+ * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
+ * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
+ *     not be RBP.
+ * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
+ *
+ * So it's not possible to print the native stack using the
+ *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
+ * loop in vmError.cpp. We need to roll our own loop.
+ */
+bool os::platform_print_native_stack(outputStream* st, void* context,
+                                     char *buf, int buf_size)
+{
+  CONTEXT ctx;
+  if (context != NULL) {
+    memcpy(&ctx, context, sizeof(ctx));
+  } else {
+    RtlCaptureContext(&ctx);
+  }
+
+  st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
+
+  STACKFRAME stk;
+  memset(&stk, 0, sizeof(stk));
+  stk.AddrStack.Offset    = ctx.Rsp;
+  stk.AddrStack.Mode      = AddrModeFlat;
+  stk.AddrFrame.Offset    = ctx.Rbp;
+  stk.AddrFrame.Mode      = AddrModeFlat;
+  stk.AddrPC.Offset       = ctx.Rip;
+  stk.AddrPC.Mode         = AddrModeFlat;
+
+  int count = 0;
+  address lastpc = 0;
+  while (count++ < StackPrintLimit) {
+    intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
+    intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
+    address pc = (address)stk.AddrPC.Offset;
+
+    if (pc != NULL && sp != NULL && fp != NULL) {
+      if (count == 2 && lastpc == pc) {
+        // Skip it -- StackWalk64() may return the same PC
+        // (but different SP) on the first try.
+      } else {
+        // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
+        // may not contain what Java expects, and may cause the frame() constructor
+        // to crash. Let's just print out the symbolic address.
+        frame::print_C_frame(st, buf, buf_size, pc);
+        st->cr();
+      }
+      lastpc = pc;
+    } else {
+      break;
+    }
+
+    PVOID p = WindowsDbgHelp::SymFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
+    if (!p) {
+      // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause a crash.
+      break;
+    }
+
+    BOOL result = WindowsDbgHelp::StackWalk64(
+        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
+        GetCurrentProcess(),       // __in      HANDLE hProcess,
+        GetCurrentThread(),        // __in      HANDLE hThread,
+        &stk,                      // __inout   LPSTACKFRAME64 StackFrame,
+        &ctx,                      // __inout   PVOID ContextRecord,
+        NULL,                      // __in_opt  PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+        WindowsDbgHelp::pfnSymFunctionTableAccess64(),
+                                   // __in_opt  PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+        WindowsDbgHelp::pfnSymGetModuleBase64(),
+                                   // __in_opt  PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+        NULL);                     // __in_opt  PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress
+
+    if (!result) {
+      break;
+    }
+  }
+  if (count > StackPrintLimit) {
+    st->print_cr("...<more frames>...");
+  }
+  st->cr();
+
+  return true;
+}
+#endif // AMD64
+
 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -401,6 +490,9 @@
                                      StubRoutines::x86::get_previous_fp_entry());
   if (func == NULL) return frame();
   intptr_t* fp = (*func)();
+  if (fp == NULL) {
+    return frame();
+  }
 #else
   intptr_t* fp = _get_previous_fp();
 #endif // AMD64
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -62,4 +62,10 @@
 
   static bool      register_code_area(char *low, char *high);
 
+#ifdef AMD64
+#define PLATFORM_PRINT_NATIVE_STACK 1
+static bool platform_print_native_stack(outputStream* st, void* context,
+                                        char *buf, int buf_size);
+#endif
+
 #endif // OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_HPP
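The PLATFORM_PRINT_NATIVE_STACK define declared above is presumably consumed by the shared
error-reporting code, which is not part of this section. A hypothetical dispatch site could look
like the sketch below; only the os::platform_print_native_stack() call comes from the changeset,
the surrounding function is illustrative.

  static void report_native_stack(outputStream* st, void* context, char* buf, int buf_size) {
  #if defined(PLATFORM_PRINT_NATIVE_STACK)
    // On Windows/x64, delegate to the StackWalk64-based walker added above.
    if (os::platform_print_native_stack(st, context, buf, buf_size)) {
      return;
    }
  #endif
    // ... otherwise fall back to the generic os::get_sender_for_C_frame() loop ...
  }
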
--- a/hotspot/src/share/tools/LogCompilation/README	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/tools/LogCompilation/README	Fri Sep 20 11:09:25 2013 -0700
@@ -4,14 +4,14 @@
 requires a 1.5 JDK to build and simply typing make should build it.
 
 It produces a jar file, logc.jar, that can be run on the
-hotspot.log from LogCompilation output like this:
+HotSpot log (by default, hotspot_pid{pid}.log) produced by -XX:+LogCompilation, like this:
 
-  java -jar logc.jar hotspot.log
+  java -jar logc.jar hotspot_pid1234.log
 
 This will produce something like the normal PrintCompilation output.
 Adding the -i option will also report inlining, like PrintInlining.
 
-More information about the LogCompilation output can be found at 
+More information about the LogCompilation output can be found at
 
 https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+overview
 https://wikis.oracle.com/display/HotSpotInternals/PrintCompilation
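For example, a typical session might look like the lines below; MyApp is a placeholder, and the
diagnostic flags shown are the usual way to obtain the log:

  java -XX:+UnlockDiagnosticVMOptions -XX:+LogCompilation MyApp
  java -jar logc.jar -i hotspot_pid1234.log

The first command writes hotspot_pid{pid}.log into the working directory; the second prints the
PrintCompilation-style summary together with the inlining report enabled by -i.
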
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -709,10 +709,10 @@
   Bytecodes::Code code       = field_access.code();
 
   // We must load the class, initialize it, and resolve the field
-  FieldAccessInfo result; // initialize class if needed
+  fieldDescriptor result; // initialize class if needed
   constantPoolHandle constants(THREAD, caller->constants());
-  LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
-  return result.klass()();
+  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
+  return result.field_holder();
 }
 
 
@@ -826,11 +826,11 @@
   if (stub_id == Runtime1::access_field_patching_id) {
 
     Bytecode_field field_access(caller_method, bci);
-    FieldAccessInfo result; // initialize class if needed
+    fieldDescriptor result; // initialize class if needed
     Bytecodes::Code code = field_access.code();
     constantPoolHandle constants(THREAD, caller_method->constants());
-    LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
-    patch_field_offset = result.field_offset();
+    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
+    patch_field_offset = result.offset();
 
     // If we're patching a field which is volatile, then at compile time it
     // must not have been known to be volatile, so the generated code
--- a/hotspot/src/share/vm/ci/ciField.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciField.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@
 
   assert(klass->get_instanceKlass()->is_linked(), "must be linked before using its constant pool");
 
-  _cp_index = index;
   constantPoolHandle cpool(thread, klass->get_instanceKlass()->constants());
 
   // Get the field's name, signature, and type.
@@ -116,7 +115,7 @@
   // The declared holder of this field may not have been loaded.
   // Bail out with partial field information.
   if (!holder_is_accessible) {
-    // _cp_index and _type have already been set.
+    // _type has already been set.
     // The default values for _flags and _constant_value will suffice.
     // We need values for _holder, _offset, and _is_constant.
     _holder = declared_holder;
@@ -146,8 +145,6 @@
 ciField::ciField(fieldDescriptor *fd): _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) {
   ASSERT_IN_VM;
 
-  _cp_index = -1;
-
   // Get the field's name, signature, and type.
   ciEnv* env = CURRENT_ENV;
   _name = env->get_symbol(fd->name());
@@ -351,12 +348,11 @@
     }
   }
 
-  FieldAccessInfo result;
-  constantPoolHandle c_pool(THREAD,
-                         accessing_klass->get_instanceKlass()->constants());
-  LinkResolver::resolve_field(result, c_pool, _cp_index,
-                              Bytecodes::java_code(bc),
-                              true, false, KILL_COMPILE_ON_FATAL_(false));
+  fieldDescriptor result;
+  LinkResolver::resolve_field(result, _holder->get_instanceKlass(),
+                              _name->get_symbol(), _signature->get_symbol(),
+                              accessing_klass->get_Klass(), bc, true, false,
+                              KILL_COMPILE_ON_FATAL_(false));
 
   // update the hit-cache, unless there is a problem with memory scoping:
   if (accessing_klass->is_shared() || !is_shared()) {
--- a/hotspot/src/share/vm/ci/ciField.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciField.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,9 +53,6 @@
   ciInstanceKlass* _known_to_link_with_get;
   ciConstant       _constant_value;
 
-  // Used for will_link
-  int              _cp_index;
-
   ciType* compute_type();
   ciType* compute_type_impl();
 
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -522,8 +522,7 @@
 
   for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static())  continue;
-    fieldDescriptor fd;
-    fd.initialize(k, fs.index());
+    fieldDescriptor& fd = fs.field_descriptor();
     ciField* field = new (arena) ciField(&fd);
     fields->append(field);
   }
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -286,7 +286,10 @@
   check_is_loaded();
   assert(holder()->is_linked(), "must be linked");
   VM_ENTRY_MARK;
-  return klassItable::compute_itable_index(get_Method());
+  Method* m = get_Method();
+  if (!m->has_itable_index())
+    return Method::nonvirtual_vtable_index;
+  return m->itable_index();
 }
 #endif // SHARK
 
@@ -1137,6 +1140,10 @@
 // ------------------------------------------------------------------
 // ciMethod::check_call
 bool ciMethod::check_call(int refinfo_index, bool is_static) const {
+  // This method is used only in C2, from InlineTree::ok_to_inline,
+  // and only under -Xcomp or -XX:CompileTheWorld.
+  // It appears to fail when applied to an invokeinterface call site.
+  // FIXME: Remove this method and resolve_method_statically; refactor to use the other LinkResolver entry points.
   VM_ENTRY_MARK;
   {
     EXCEPTION_MARK;
--- a/hotspot/src/share/vm/ci/ciSymbol.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/ci/ciSymbol.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
   friend class ciInstanceKlass;
   friend class ciSignature;
   friend class ciMethod;
+  friend class ciField;
   friend class ciObjArrayKlass;
 
 private:
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -888,6 +888,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   while (attributes_count--) {
     cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
     u2 attribute_name_index = cfs->get_u2_fast();
@@ -946,15 +947,27 @@
         assert(runtime_invisible_annotations != NULL, "null invisible annotations");
         cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
       } else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes for field in class file %s", CHECK);
+        }
         runtime_visible_type_annotations_length = attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+      } else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes for field in class file %s", CHECK);
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(attribute_length, CHECK);
       } else {
         cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
       }
@@ -2066,6 +2079,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   u1* annotation_default = NULL;
   int annotation_default_length = 0;
 
@@ -2322,16 +2336,30 @@
         assert(annotation_default != NULL, "null annotation default");
         cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
       } else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s",
+            CHECK_(nullHandle));
+        }
         runtime_visible_type_annotations_length = method_attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         // No need for the VM to parse Type annotations
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle));
-      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = method_attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK_(nullHandle));
+      } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s",
+            CHECK_(nullHandle));
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = method_attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
       } else {
         // Skip unknown attributes
         cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
@@ -2824,6 +2852,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   u1* inner_classes_attribute_start = NULL;
   u4  inner_classes_attribute_length = 0;
   u2  enclosing_method_class_index = 0;
@@ -2927,16 +2956,28 @@
         parsed_bootstrap_methods_attribute = true;
         parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK);
       } else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes in class file %s", CHECK);
+        }
         runtime_visible_type_annotations_length = attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         // No need for the VM to parse Type annotations
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+      } else if (tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes in class file %s", CHECK);
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(attribute_length, CHECK);
       } else {
         // Unknown attribute
         cfs->skip_u1(attribute_length, CHECK);
@@ -3954,9 +3995,8 @@
       this_klass->set_has_final_method();
     }
     this_klass->copy_method_ordering(method_ordering, CHECK_NULL);
-    // The InstanceKlass::_methods_jmethod_ids cache and the
-    // InstanceKlass::_methods_cached_itable_indices cache are
-    // both managed on the assumption that the initial cache
+    // The InstanceKlass::_methods_jmethod_ids cache
+    // is managed on the assumption that the initial cache
     // size is equal to the number of methods in the class. If
     // that changes, then InstanceKlass::idnum_can_increment()
     // has to be changed accordingly.
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1319,6 +1319,25 @@
   // The CHECK at the caller will propagate the exception out
 }
 
+/**
+ * Returns whether the given method should be compiled when doing compile-the-world.
+ *
+ * TODO:  This should be a private method in a CompileTheWorld class.
+ */
+static bool can_be_compiled(methodHandle m, int comp_level) {
+  assert(CompileTheWorld, "must be");
+
+  // It's not valid to compile a native wrapper for MethodHandle methods
+  // that take a MemberName appendix since the bytecode signature is not
+  // correct.
+  vmIntrinsics::ID iid = m->intrinsic_id();
+  if (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::has_member_arg(iid)) {
+    return false;
+  }
+
+  return CompilationPolicy::can_be_compiled(m, comp_level);
+}
+
 void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
   int len = (int)strlen(name);
   if (len > 6 && strcmp(".class", name + len - 6) == 0) {
@@ -1362,8 +1381,7 @@
           int comp_level = CompilationPolicy::policy()->initial_compile_level();
           for (int n = 0; n < k->methods()->length(); n++) {
             methodHandle m (THREAD, k->methods()->at(n));
-            if (CompilationPolicy::can_be_compiled(m, comp_level)) {
-
+            if (can_be_compiled(m, comp_level)) {
               if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
                 // Give sweeper a chance to keep up with CTW
                 VM_ForceSafepoint op;
@@ -1375,7 +1393,7 @@
                                             methodHandle(), 0, "CTW", THREAD);
               if (HAS_PENDING_EXCEPTION) {
                 clear_pending_exception_if_not_oom(CHECK);
-                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
               } else {
                 _compile_the_world_method_counter++;
               }
@@ -1391,11 +1409,13 @@
                                               methodHandle(), 0, "CTW", THREAD);
                 if (HAS_PENDING_EXCEPTION) {
                   clear_pending_exception_if_not_oom(CHECK);
-                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
                 } else {
                   _compile_the_world_method_counter++;
                 }
               }
+            } else {
+              tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
             }
 
             nmethod* nm = m->code();
--- a/hotspot/src/share/vm/classfile/defaultMethods.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/classfile/defaultMethods.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -450,6 +450,10 @@
     streamIndentor si(str, indent * 2);
     str->indent().print("Selected method: ");
     print_method(str, _selected_target);
+    Klass* method_holder = _selected_target->method_holder();
+    if (!method_holder->is_interface()) {
+      tty->print(" : in superclass");
+    }
     str->print_cr("");
   }
 
@@ -1141,19 +1145,23 @@
 #endif // ndef PRODUCT
       if (method->has_target()) {
         Method* selected = method->get_selected_target();
-        max_stack = assemble_redirect(
+        if (selected->method_holder()->is_interface()) {
+          max_stack = assemble_redirect(
             &bpool, &buffer, slot->signature(), selected, CHECK);
+        }
       } else if (method->throws_exception()) {
         max_stack = assemble_abstract_method_error(
             &bpool, &buffer, method->get_exception_message(), CHECK);
       }
-      AccessFlags flags = accessFlags_from(
+      if (max_stack != 0) {
+        AccessFlags flags = accessFlags_from(
           JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
-      Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
+        Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
           flags, max_stack, slot->size_of_parameters(),
           ConstMethod::OVERPASS, CHECK);
-      if (m != NULL) {
-        overpasses.push(m);
+        if (m != NULL) {
+          overpasses.push(m);
+        }
       }
     }
   }
--- a/hotspot/src/share/vm/code/compiledIC.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/code/compiledIC.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,31 +161,36 @@
 
 
 void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
-  methodHandle method = call_info->selected_method();
-  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
   assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
   assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
 
   address entry;
-  if (is_invoke_interface) {
-    int index = klassItable::compute_itable_index(call_info->resolved_method()());
-    entry = VtableStubs::create_stub(false, index, method());
+  if (call_info->call_kind() == CallInfo::itable_call) {
+    assert(bytecode == Bytecodes::_invokeinterface, "");
+    int itable_index = call_info->itable_index();
+    entry = VtableStubs::find_itable_stub(itable_index);
+#ifdef ASSERT
     assert(entry != NULL, "entry not computed");
+    int index = call_info->resolved_method()->itable_index();
+    assert(index == itable_index, "CallInfo pre-computes this");
+#endif //ASSERT
     InstanceKlass* k = call_info->resolved_method()->method_holder();
-    assert(k->is_interface(), "sanity check");
+    assert(k->verify_itable_index(itable_index), "sanity check");
     InlineCacheBuffer::create_transition_stub(this, k, entry);
   } else {
-    // Can be different than method->vtable_index(), due to package-private etc.
+    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
+    // Can be different than selected_method->vtable_index(), due to package-private etc.
     int vtable_index = call_info->vtable_index();
-    entry = VtableStubs::create_stub(true, vtable_index, method());
-    InlineCacheBuffer::create_transition_stub(this, method(), entry);
+    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
+    entry = VtableStubs::find_vtable_stub(vtable_index);
+    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
   }
 
   if (TraceICs) {
     ResourceMark rm;
     tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
-                   instruction_address(), method->print_value_string(), entry);
+                   instruction_address(), call_info->selected_method()->print_value_string(), entry);
   }
 
   // We can't check this anymore. With lazy deopt we could have already
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -111,7 +111,7 @@
 }
 
 
-address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method* method) {
+address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
   assert(vtable_index >= 0, "must be positive");
 
   VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
--- a/hotspot/src/share/vm/code/vtableStubs.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/code/vtableStubs.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -121,9 +121,11 @@
   static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
   static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
   static inline uint hash              (bool is_vtable_stub, int vtable_index);
+  static address     find_stub         (bool is_vtable_stub, int vtable_index);
 
  public:
-  static address     create_stub(bool is_vtable_stub, int vtable_index, Method* method); // return the entry point of a stub for this call
+  static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
+  static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
   static bool        is_entry_point(address pc);                     // is pc a vtable stub entry point?
   static bool        contains(address pc);                           // is pc within any stub?
   static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
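With this change the entry point is obtained from the vtable/itable index alone; the Method*
argument of the old create_stub() is no longer part of the interface. A condensed sketch
mirroring the caller in CompiledIC::set_to_megamorphic() above (the is_itable_call flag is
illustrative):

  // Pick the stub purely by dispatch-table index; no Method* is needed any more.
  address entry = is_itable_call ? VtableStubs::find_itable_stub(itable_index)
                                 : VtableStubs::find_vtable_stub(vtable_index);
  assert(entry != NULL, "stub must exist");
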
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -230,7 +230,7 @@
   // depends on this property.
   debug_only(
     FreeChunk* junk = NULL;
-    assert(UseCompressedKlassPointers ||
+    assert(UseCompressedClassPointers ||
            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
            "Offset of FreeChunk::_prev within FreeChunk must match"
            "  that of OopDesc::_klass within OopDesc");
@@ -1407,7 +1407,7 @@
   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   OrderAccess::storestore();
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Copy gap missed by (aligned) header size calculation below
     obj->set_klass_gap(old->klass_gap());
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -481,9 +481,8 @@
 
 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   _g1h(g1h),
-  _markBitMap1(MinObjAlignment - 1),
-  _markBitMap2(MinObjAlignment - 1),
-
+  _markBitMap1(log2_intptr(MinObjAlignment)),
+  _markBitMap2(log2_intptr(MinObjAlignment)),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -33,8 +33,8 @@
 
 void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
   if (has_count_table()) {
-    check_card_num(from_card_num,
-                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
+    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
+           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
     assert(from_card_num < to_card_num,
            err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                    from_card_num, to_card_num));
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -72,25 +72,21 @@
     return has_reserved_count_table() && _committed_max_card_num > 0;
   }
 
-  void check_card_num(size_t card_num, const char* msg) {
-    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
-  }
-
   size_t ptr_2_card_num(const jbyte* card_ptr) {
     assert(card_ptr >= _ct_bot,
-           err_msg("Inavalied card pointer: "
+           err_msg("Invalid card pointer: "
                    "card_ptr: " PTR_FORMAT ", "
                    "_ct_bot: " PTR_FORMAT,
                    card_ptr, _ct_bot));
     size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
-    check_card_num(card_num,
-                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
+    assert(card_num >= 0 && card_num < _committed_max_card_num,
+           err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
     return card_num;
   }
 
   jbyte* card_num_2_ptr(size_t card_num) {
-    check_card_num(card_num,
-                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
+    assert(card_num >= 0 && card_num < _committed_max_card_num,
+           err_msg("card num out of range: "SIZE_FORMAT, card_num));
     return (jbyte*) (_ct_bot + card_num);
   }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -2191,6 +2191,10 @@
   return JNI_OK;
 }
 
+size_t G1CollectedHeap::conservative_max_heap_alignment() {
+  return HeapRegion::max_region_size();
+}
+
 void G1CollectedHeap::ref_processing_init() {
   // Reference processing in G1 currently works as follows:
   //
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1092,6 +1092,9 @@
   // specified by the policy object.
   jint initialize();
 
+  // Return the (conservative) maximum heap alignment for any G1 heap
+  static size_t conservative_max_heap_alignment();
+
   // Initialize weak reference processing.
   virtual void ref_processing_init();
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -149,6 +149,10 @@
 // many regions in the heap (based on the min heap size).
 #define TARGET_REGION_NUMBER          2048
 
+size_t HeapRegion::max_region_size() {
+  return (size_t)MAX_REGION_SIZE;
+}
+
 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -355,6 +355,8 @@
                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   }
 
+  static size_t max_region_size();
+
   // It sets up the heap region size (GrainBytes / GrainWords), as
   // well as other related fields that are based on the heap region
   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
--- a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -38,6 +38,7 @@
 
 class PtrQueueSet;
 class PtrQueue VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
 
 protected:
   // The ptr queue set to which this queue belongs.
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -31,7 +31,8 @@
 
 #define VM_STRUCTS_G1(nonstatic_field, static_field)                          \
                                                                               \
-  static_field(HeapRegion, GrainBytes, size_t)                                \
+  static_field(HeapRegion, GrainBytes,        size_t)                         \
+  static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
                                                                               \
   nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
   nonstatic_field(HeapRegionSeq,   _length,  uint)                            \
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,9 +68,6 @@
   size_t min_old_gen_size()   { return _min_gen1_size; }
   size_t old_gen_size()       { return _initial_gen1_size; }
   size_t max_old_gen_size()   { return _max_gen1_size; }
-
-  size_t metaspace_size()      { return MetaspaceSize; }
-  size_t max_metaspace_size()  { return MaxMetaspaceSize; }
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -86,6 +86,11 @@
     set_alignment(_old_gen_alignment, intra_heap_alignment());
   }
 
+  // Return the (conservative) maximum heap alignment
+  static size_t conservative_max_heap_alignment() {
+    return intra_heap_alignment();
+  }
+
   // For use by VM operations
   enum CollectionType {
     Scavenge,
@@ -122,7 +127,7 @@
 
   // The alignment used for eden and survivors within the young gen
   // and for boundary between young gen and old gen.
-  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
+  static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
 
   size_t capacity() const;
   size_t used() const;
--- a/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,9 @@
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_ALLOCATIONSTATS_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/shared/gcUtil.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
-#endif // INCLUDE_ALL_GCS
+#include "gc_implementation/shared/gcUtil.hpp"
 
 class AllocationStats VALUE_OBJ_CLASS_SPEC {
   // A duration threshold (in ms) used to filter
--- a/hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,9 @@
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/generation.hpp"
 #include "runtime/perfData.hpp"
-#endif // INCLUDE_ALL_GCS
 
 // An HSpaceCounter is a holder class for performance counters
 // that track collections (logical spaces) in a heap;
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -87,15 +87,15 @@
   const MetaspaceSizes meta_space(
       MetaspaceAux::allocated_capacity_bytes(),
       MetaspaceAux::allocated_used_bytes(),
-      MetaspaceAux::reserved_in_bytes());
+      MetaspaceAux::reserved_bytes());
   const MetaspaceSizes data_space(
       MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
       MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
-      MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
+      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
   const MetaspaceSizes class_space(
       MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
       MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
-      MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));
+      MetaspaceAux::reserved_bytes(Metaspace::ClassType));
 
   return MetaspaceSummary(meta_space, data_space, class_space);
 }
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -496,15 +496,15 @@
 
 IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode))
   // resolve field
-  FieldAccessInfo info;
+  fieldDescriptor info;
   constantPoolHandle pool(thread, method(thread)->constants());
   bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_putstatic);
   bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
 
   {
     JvmtiHideSingleStepping jhss(thread);
-    LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
-                                bytecode, false, CHECK);
+    LinkResolver::resolve_field_access(info, pool, get_index_u2_cpcache(thread, bytecode),
+                                       bytecode, CHECK);
   } // end JvmtiHideSingleStepping
 
   // check if link resolution caused cpCache to be updated
@@ -524,7 +524,7 @@
   // class is initialized.  This is required so that access to the static
   // field will call the initialization function every time until the class
   // is completely initialized, as described in 2.17.5 of the JVM Specification.
-  InstanceKlass *klass = InstanceKlass::cast(info.klass()());
+  InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
   bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
                                !klass->is_initialized());
   Bytecodes::Code get_code = (Bytecodes::Code)0;
@@ -539,9 +539,9 @@
   cache_entry(thread)->set_field(
     get_code,
     put_code,
-    info.klass(),
-    info.field_index(),
-    info.field_offset(),
+    info.field_holder(),
+    info.index(),
+    info.offset(),
     state,
     info.access_flags().is_final(),
     info.access_flags().is_volatile(),
@@ -686,29 +686,55 @@
   if (already_resolved(thread)) return;
 
   if (bytecode == Bytecodes::_invokeinterface) {
-
     if (TraceItables && Verbose) {
       ResourceMark rm(thread);
       tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
     }
+  }
+#ifdef ASSERT
+  if (bytecode == Bytecodes::_invokeinterface) {
     if (info.resolved_method()->method_holder() ==
                                             SystemDictionary::Object_klass()) {
       // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
-      // (see also cpCacheOop.cpp for details)
+      // (see also CallInfo::set_interface for details)
+      assert(info.call_kind() == CallInfo::vtable_call ||
+             info.call_kind() == CallInfo::direct_call, "");
       methodHandle rm = info.resolved_method();
       assert(rm->is_final() || info.has_vtable_index(),
              "should have been set already");
-      cache_entry(thread)->set_method(bytecode, rm, info.vtable_index());
+    } else if (!info.resolved_method()->has_itable_index()) {
+      // Resolved something like CharSequence.toString.  Use vtable not itable.
+      assert(info.call_kind() != CallInfo::itable_call, "");
     } else {
       // Setup itable entry
-      int index = klassItable::compute_itable_index(info.resolved_method()());
-      cache_entry(thread)->set_interface_call(info.resolved_method(), index);
+      assert(info.call_kind() == CallInfo::itable_call, "");
+      int index = info.resolved_method()->itable_index();
+      assert(info.itable_index() == index, "");
     }
   } else {
-    cache_entry(thread)->set_method(
+    assert(info.call_kind() == CallInfo::direct_call ||
+           info.call_kind() == CallInfo::vtable_call, "");
+  }
+#endif
+  switch (info.call_kind()) {
+  case CallInfo::direct_call:
+    cache_entry(thread)->set_direct_call(
+      bytecode,
+      info.resolved_method());
+    break;
+  case CallInfo::vtable_call:
+    cache_entry(thread)->set_vtable_call(
       bytecode,
       info.resolved_method(),
       info.vtable_index());
+    break;
+  case CallInfo::itable_call:
+    cache_entry(thread)->set_itable_call(
+      bytecode,
+      info.resolved_method(),
+      info.itable_index());
+    break;
+  default:  ShouldNotReachHere();
   }
 }
 IRT_END
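The switch above keys off CallInfo::call_kind(); the linkResolver changes below also add a
CallInfo(Method*, Klass*) constructor that performs the same classification when unreflecting a
method. A condensed sketch of consuming it (the bind_* helpers are illustrative; only the
CallInfo API comes from the changeset):

  CallInfo info(method, NULL);   // NULL resolved_klass defaults to the method's holder
  switch (info.call_kind()) {
  case CallInfo::direct_call:
    bind_directly(info.resolved_method());   // statically bound, no table index
    break;
  case CallInfo::vtable_call:
    bind_virtual(info.vtable_index());        // virtual dispatch through the vtable
    break;
  case CallInfo::itable_call:
    bind_interface(info.itable_index());      // interface dispatch through the itable
    break;
  default:
    ShouldNotReachHere();
  }
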
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -46,19 +46,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 
-//------------------------------------------------------------------------------------------------------------------------
-// Implementation of FieldAccessInfo
-
-void FieldAccessInfo::set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
-BasicType field_type, AccessFlags access_flags) {
-  _klass        = klass;
-  _name         = name;
-  _field_index  = field_index;
-  _field_offset = field_offset;
-  _field_type   = field_type;
-  _access_flags = access_flags;
-}
-
 
 //------------------------------------------------------------------------------------------------------------------------
 // Implementation of CallInfo
@@ -66,26 +53,25 @@
 
 void CallInfo::set_static(KlassHandle resolved_klass, methodHandle resolved_method, TRAPS) {
   int vtable_index = Method::nonvirtual_vtable_index;
-  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
+  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
 }
 
 
-void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, TRAPS) {
+void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index, TRAPS) {
   // This is only called for interface methods. If the resolved_method
   // comes from java/lang/Object, it can be the subject of a virtual call, so
   // we should pick the vtable index from the resolved method.
-  // Other than that case, there is no valid vtable index to specify.
-  int vtable_index = Method::invalid_vtable_index;
-  if (resolved_method->method_holder() == SystemDictionary::Object_klass()) {
-    assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check");
-    vtable_index = resolved_method->vtable_index();
-  }
-  set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
+  // In that case, the caller must call set_virtual instead of set_interface.
+  assert(resolved_method->method_holder()->is_interface(), "");
+  assert(itable_index == resolved_method()->itable_index(), "");
+  set_common(resolved_klass, selected_klass, resolved_method, selected_method, CallInfo::itable_call, itable_index, CHECK);
 }
 
 void CallInfo::set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
   assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, "valid index");
-  set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
+  assert(vtable_index < 0 || !resolved_method->has_vtable_index() || vtable_index == resolved_method->vtable_index(), "");
+  CallKind kind = (vtable_index >= 0 && !resolved_method->can_be_statically_bound() ? CallInfo::vtable_call : CallInfo::direct_call);
+  set_common(resolved_klass, selected_klass, resolved_method, selected_method, kind, vtable_index, CHECK);
   assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call");
 }
 
@@ -98,20 +84,29 @@
          resolved_method->is_compiled_lambda_form(),
          "linkMethod must return one of these");
   int vtable_index = Method::nonvirtual_vtable_index;
-  assert(resolved_method->vtable_index() == vtable_index, "");
-  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
+  assert(!resolved_method->has_vtable_index(), "");
+  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
   _resolved_appendix    = resolved_appendix;
   _resolved_method_type = resolved_method_type;
 }
 
-void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
+void CallInfo::set_common(KlassHandle resolved_klass,
+                          KlassHandle selected_klass,
+                          methodHandle resolved_method,
+                          methodHandle selected_method,
+                          CallKind kind,
+                          int index,
+                          TRAPS) {
   assert(resolved_method->signature() == selected_method->signature(), "signatures must correspond");
   _resolved_klass  = resolved_klass;
   _selected_klass  = selected_klass;
   _resolved_method = resolved_method;
   _selected_method = selected_method;
-  _vtable_index    = vtable_index;
+  _call_kind       = kind;
+  _call_index      = index;
   _resolved_appendix = Handle();
+  DEBUG_ONLY(verify());  // verify before causing any side effects
+
   if (CompilationPolicy::must_be_compiled(selected_method)) {
     // This path is unusual, mostly used by the '-Xcomp' stress test mode.
 
@@ -138,6 +133,65 @@
   }
 }
 
+// utility query for unreflecting a method
+CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) {
+  Klass* resolved_method_holder = resolved_method->method_holder();
+  if (resolved_klass == NULL) { // 2nd argument defaults to holder of 1st
+    resolved_klass = resolved_method_holder;
+  }
+  _resolved_klass  = resolved_klass;
+  _selected_klass  = resolved_klass;
+  _resolved_method = resolved_method;
+  _selected_method = resolved_method;
+  // classify:
+  CallKind kind = CallInfo::unknown_kind;
+  int index = resolved_method->vtable_index();
+  if (resolved_method->can_be_statically_bound()) {
+    kind = CallInfo::direct_call;
+  } else if (!resolved_method_holder->is_interface()) {
+    // Could be an Object method inherited into an interface, but still a vtable call.
+    kind = CallInfo::vtable_call;
+  } else if (!resolved_klass->is_interface()) {
+    // A miranda method.  Compute the vtable index.
+    ResourceMark rm;
+    klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable();
+    index = vt->index_of_miranda(resolved_method->name(),
+                                 resolved_method->signature());
+    kind = CallInfo::vtable_call;
+  } else {
+    // A regular interface call.
+    kind = CallInfo::itable_call;
+    index = resolved_method->itable_index();
+  }
+  assert(index == Method::nonvirtual_vtable_index || index >= 0, err_msg("bad index %d", index));
+  _call_kind  = kind;
+  _call_index = index;
+  _resolved_appendix = Handle();
+  DEBUG_ONLY(verify());
+}
+
+#ifdef ASSERT
+void CallInfo::verify() {
+  switch (call_kind()) {  // the meaning and allowed value of index depends on kind
+  case CallInfo::direct_call:
+    if (_call_index == Method::nonvirtual_vtable_index)  break;
+    // else fall through to check vtable index:
+  case CallInfo::vtable_call:
+    assert(resolved_klass()->verify_vtable_index(_call_index), "");
+    break;
+  case CallInfo::itable_call:
+    assert(resolved_method()->method_holder()->verify_itable_index(_call_index), "");
+    break;
+  case CallInfo::unknown_kind:
+    assert(call_kind() != CallInfo::unknown_kind, "CallInfo must be set");
+    break;
+  default:
+    fatal(err_msg_res("Unexpected call kind %d", call_kind()));
+  }
+}
+#endif //ASSERT
+
+
 
 //------------------------------------------------------------------------------------------------------------------------
 // Klass resolution
@@ -163,13 +217,6 @@
   result = KlassHandle(THREAD, result_oop);
 }
 
-void LinkResolver::resolve_klass_no_update(KlassHandle& result, constantPoolHandle pool, int index, TRAPS) {
-  Klass* result_oop =
-         ConstantPool::klass_ref_at_if_loaded_check(pool, index, CHECK);
-  result = KlassHandle(THREAD, result_oop);
-}
-
-
 //------------------------------------------------------------------------------------------------------------------------
 // Method resolution
 //
@@ -360,7 +407,12 @@
 
 void LinkResolver::resolve_method_statically(methodHandle& resolved_method, KlassHandle& resolved_klass,
                                              Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS) {
-
+  // This method is used only
+  // (1) in C2 from InlineTree::ok_to_inline (via ciMethod::check_call),
+  // and
+  // (2) in Bytecode_invoke::static_target
+  // It appears to fail when applied to an invokeinterface call site.
+  // FIXME: Remove this method and ciMethod::check_call; refactor to use the other LinkResolver entry points.
   // resolve klass
   if (code == Bytecodes::_invokedynamic) {
     resolved_klass = SystemDictionary::MethodHandle_klass();
@@ -580,45 +632,49 @@
   }
 }
 
-void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS) {
-  resolve_field(result, pool, index, byte, check_only, true, CHECK);
+void LinkResolver::resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS) {
+  // Load these early in case the resolve of the containing klass fails
+  Symbol* field = pool->name_ref_at(index);
+  Symbol* sig   = pool->signature_ref_at(index);
+
+  // resolve specified klass
+  KlassHandle resolved_klass;
+  resolve_klass(resolved_klass, pool, index, CHECK);
+
+  KlassHandle  current_klass(THREAD, pool->pool_holder());
+  resolve_field(result, resolved_klass, field, sig, current_klass, byte, true, true, CHECK);
 }
 
-void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS) {
+void LinkResolver::resolve_field(fieldDescriptor& fd, KlassHandle resolved_klass, Symbol* field, Symbol* sig,
+                                 KlassHandle current_klass, Bytecodes::Code byte, bool check_access, bool initialize_class,
+                                 TRAPS) {
   assert(byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic ||
-         byte == Bytecodes::_getfield  || byte == Bytecodes::_putfield, "bad bytecode");
+         byte == Bytecodes::_getfield  || byte == Bytecodes::_putfield  ||
+         (byte == Bytecodes::_nop && !check_access), "bad field access bytecode");
 
   bool is_static = (byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic);
   bool is_put    = (byte == Bytecodes::_putfield  || byte == Bytecodes::_putstatic);
 
-  // resolve specified klass
-  KlassHandle resolved_klass;
-  if (update_pool) {
-    resolve_klass(resolved_klass, pool, index, CHECK);
-  } else {
-    resolve_klass_no_update(resolved_klass, pool, index, CHECK);
-  }
-  // Load these early in case the resolve of the containing klass fails
-  Symbol* field = pool->name_ref_at(index);
-  Symbol* sig   = pool->signature_ref_at(index);
   // Check if there's a resolved klass containing the field
-  if( resolved_klass.is_null() ) {
+  if (resolved_klass.is_null()) {
     ResourceMark rm(THREAD);
     THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
   }
 
   // Resolve instance field
-  fieldDescriptor fd; // find_field initializes fd if found
   KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
   // check if field exists; i.e., if a klass containing the field def has been selected
-  if (sel_klass.is_null()){
+  if (sel_klass.is_null()) {
     ResourceMark rm(THREAD);
     THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
   }
 
+  if (!check_access)
+    // Access checking may be turned off when calling from within the VM.
+    return;
+
   // check access
-  KlassHandle ref_klass(THREAD, pool->pool_holder());
-  check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK);
+  check_field_accessability(current_klass, resolved_klass, sel_klass, fd, CHECK);
 
   // check for errors
   if (is_static != fd.is_static()) {
@@ -629,7 +685,7 @@
   }
 
   // Final fields can only be accessed from their own class.
-  if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) {
+  if (is_put && fd.access_flags().is_final() && sel_klass() != current_klass()) {
     THROW(vmSymbols::java_lang_IllegalAccessError());
   }
 
@@ -639,19 +695,18 @@
   //
   // note 2: we don't want to force initialization if we are just checking
   //         if the field access is legal; e.g., during compilation
-  if (is_static && !check_only) {
+  if (is_static && initialize_class) {
     sel_klass->initialize(CHECK);
   }
 
-  {
+  if (sel_klass() != current_klass()) {
     HandleMark hm(THREAD);
-    Handle ref_loader (THREAD, InstanceKlass::cast(ref_klass())->class_loader());
+    Handle ref_loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
     Handle sel_loader (THREAD, InstanceKlass::cast(sel_klass())->class_loader());
-    Symbol*  signature_ref  = pool->signature_ref_at(index);
     {
       ResourceMark rm(THREAD);
       Symbol* failed_type_symbol =
-        SystemDictionary::check_signature_loaders(signature_ref,
+        SystemDictionary::check_signature_loaders(sig,
                                                   ref_loader, sel_loader,
                                                   false,
                                                   CHECK);
@@ -677,9 +732,6 @@
 
   // return information. note that the klass is set to the actual klass containing the
   // field, otherwise access of static fields in superclasses will not work.
-  KlassHandle holder (THREAD, fd.field_holder());
-  Symbol*  name   = fd.name();
-  result.set(holder, name, fd.index(), fd.offset(), fd.field_type(), fd.access_flags());
 }
 
 
@@ -907,10 +959,6 @@
   }
 
   // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
-  // has not been rewritten, and the vtable initialized.
-  assert(resolved_method->method_holder()->is_linked(), "must be linked");
-
-  // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
   // has not been rewritten, and the vtable initialized. Make sure to do this after the nullcheck, since
   // a missing receiver might result in a bogus lookup.
   assert(resolved_method->method_holder()->is_linked(), "must be linked");
@@ -920,6 +968,7 @@
     vtable_index = vtable_index_of_miranda_method(resolved_klass,
                            resolved_method->name(),
                            resolved_method->signature(), CHECK);
+
     assert(vtable_index >= 0 , "we should have valid vtable index at this point");
 
     InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@@ -927,6 +976,7 @@
   } else {
     // at this point we are sure that resolved_method is virtual and not
     // a miranda method; therefore, it must have a valid vtable index.
+    assert(!resolved_method->has_itable_index(), "");
     vtable_index = resolved_method->vtable_index();
     // We could get a negative vtable_index for final methods,
     // because as an optimization they are never put in the vtable,
@@ -1006,6 +1056,12 @@
   lookup_instance_method_in_klasses(sel_method, recv_klass,
             resolved_method->name(),
             resolved_method->signature(), CHECK);
+  if (sel_method.is_null() && !check_null_and_abstract) {
+    // In theory this is a harmless placeholder value, but
+    // in practice leaving it null affects the nsk default method tests.
+    // This needs further study.
+    sel_method = resolved_method;
+  }
   // check if method exists
   if (sel_method.is_null()) {
     ResourceMark rm(THREAD);
@@ -1046,7 +1102,14 @@
                                                       sel_method->signature()));
   }
   // setup result
-  result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, CHECK);
+  if (!resolved_method->has_itable_index()) {
+    int vtable_index = resolved_method->vtable_index();
+    assert(vtable_index == sel_method->vtable_index(), "sanity check");
+    result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
+    return;
+  }
+  int itable_index = resolved_method()->itable_index();
+  result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
 }
 
 
@@ -1293,7 +1356,8 @@
   }
 
   if (TraceMethodHandles) {
-    tty->print_cr("resolve_invokedynamic #%d %s %s",
+      ResourceMark rm(THREAD);
+      tty->print_cr("resolve_invokedynamic #%d %s %s",
                   ConstantPool::decode_invokedynamic_index(index),
                   method_name->as_C_string(), method_signature->as_C_string());
     tty->print("  BSM info: "); bootstrap_specifier->print();
@@ -1342,9 +1406,16 @@
 //------------------------------------------------------------------------------------------------------------------------
 #ifndef PRODUCT
 
-void FieldAccessInfo::print() {
+void CallInfo::print() {
   ResourceMark rm;
-  tty->print_cr("Field %s@%d", name()->as_C_string(), field_offset());
+  const char* kindstr = "unknown";
+  switch (_call_kind) {
+  case direct_call: kindstr = "direct"; break;
+  case vtable_call: kindstr = "vtable"; break;
+  case itable_call: kindstr = "itable"; break;
+  }
+  tty->print_cr("Call %s@%d %s", kindstr, _call_index,
+                _resolved_method.is_null() ? "(none)" : _resolved_method->name_and_sig_as_C_string());
 }
 
 #endif
--- a/hotspot/src/share/vm/interpreter/linkResolver.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,63 +30,54 @@
 
 // All the necessary definitions for run-time link resolution.
 
-// LinkInfo & its subclasses provide all the information gathered
-// for a particular link after resolving it. A link is any reference
+// CallInfo provides all the information gathered for a particular
+// linked call site after resolving it. A link is any reference
 // made from within the bytecodes of a method to an object outside of
 // that method. If the info is invalid, the link has not been resolved
 // successfully.
 
-class LinkInfo VALUE_OBJ_CLASS_SPEC {
-};
-
-
-// Link information for getfield/putfield & getstatic/putstatic bytecodes.
-
-class FieldAccessInfo: public LinkInfo {
- protected:
-  KlassHandle  _klass;
-  Symbol*      _name;
-  AccessFlags  _access_flags;
-  int          _field_index;  // original index in the klass
-  int          _field_offset;
-  BasicType    _field_type;
-
+class CallInfo VALUE_OBJ_CLASS_SPEC {
  public:
-  void         set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
-                 BasicType field_type, AccessFlags access_flags);
-  KlassHandle  klass() const                     { return _klass; }
-  Symbol* name() const                           { return _name; }
-  int          field_index() const               { return _field_index; }
-  int          field_offset() const              { return _field_offset; }
-  BasicType    field_type() const                { return _field_type; }
-  AccessFlags  access_flags() const              { return _access_flags; }
-
-  // debugging
-  void print()  PRODUCT_RETURN;
-};
-
-
-// Link information for all calls.
-
-class CallInfo: public LinkInfo {
+  // Ways that a method call might be selected (or not) based on receiver type.
+  // Note that an invokevirtual instruction might be linked as a direct (non-dispatched) call,
+  // and an invokeinterface instruction might be linked with any of the three options.
+  enum CallKind {
+    direct_call,                        // jump into resolved_method (must be concrete)
+    vtable_call,                        // select recv.klass.method_at_vtable(index)
+    itable_call,                        // select recv.klass.method_at_itable(resolved_method.holder, index)
+    unknown_kind = -1
+  };
  private:
-  KlassHandle  _resolved_klass;         // static receiver klass
+  KlassHandle  _resolved_klass;         // static receiver klass, resolved from a symbolic reference
   KlassHandle  _selected_klass;         // dynamic receiver class (same as static, or subklass)
   methodHandle _resolved_method;        // static target method
   methodHandle _selected_method;        // dynamic (actual) target method
-  int          _vtable_index;           // vtable index of selected method
+  CallKind     _call_kind;              // kind of call: direct (bytecode static/special,
+                                        // plus others statically inferred), vtable, or itable
+  int          _call_index;             // vtable or itable index of selected class method (if any)
   Handle       _resolved_appendix;      // extra argument in constant pool (if CPCE::has_appendix)
   Handle       _resolved_method_type;   // MethodType (for invokedynamic and invokehandle call sites)
 
   void         set_static(   KlassHandle resolved_klass,                             methodHandle resolved_method                                                       , TRAPS);
-  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method                         , TRAPS);
+  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index       , TRAPS);
   void         set_virtual(  KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
   void         set_handle(                                                           methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS);
-  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
+  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, CallKind kind, int index, TRAPS);
 
   friend class LinkResolver;
 
  public:
+  CallInfo() {
+#ifndef PRODUCT
+    _call_kind  = CallInfo::unknown_kind;
+    _call_index = Method::garbage_vtable_index;
+#endif //PRODUCT
+  }
+
+  // utility to extract an effective CallInfo from a method and an optional receiver limit
+  // does not queue the method for compilation
+  CallInfo(Method* resolved_method, Klass* resolved_klass = NULL);
+
   KlassHandle  resolved_klass() const            { return _resolved_klass; }
   KlassHandle  selected_klass() const            { return _selected_klass; }
   methodHandle resolved_method() const           { return _resolved_method; }
@@ -95,21 +86,43 @@
   Handle       resolved_method_type() const      { return _resolved_method_type; }
 
   BasicType    result_type() const               { return selected_method()->result_type(); }
-  bool         has_vtable_index() const          { return _vtable_index >= 0; }
-  bool         is_statically_bound() const       { return _vtable_index == Method::nonvirtual_vtable_index; }
+  CallKind     call_kind() const                 { return _call_kind; }
+  int          call_index() const                { return _call_index; }
   int          vtable_index() const {
     // Even for interface calls the vtable index could be non-negative.
     // See CallInfo::set_interface.
     assert(has_vtable_index() || is_statically_bound(), "");
-    return _vtable_index;
+    assert(call_kind() == vtable_call || call_kind() == direct_call, "");
+    // The returned value is < 0 if the call is statically bound.
+    // But, the returned value may be >= 0 even if the kind is direct_call.
+    // It is up to the caller to decide which way to go.
+    return _call_index;
   }
+  int          itable_index() const {
+    assert(call_kind() == itable_call, "");
+    // The returned value is always >= 0, a valid itable index.
+    return _call_index;
+  }
+
+  // debugging
+#ifdef ASSERT
+  bool         has_vtable_index() const          { return _call_index >= 0 && _call_kind != CallInfo::itable_call; }
+  bool         is_statically_bound() const       { return _call_index == Method::nonvirtual_vtable_index; }
+#endif //ASSERT
+  void         verify() PRODUCT_RETURN;
+  void         print()  PRODUCT_RETURN;
 };
 
+// Link information for getfield/putfield & getstatic/putstatic bytecodes
+// is represented using a fieldDescriptor.
 
 // The LinkResolver is used to resolve constant-pool references at run-time.
 // It does all necessary link-time checks & throws exceptions if necessary.
 
 class LinkResolver: AllStatic {
+  friend class klassVtable;
+  friend class klassItable;
+
  private:
   static void lookup_method_in_klasses          (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
   static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
@@ -120,7 +133,6 @@
   static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
 
   static void resolve_klass           (KlassHandle& result, constantPoolHandle  pool, int index, TRAPS);
-  static void resolve_klass_no_update (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); // no update of constantPool entry
 
   static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
 
@@ -148,9 +160,16 @@
                                         Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS);
 
   // runtime/static resolving for fields
-  static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS);
-  // takes an extra bool argument "update_pool" to decide whether to update the constantPool during klass resolution.
-  static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS);
+  static void resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS);
+  static void resolve_field(fieldDescriptor& result, KlassHandle resolved_klass, Symbol* field_name, Symbol* field_signature,
+                            KlassHandle current_klass, Bytecodes::Code access_kind, bool check_access, bool initialize_class, TRAPS);
+
+  // source of access_kind codes:
+  static Bytecodes::Code field_access_kind(bool is_static, bool is_put) {
+    return (is_static
+            ? (is_put ? Bytecodes::_putstatic : Bytecodes::_getstatic)
+            : (is_put ? Bytecodes::_putfield  : Bytecodes::_getfield ));
+  }
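
The field_access_kind() helper above is a pure (is_static, is_put) -> bytecode mapping used by callers of the new resolve_field() signature. A tiny standalone sketch of the same mapping follows; the local Bytecode enum and name table stand in for Bytecodes::Code and are assumptions of the example.

#include <cstdio>

enum Bytecode { _getstatic, _putstatic, _getfield, _putfield };

static Bytecode field_access_kind(bool is_static, bool is_put) {
  return is_static
         ? (is_put ? _putstatic : _getstatic)
         : (is_put ? _putfield  : _getfield);
}

int main() {
  const char* names[] = { "getstatic", "putstatic", "getfield", "putfield" };
  for (int s = 0; s <= 1; ++s) {
    for (int p = 0; p <= 1; ++p) {
      std::printf("is_static=%d is_put=%d -> %s\n", s, p,
                  names[field_access_kind(s != 0, p != 0)]);
    }
  }
  return 0;
}
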
 
   // runtime resolving:
   //   resolved_klass = specified class (i.e., static receiver class)
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,10 +33,10 @@
 #include "runtime/globals.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/macros.hpp"
+#include "gc_implementation/shared/spaceDecorator.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // INCLUDE_ALL_GCS
 
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -47,6 +47,11 @@
 
 // CollectorPolicy methods.
 
+// Align down. If aligning results in 0, return 'alignment'.
+static size_t restricted_align_down(size_t size, size_t alignment) {
+  return MAX2(alignment, align_size_down_(size, alignment));
+}
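
restricted_align_down() above rounds a flag value down to an alignment boundary but never below the alignment itself, so an unaligned Metaspace flag gets adjusted rather than driven to zero. The standalone sketch below shows that behavior; align_down() is a local stand-in for HotSpot's align_size_down_ macro and assumes a power-of-two alignment.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Local stand-in for align_size_down_; alignment must be a power of two.
static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

// Align down, but never return less than the alignment itself.
static size_t restricted_align_down(size_t size, size_t alignment) {
  return std::max(alignment, align_down(size, alignment));
}

int main() {
  const size_t K = 1024;
  const size_t alignment = 4 * K;      // e.g. a page-sized min_alignment
  std::printf("%zu -> %zu\n", 21 * K, restricted_align_down(21 * K, alignment));  // 20K
  std::printf("%zu -> %zu\n",  3 * K, restricted_align_down( 3 * K, alignment));  // clamped to 4K
  return 0;
}

This is why the flag adjustments that follow can round MaxMetaspaceSize, MetaspaceSize and the expansion flags ergonomically instead of rejecting unaligned values.
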
+
 void CollectorPolicy::initialize_flags() {
   assert(max_alignment() >= min_alignment(),
       err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
@@ -59,18 +64,24 @@
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
 
-  if (MetaspaceSize > MaxMetaspaceSize) {
-    MaxMetaspaceSize = MetaspaceSize;
+  if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
+    FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
+        restricted_align_down(MaxMetaspaceSize, max_alignment()));
   }
-  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
-  // Don't increase Metaspace size limit above specified.
-  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
+
   if (MetaspaceSize > MaxMetaspaceSize) {
-    MetaspaceSize = MaxMetaspaceSize;
+    FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
   }
 
-  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
-  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));
+  if (!is_size_aligned(MetaspaceSize, min_alignment())) {
+    FLAG_SET_ERGO(uintx, MetaspaceSize,
+        restricted_align_down(MetaspaceSize, min_alignment()));
+  }
+
+  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
+
+  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
+  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
 
   MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
 
@@ -145,6 +156,30 @@
   _all_soft_refs_clear = true;
 }
 
+size_t CollectorPolicy::compute_max_alignment() {
+  // The card marking array and the offset arrays for old generations are
+  // committed in os pages as well. Make sure they are entirely full (to
+  // avoid partial page problems), e.g., if 512 bytes of heap correspond to one
+  // card-table byte and the os page size is 4096, the maximum heap size should
+  // be 512*4096 = 2MB aligned.
+
+  // There is only the GenRemSet in HotSpot, and only GenRemSet::CardTable
+  // is supported.
+  // Requirements of any new remembered set implementations must be added here.
+  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+
+  // Parallel GC does its own alignment of the generations to avoid requiring a
+  // large page (256M on some platforms) for the permanent generation.  The
+  // other collectors should also be updated to do their own alignment and then
+  // this use of lcm() should be removed.
+  if (UseLargePages && !UseParallelGC) {
+      // in presence of large pages we have to make sure that our
+      // alignment is large page aware
+      alignment = lcm(os::large_page_size(), alignment);
+  }
+
+  return alignment;
+}
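
The alignment reasoning in compute_max_alignment() above is mostly arithmetic: one card-table byte covers 512 bytes of heap, a card-table page must be fully used, and large pages can raise the constraint further via a least common multiple. A small worked sketch of that arithmetic follows; the 2 MB large-page size is illustrative and platform dependent.

#include <cstddef>
#include <cstdio>

static size_t gcd(size_t a, size_t b) { while (b != 0) { size_t t = a % b; a = b; b = t; } return a; }
static size_t lcm(size_t a, size_t b) { return a / gcd(a, b) * b; }

int main() {
  const size_t bytes_per_card = 512;     // heap bytes covered by one card-table byte
  const size_t os_page_size   = 4096;    // card-table bytes per OS page
  size_t alignment = bytes_per_card * os_page_size;          // 512 * 4096 = 2 MB
  std::printf("card-table alignment: %zu bytes\n", alignment);

  const size_t large_page_size = 2u * 1024 * 1024;            // illustrative value
  alignment = lcm(large_page_size, alignment);                // combine both constraints
  std::printf("with large pages:     %zu bytes\n", alignment);
  return 0;
}
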
 
 // GenCollectorPolicy methods.
 
@@ -175,29 +210,6 @@
                                         GCTimeRatio);
 }
 
-size_t GenCollectorPolicy::compute_max_alignment() {
-  // The card marking array and the offset arrays for old generations are
-  // committed in os pages as well. Make sure they are entirely full (to
-  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
-  // byte entry and the os page size is 4096, the maximum heap size should
-  // be 512*4096 = 2MB aligned.
-  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-
-  // Parallel GC does its own alignment of the generations to avoid requiring a
-  // large page (256M on some platforms) for the permanent generation.  The
-  // other collectors should also be updated to do their own alignment and then
-  // this use of lcm() should be removed.
-  if (UseLargePages && !UseParallelGC) {
-      // in presence of large pages we have to make sure that our
-      // alignment is large page aware
-      alignment = lcm(os::large_page_size(), alignment);
-  }
-
-  assert(alignment >= min_alignment(), "Must be");
-
-  return alignment;
-}
-
 void GenCollectorPolicy::initialize_flags() {
   // All sizes must be multiples of the generation granularity.
   set_min_alignment((uintx) Generation::GenGrain);
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -98,6 +98,9 @@
   {}
 
  public:
+  // Return maximum heap alignment that may be imposed by the policy
+  static size_t compute_max_alignment();
+
   void set_min_alignment(size_t align)         { _min_alignment = align; }
   size_t min_alignment()                       { return _min_alignment; }
   void set_max_alignment(size_t align)         { _max_alignment = align; }
@@ -234,9 +237,6 @@
   // Try to allocate space by expanding the heap.
   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 
-  // compute max heap alignment
-  size_t compute_max_alignment();
-
  // Scale the base_size by NewRatio according to
  //     result = base_size / (NewRatio + 1)
  // and align by min_alignment()
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -148,6 +148,11 @@
     return gen_policy()->size_policy();
   }
 
+  // Return the (conservative) maximum heap alignment
+  static size_t conservative_max_heap_alignment() {
+    return Generation::GenGrain;
+  }
+
   size_t capacity() const;
   size_t used() const;
 
--- a/hotspot/src/share/vm/memory/metablock.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metablock.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -50,13 +50,6 @@
 // Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
 size_t Metablock::_min_block_byte_size = sizeof(Metablock);
 
-#ifdef ASSERT
-size_t Metablock::_overhead =
-  Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
-#else
-size_t Metablock::_overhead = 0;
-#endif
-
 // New blocks returned by the Metaspace are zero initialized.
 // We should fix the constructors to not assume this instead.
 Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
--- a/hotspot/src/share/vm/memory/metablock.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metablock.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -48,7 +48,6 @@
     } _header;
   } _block;
   static size_t _min_block_byte_size;
-  static size_t _overhead;
 
   typedef union block_t Block;
   typedef struct header_t Header;
@@ -73,7 +72,6 @@
   void set_prev(Metablock* v) { _block._header._prev = v; }
 
   static size_t min_block_byte_size() { return _min_block_byte_size; }
-  static size_t overhead() { return _overhead; }
 
   bool is_free()                 { return header()->_word_size != 0; }
   void clear_next()              { set_next(NULL); }
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -51,7 +51,7 @@
 // Parameters for stress mode testing
 const uint metadata_deallocate_a_lot_block = 10;
 const uint metadata_deallocate_a_lock_chunk = 3;
-size_t const allocation_from_dictionary_limit = 64 * K;
+size_t const allocation_from_dictionary_limit = 4 * K;
 
 MetaWord* last_allocated = 0;
 
@@ -177,8 +177,8 @@
   void return_chunks(ChunkIndex index, Metachunk* chunks);
 
   // Total of the space in the free chunks list
-  size_t free_chunks_total();
-  size_t free_chunks_total_in_bytes();
+  size_t free_chunks_total_words();
+  size_t free_chunks_total_bytes();
 
   // Number of chunks in the free chunks list
   size_t free_chunks_count();
@@ -228,6 +228,10 @@
   BlockTreeDictionary* _dictionary;
   static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
 
+  // Only allocate and split from freelist if the size of the allocation
+  // is at least 1/4th the size of the available block.
+  const static int WasteMultiplier = 4;
+
   // Accessors
   BlockTreeDictionary* dictionary() const { return _dictionary; }
 
@@ -287,6 +291,10 @@
   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 
+  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
+  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
+  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
+
   // address of next available space in _virtual_space;
   // Accessors
   VirtualSpaceNode* next() { return _next; }
@@ -323,12 +331,10 @@
 
   // Allocate a chunk from the virtual space and return it.
   Metachunk* get_chunk_vs(size_t chunk_word_size);
-  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
 
   // Expands/shrinks the committed space in a virtual space.  Delegates
   // to Virtualspace
   bool expand_by(size_t words, bool pre_touch = false);
-  bool shrink_by(size_t words);
 
   // In preparation for deleting this node, remove all the chunks
   // in the node from any freelist.
@@ -336,8 +342,6 @@
 
 #ifdef ASSERT
   // Debug support
-  static void verify_virtual_space_total();
-  static void verify_virtual_space_count();
   void mangle();
 #endif
 
@@ -423,10 +427,13 @@
   // Can this virtual list allocate >1 spaces?  Also, used to determine
   // whether to allocate unlimited small chunks in this virtual space
   bool _is_class;
-  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
-
-  // Sum of space in all virtual spaces and number of virtual spaces
-  size_t _virtual_space_total;
+  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
+
+  // Sum of reserved and committed memory in the virtual spaces
+  size_t _reserved_words;
+  size_t _committed_words;
+
+  // Number of virtual spaces
   size_t _virtual_space_count;
 
   ~VirtualSpaceList();
@@ -440,7 +447,7 @@
     _current_virtual_space = v;
   }
 
-  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
+  void link_vs(VirtualSpaceNode* new_entry);
 
   // Get another virtual space and add it to the list.  This
   // is typically prompted by a failed attempt to allocate a chunk
@@ -457,6 +464,8 @@
                            size_t grow_chunks_by_words,
                            size_t medium_chunk_bunch);
 
+  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
+
   // Get the first chunk for a Metaspace.  Used for
   // special cases such as the boot class loader, reflection
   // class loader and anonymous class loader.
@@ -472,10 +481,15 @@
   // Allocate the first virtualspace.
   void initialize(size_t word_size);
 
-  size_t virtual_space_total() { return _virtual_space_total; }
-
-  void inc_virtual_space_total(size_t v);
-  void dec_virtual_space_total(size_t v);
+  size_t reserved_words()  { return _reserved_words; }
+  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
+  size_t committed_words() { return _committed_words; }
+  size_t committed_bytes() { return committed_words() * BytesPerWord; }
+
+  void inc_reserved_words(size_t v);
+  void dec_reserved_words(size_t v);
+  void inc_committed_words(size_t v);
+  void dec_committed_words(size_t v);
   void inc_virtual_space_count();
   void dec_virtual_space_count();
 
@@ -623,6 +637,7 @@
 
   // Add chunk to the list of chunks in use
   void add_chunk(Metachunk* v, bool make_current);
+  void retire_current_chunk();
 
   Mutex* lock() const { return _lock; }
 
@@ -722,9 +737,7 @@
     // MinChunkSize is a placeholder for the real minimum size JJJ
     size_t byte_size = word_size * BytesPerWord;
 
-    size_t byte_size_with_overhead = byte_size + Metablock::overhead();
-
-    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
+    size_t raw_bytes_size = MAX2(byte_size,
                                  Metablock::min_block_byte_size());
     raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
     size_t raw_word_size = raw_bytes_size / BytesPerWord;
@@ -807,12 +820,25 @@
   }
 
   Metablock* free_block =
-    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
+    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
   if (free_block == NULL) {
     return NULL;
   }
 
-  return (MetaWord*) free_block;
+  const size_t block_size = free_block->size();
+  if (block_size > WasteMultiplier * word_size) {
+    return_block((MetaWord*)free_block, block_size);
+    return NULL;
+  }
+
+  MetaWord* new_block = (MetaWord*)free_block;
+  assert(block_size >= word_size, "Incorrect size of block from freelist");
+  const size_t unused = block_size - word_size;
+  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
+    return_block(new_block + word_size, unused);
+  }
+
+  return new_block;
 }
 
 void BlockFreelist::print_on(outputStream* st) const {
@@ -855,9 +881,9 @@
 
   if (!is_available(chunk_word_size)) {
     if (TraceMetadataChunkAllocation) {
-      tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
+      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
       // Dump some information about the virtual space that is nearly full
-      print_on(tty);
+      print_on(gclog_or_tty);
     }
     return NULL;
   }
@@ -878,20 +904,11 @@
   if (TraceMetavirtualspaceAllocation && !result) {
     gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                            "for byte size " SIZE_FORMAT, bytes);
-    virtual_space()->print();
+    virtual_space()->print_on(gclog_or_tty);
   }
   return result;
 }
 
-// Shrink the virtual space (commit more of the reserved space)
-bool VirtualSpaceNode::shrink_by(size_t words) {
-  size_t bytes = words * BytesPerWord;
-  virtual_space()->shrink_by(bytes);
-  return true;
-}
-
-// Add another chunk to the chunk list.
-
 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
   Metachunk* result = take_from_committed(chunk_word_size);
@@ -901,23 +918,6 @@
   return result;
 }
 
-Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
-
-  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
-
-  if (new_chunk == NULL) {
-    // Only a small part of the virtualspace is committed when first
-    // allocated so committing more here can be expected.
-    size_t page_size_words = os::vm_page_size() / BytesPerWord;
-    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
-                                                    page_size_words);
-    expand_by(aligned_expand_vs_by_words, false);
-    new_chunk = get_chunk_vs(chunk_word_size);
-  }
-  return new_chunk;
-}
-
 bool VirtualSpaceNode::initialize() {
 
   if (!_rs.is_reserved()) {
@@ -977,13 +977,22 @@
   }
 }
 
-void VirtualSpaceList::inc_virtual_space_total(size_t v) {
+void VirtualSpaceList::inc_reserved_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
-  _virtual_space_total = _virtual_space_total + v;
+  _reserved_words = _reserved_words + v;
+}
+void VirtualSpaceList::dec_reserved_words(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _reserved_words = _reserved_words - v;
 }
-void VirtualSpaceList::dec_virtual_space_total(size_t v) {
+
+void VirtualSpaceList::inc_committed_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
-  _virtual_space_total = _virtual_space_total - v;
+  _committed_words = _committed_words + v;
+}
+void VirtualSpaceList::dec_committed_words(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _committed_words = _committed_words - v;
 }
 
 void VirtualSpaceList::inc_virtual_space_count() {
@@ -1034,7 +1043,8 @@
       }
 
       vsl->purge(chunk_manager());
-      dec_virtual_space_total(vsl->reserved()->word_size());
+      dec_reserved_words(vsl->reserved_words());
+      dec_committed_words(vsl->committed_words());
       dec_virtual_space_count();
       purged_vsl = vsl;
       delete vsl;
@@ -1062,12 +1072,12 @@
     // Sum used region [bottom, top) in each virtualspace
     allocated_by_vs += vsl->used_words_in_vs();
   }
-  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
+  assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
     err_msg("Total in free chunks " SIZE_FORMAT
             " greater than total from virtual_spaces " SIZE_FORMAT,
-            allocated_by_vs, chunk_manager()->free_chunks_total()));
+            allocated_by_vs, chunk_manager()->free_chunks_total_words()));
   size_t used =
-    allocated_by_vs - chunk_manager()->free_chunks_total();
+    allocated_by_vs - chunk_manager()->free_chunks_total_words();
   return used;
 }
 
@@ -1088,7 +1098,8 @@
                                    _is_class(false),
                                    _virtual_space_list(NULL),
                                    _current_virtual_space(NULL),
-                                   _virtual_space_total(0),
+                                   _reserved_words(0),
+                                   _committed_words(0),
                                    _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
@@ -1105,7 +1116,8 @@
                                    _is_class(true),
                                    _virtual_space_list(NULL),
                                    _current_virtual_space(NULL),
-                                   _virtual_space_total(0),
+                                   _reserved_words(0),
+                                   _committed_words(0),
                                    _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
@@ -1115,7 +1127,7 @@
   _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
   _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
   assert(succeeded, " VirtualSpaceList initialization should not fail");
-  link_vs(class_entry, rs.size()/BytesPerWord);
+  link_vs(class_entry);
 }
 
 size_t VirtualSpaceList::free_bytes() {
@@ -1138,31 +1150,47 @@
     delete new_entry;
     return false;
   } else {
+    assert(new_entry->reserved_words() == vs_word_size, "Must be");
     // ensure lock-free iteration sees fully initialized node
     OrderAccess::storestore();
-    link_vs(new_entry, vs_word_size);
+    link_vs(new_entry);
     return true;
   }
 }
 
-void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
+void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
   if (virtual_space_list() == NULL) {
       set_virtual_space_list(new_entry);
   } else {
     current_virtual_space()->set_next(new_entry);
   }
   set_current_virtual_space(new_entry);
-  inc_virtual_space_total(vs_word_size);
+  inc_reserved_words(new_entry->reserved_words());
+  inc_committed_words(new_entry->committed_words());
   inc_virtual_space_count();
 #ifdef ASSERT
   new_entry->mangle();
 #endif
   if (TraceMetavirtualspaceAllocation && Verbose) {
     VirtualSpaceNode* vsl = current_virtual_space();
-    vsl->print_on(tty);
+    vsl->print_on(gclog_or_tty);
   }
 }
 
+bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
+  size_t before = node->committed_words();
+
+  bool result = node->expand_by(word_size, pre_touch);
+
+  size_t after = node->committed_words();
+
+  // after and before can be the same if the memory was pre-committed.
+  assert(after >= before, "Must be");
+  inc_committed_words(after - before);
+
+  return result;
+}
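
The expand_by() wrapper above centralizes the committed-size bookkeeping: it measures the node's committed words before and after the expansion and adds the delta to the list-wide counter. A minimal sketch of that delta-accounting pattern follows; Node, its members, and the counter are illustrative stand-ins, not HotSpot's VirtualSpaceNode or VirtualSpaceList.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Illustrative stand-in for a virtual space node that can commit more memory.
struct Node {
  size_t committed;                       // words currently committed
  bool expand_by(size_t words) {          // pretend the commit always succeeds
    committed += words;
    return true;
  }
};

static size_t total_committed_words = 0;  // list-wide counter, as in VirtualSpaceList

// Expand through a wrapper so the counter can never drift from the nodes.
static bool expand_node(Node* node, size_t words) {
  size_t before = node->committed;
  bool result = node->expand_by(words);
  size_t after = node->committed;
  assert(after >= before);                // pre-committed memory may yield a zero delta
  total_committed_words += after - before;
  return result;
}

int main() {
  Node n = { 0 };
  expand_node(&n, 128);
  expand_node(&n, 64);
  std::printf("node committed: %zu, list total: %zu\n", n.committed, total_committed_words);
  return 0;
}
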
+
 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                            size_t grow_chunks_by_words,
                                            size_t medium_chunk_bunch) {
@@ -1186,7 +1214,7 @@
     size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                         page_size_words);
     bool vs_expanded =
-      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
+      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
     if (!vs_expanded) {
       // Should the capacity of the metaspaces be expanded for
       // this allocation?  If it's the virtual space for classes and is
@@ -1197,7 +1225,14 @@
             MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
         if (grow_vs(grow_vs_words)) {
           // Got it.  It's on the list now.  Get a chunk from it.
-          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
+          assert(current_virtual_space()->expanded_words() == 0,
+              "New virtual space nodes should not have expanded");
+
+          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
+                                                              page_size_words);
+          // We probably want to expand by aligned_expand_vs_by_words here.
+          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
+          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
         }
       } else {
         // Allocation will fail and induce a GC
@@ -1307,7 +1342,7 @@
   // reserved space, because this is a larger space prereserved for compressed
   // class pointers.
   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
-    size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
+    size_t real_allocated = Metaspace::space_list()->reserved_words() +
               MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
     if (real_allocated >= MaxMetaspaceSize) {
       return false;
@@ -1508,7 +1543,7 @@
                                sm->sum_count_in_chunks_in_use());
         dummy_chunk->print_on(gclog_or_tty);
         gclog_or_tty->print_cr("  Free chunks total %d  count %d",
-                               vsl->chunk_manager()->free_chunks_total(),
+                               vsl->chunk_manager()->free_chunks_total_words(),
                                vsl->chunk_manager()->free_chunks_count());
       }
     }
@@ -1565,12 +1600,12 @@
 
 // ChunkManager methods
 
-size_t ChunkManager::free_chunks_total() {
+size_t ChunkManager::free_chunks_total_words() {
   return _free_chunks_total;
 }
 
-size_t ChunkManager::free_chunks_total_in_bytes() {
-  return free_chunks_total() * BytesPerWord;
+size_t ChunkManager::free_chunks_total_bytes() {
+  return free_chunks_total_words() * BytesPerWord;
 }
 
 size_t ChunkManager::free_chunks_count() {
@@ -1698,9 +1733,9 @@
   assert_lock_strong(SpaceManager::expand_lock());
   slow_locked_verify();
   if (TraceMetadataChunkAllocation) {
-    tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
-                  PTR_FORMAT "  size " SIZE_FORMAT,
-                  chunk, chunk->word_size());
+    gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
+                           PTR_FORMAT "  size " SIZE_FORMAT,
+                           chunk, chunk->word_size());
   }
   free_chunks_put(chunk);
 }
@@ -1729,9 +1764,9 @@
     dec_free_chunks_total(chunk->capacity_word_size());
 
     if (TraceMetadataChunkAllocation && Verbose) {
-      tty->print_cr("ChunkManager::free_chunks_get: free_list "
-                    PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
-                    free_list, chunk, chunk->word_size());
+      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
+                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
+                             free_list, chunk, chunk->word_size());
     }
   } else {
     chunk = humongous_dictionary()->get_chunk(
@@ -1741,10 +1776,10 @@
     if (chunk != NULL) {
       if (TraceMetadataHumongousAllocation) {
         size_t waste = chunk->word_size() - word_size;
-        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
-                      " for requested size " SIZE_FORMAT
-                      " waste " SIZE_FORMAT,
-                      chunk->word_size(), word_size, waste);
+        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
+                               SIZE_FORMAT " for requested size " SIZE_FORMAT
+                               " waste " SIZE_FORMAT,
+                               chunk->word_size(), word_size, waste);
       }
       // Chunk is being removed from the chunks free list.
       dec_free_chunks_total(chunk->capacity_word_size());
@@ -1786,10 +1821,10 @@
     } else {
       list_count = humongous_dictionary()->total_count();
     }
-    tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
-               PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
-               this, chunk, chunk->word_size(), list_count);
-    locked_print_free_chunks(tty);
+    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
+                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
+                        this, chunk, chunk->word_size(), list_count);
+    locked_print_free_chunks(gclog_or_tty);
   }
 
   return chunk;
@@ -2278,6 +2313,7 @@
   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
 
   if (index != HumongousIndex) {
+    retire_current_chunk();
     set_current_chunk(new_chunk);
     new_chunk->set_next(chunks_in_use(index));
     set_chunks_in_use(index, new_chunk);
@@ -2308,7 +2344,17 @@
                         sum_count_in_chunks_in_use());
     new_chunk->print_on(gclog_or_tty);
     if (vs_list() != NULL) {
-      vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+      vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
+    }
+  }
+}
+
+void SpaceManager::retire_current_chunk() {
+  if (current_chunk() != NULL) {
+    size_t remaining_words = current_chunk()->free_word_size();
+    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
+      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
+      inc_used_metrics(remaining_words);
     }
   }
 }
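
retire_current_chunk() above keeps the tail of the outgoing current chunk usable: if the remaining free space is large enough to track, it is carved off and handed to the block freelist before the new chunk becomes current. A small sketch of the same idea, reusing a std::multimap freelist as in the earlier sketch; Chunk, free_word_size(), and the size threshold are illustrative, not HotSpot types.

#include <cstddef>
#include <cstdio>
#include <map>
#include <utility>

typedef std::multimap<size_t, size_t> FreeList;    // size (words) -> start offset

struct Chunk {
  size_t start;       // word offset of the chunk
  size_t capacity;    // total words
  size_t used;        // words already handed out
  size_t free_word_size() const { return capacity - used; }
};

static const size_t min_tracked_size = 4;           // stand-in for TreeChunk<...>::min_size()

// Before making a new chunk current, donate the old chunk's unused tail to the
// block freelist so later small allocations can still use it.
static void retire_current_chunk(Chunk* current, FreeList& freelist) {
  if (current == NULL) return;
  size_t remaining = current->free_word_size();
  if (remaining >= min_tracked_size) {
    freelist.insert(std::make_pair(remaining, current->start + current->used));
    current->used = current->capacity;               // the tail now belongs to the freelist
  }
}

int main() {
  FreeList fl;
  Chunk c = { 0, 128, 100 };                        // 28 free words left in the current chunk
  retire_current_chunk(&c, fl);
  std::printf("freelist entries after retire: %zu (tail of %zu words)\n",
              fl.size(), fl.empty() ? (size_t)0 : fl.begin()->first);
  return 0;
}
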
@@ -2320,10 +2366,10 @@
                                              grow_chunks_by_words,
                                              medium_chunk_bunch());
 
-  if (TraceMetadataHumongousAllocation &&
+  if (TraceMetadataHumongousAllocation && next != NULL &&
       SpaceManager::is_humongous(next->word_size())) {
-    gclog_or_tty->print_cr("  new humongous chunk word size " PTR_FORMAT,
-                           next->word_size());
+    gclog_or_tty->print_cr("  new humongous chunk word size "
+                           PTR_FORMAT, next->word_size());
   }
 
   return next;
@@ -2441,9 +2487,6 @@
          curr = curr->next()) {
       out->print("%d) ", i++);
       curr->print_on(out);
-      if (TraceMetadataChunkAllocation && Verbose) {
-        block_freelists()->print_on(out);
-      }
       curr_total += curr->word_size();
       used += curr->used_word_size();
       capacity += curr->capacity_word_size();
@@ -2451,6 +2494,10 @@
     }
   }
 
+  if (TraceMetadataChunkAllocation && Verbose) {
+    block_freelists()->print_on(out);
+  }
+
   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
   // Free space isn't wasted.
   waste -= free;
@@ -2538,13 +2585,13 @@
   return used * BytesPerWord;
 }
 
-size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
   size_t free = 0;
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
     Metaspace* msp = iter.get_next();
     if (msp != NULL) {
-      free += msp->free_words(mdtype);
+      free += msp->free_words_slow(mdtype);
     }
   }
   return free * BytesPerWord;
@@ -2567,34 +2614,56 @@
   return capacity * BytesPerWord;
 }
 
-size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
-  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
-  return list == NULL ? 0 : list->virtual_space_total();
+size_t MetaspaceAux::capacity_bytes_slow() {
+#ifdef PRODUCT
+  // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
+#endif
+  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
+  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
+  assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
+      err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+        " class_capacity + non_class_capacity " SIZE_FORMAT
+        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
+        allocated_capacity_bytes(), class_capacity + non_class_capacity,
+        class_capacity, non_class_capacity));
+
+  return class_capacity + non_class_capacity;
 }
 
-size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
-
-size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  return list == NULL ? 0 : list->reserved_bytes();
+}
+
+size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  return list == NULL ? 0 : list->committed_bytes();
+}
+
+size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
+
+size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
   if (list == NULL) {
     return 0;
   }
   ChunkManager* chunk = list->chunk_manager();
   chunk->slow_verify();
-  return chunk->free_chunks_total();
+  return chunk->free_chunks_total_words();
 }
 
-size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
-  return free_chunks_total(mdtype) * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
+  return free_chunks_total_words(mdtype) * BytesPerWord;
 }
 
-size_t MetaspaceAux::free_chunks_total() {
-  return free_chunks_total(Metaspace::ClassType) +
-         free_chunks_total(Metaspace::NonClassType);
+size_t MetaspaceAux::free_chunks_total_words() {
+  return free_chunks_total_words(Metaspace::ClassType) +
+         free_chunks_total_words(Metaspace::NonClassType);
 }
 
-size_t MetaspaceAux::free_chunks_total_in_bytes() {
-  return free_chunks_total() * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes() {
+  return free_chunks_total_words() * BytesPerWord;
 }
 
 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
@@ -2605,14 +2674,14 @@
                         "("  SIZE_FORMAT ")",
                         prev_metadata_used,
                         allocated_used_bytes(),
-                        reserved_in_bytes());
+                        reserved_bytes());
   } else {
     gclog_or_tty->print(" "  SIZE_FORMAT "K"
                         "->" SIZE_FORMAT "K"
                         "("  SIZE_FORMAT "K)",
-                        prev_metadata_used / K,
-                        allocated_used_bytes() / K,
-                        reserved_in_bytes()/ K);
+                        prev_metadata_used/K,
+                        allocated_used_bytes()/K,
+                        reserved_bytes()/K);
   }
 
   gclog_or_tty->print("]");
@@ -2625,14 +2694,14 @@
   out->print_cr(" Metaspace total "
                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                 " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
+                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
 
   out->print_cr("  data space     "
                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                 " reserved " SIZE_FORMAT "K",
                 allocated_capacity_bytes(nct)/K,
                 allocated_used_bytes(nct)/K,
-                reserved_in_bytes(nct)/K);
+                reserved_bytes(nct)/K);
   if (Metaspace::using_class_space()) {
     Metaspace::MetadataType ct = Metaspace::ClassType;
     out->print_cr("  class space    "
@@ -2640,17 +2709,17 @@
                   " reserved " SIZE_FORMAT "K",
                   allocated_capacity_bytes(ct)/K,
                   allocated_used_bytes(ct)/K,
-                  reserved_in_bytes(ct)/K);
+                  reserved_bytes(ct)/K);
   }
 }
 
 // Print information for class space and data space separately.
 // This is almost the same as above.
 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
-  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
+  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
   size_t capacity_bytes = capacity_bytes_slow(mdtype);
   size_t used_bytes = used_bytes_slow(mdtype);
-  size_t free_bytes = free_in_bytes(mdtype);
+  size_t free_bytes = free_bytes_slow(mdtype);
   size_t used_and_free = used_bytes + free_bytes +
                            free_chunks_capacity_bytes;
   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
@@ -2836,7 +2905,7 @@
 // to work with compressed klass pointers.
 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
-  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   address lower_base = MIN2((address)metaspace_base, cds_base);
   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                 (address)(metaspace_base + class_metaspace_size()));
@@ -2846,7 +2915,7 @@
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
   assert(using_class_space(), "called improperly");
-  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
          "Metaspace size is too big");
 
@@ -2869,9 +2938,9 @@
 
     // If no successful allocation then try to allocate the space anywhere.  If
     // that fails then OOM doom.  At this point we cannot try allocating the
-    // metaspace as if UseCompressedKlassPointers is off because too much
-    // initialization has happened that depends on UseCompressedKlassPointers.
-    // So, UseCompressedKlassPointers cannot be turned off at this point.
+    // metaspace as if UseCompressedClassPointers is off because too much
+    // initialization has happened that depends on UseCompressedClassPointers.
+    // So, UseCompressedClassPointers cannot be turned off at this point.
     if (!metaspace_rs.is_reserved()) {
       metaspace_rs = ReservedSpace(class_metaspace_size(),
                                    os::vm_allocation_granularity(), false);
@@ -2904,12 +2973,12 @@
   }
 }
 
-// For UseCompressedKlassPointers the class space is reserved above the top of
+// For UseCompressedClassPointers the class space is reserved above the top of
 // the Java heap.  The argument passed in is at the base of the compressed space.
 void Metaspace::initialize_class_space(ReservedSpace rs) {
   // The reserved space size may be bigger because of alignment, esp with UseLargePages
-  assert(rs.size() >= ClassMetaspaceSize,
-         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
+  assert(rs.size() >= CompressedClassSpaceSize,
+         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
   assert(using_class_space(), "Must be using class space");
   _class_space_list = new VirtualSpaceList(rs);
 }
@@ -2921,7 +2990,7 @@
   int max_alignment = os::vm_page_size();
   size_t cds_total = 0;
 
-  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
+  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
                                          os::vm_allocation_granularity()));
 
   MetaspaceShared::set_max_alignment(max_alignment);
@@ -2941,8 +3010,8 @@
 #ifdef _LP64
     // Set the compressed klass pointer base so that decoding of these pointers works
     // properly when creating the shared archive.
-    assert(UseCompressedOops && UseCompressedKlassPointers,
-      "UseCompressedOops and UseCompressedKlassPointers must be set");
+    assert(UseCompressedOops && UseCompressedClassPointers,
+      "UseCompressedOops and UseCompressedClassPointers must be set");
     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
     if (TraceMetavirtualspaceAllocation && Verbose) {
       gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
@@ -2979,7 +3048,7 @@
     }
 
 #ifdef _LP64
-    // If UseCompressedKlassPointers is set then allocate the metaspace area
+    // If UseCompressedClassPointers is set then allocate the metaspace area
     // above the heap and above the CDS area (if it exists).
     if (using_class_space()) {
       if (UseSharedSpaces) {
@@ -2997,7 +3066,7 @@
     // on the medium chunk list.   The next chunk will be small and progress
     // from there.  This size calculated by -version.
     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
-                                       (ClassMetaspaceSize/BytesPerWord)*2);
+                                       (CompressedClassSpaceSize/BytesPerWord)*2);
     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
     // Arbitrarily set the initial virtual space to a multiple
     // of the boot class loader size.
@@ -3064,7 +3133,7 @@
 
 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
   // DumpSharedSpaces doesn't use class metadata area (yet)
-  // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
+  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
   if (mdtype == ClassType && using_class_space()) {
     return  class_vsm()->allocate(word_size);
   } else {
@@ -3103,7 +3172,7 @@
   }
 }
 
-size_t Metaspace::free_words(MetadataType mdtype) const {
+size_t Metaspace::free_words_slow(MetadataType mdtype) const {
   if (mdtype == ClassType) {
     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
   } else {
@@ -3213,7 +3282,7 @@
         MetaspaceAux::dump(gclog_or_tty);
       }
       // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
-      const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
+      const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
                                                          "Metadata space";
       report_java_out_of_memory(space_string);
 
@@ -3311,3 +3380,59 @@
     class_vsm()->dump(out);
   }
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class MetaspaceAuxTest : AllStatic {
+ public:
+  static void test_reserved() {
+    size_t reserved = MetaspaceAux::reserved_bytes();
+
+    assert(reserved > 0, "assert");
+
+    size_t committed  = MetaspaceAux::committed_bytes();
+    assert(committed <= reserved, "assert");
+
+    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
+    assert(reserved_metadata > 0, "assert");
+    assert(reserved_metadata <= reserved, "assert");
+
+    if (UseCompressedClassPointers) {
+      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
+      assert(reserved_class > 0, "assert");
+      assert(reserved_class < reserved, "assert");
+    }
+  }
+
+  static void test_committed() {
+    size_t committed = MetaspaceAux::committed_bytes();
+
+    assert(committed > 0, "assert");
+
+    size_t reserved  = MetaspaceAux::reserved_bytes();
+    assert(committed <= reserved, "assert");
+
+    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
+    assert(committed_metadata > 0, "assert");
+    assert(committed_metadata <= committed, "assert");
+
+    if (UseCompressedClassPointers) {
+      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+      assert(committed_class > 0, "assert");
+      assert(committed_class < committed, "assert");
+    }
+  }
+
+  static void test() {
+    test_reserved();
+    test_committed();
+  }
+};
+
+void MetaspaceAux_test() {
+  MetaspaceAuxTest::test();
+}
+
+#endif
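
A minimal sketch of how the MetaspaceAux_test() hook above might be driven from a non-product build; the driver function below is hypothetical, and the usual wiring through the -XX:+ExecuteInternalVMTests harness is assumed rather than shown by this changeset.

#ifndef PRODUCT
// Hypothetical driver: exercises the new accounting invariants via the hook above.
void run_metaspace_aux_tests() {
  MetaspaceAux_test();   // runs test_reserved() and test_committed()
}
#endif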
--- a/hotspot/src/share/vm/memory/metaspace.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -182,9 +182,8 @@
 
   char*  bottom() const;
   size_t used_words_slow(MetadataType mdtype) const;
-  size_t free_words(MetadataType mdtype) const;
+  size_t free_words_slow(MetadataType mdtype) const;
   size_t capacity_words_slow(MetadataType mdtype) const;
-  size_t waste_words(MetadataType mdtype) const;
 
   size_t used_bytes_slow(MetadataType mdtype) const;
   size_t capacity_bytes_slow(MetadataType mdtype) const;
@@ -213,27 +212,22 @@
 
   void iterate(AllocRecordClosure *closure);
 
-  // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
+  // Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False.
   static bool using_class_space() {
-    return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
+    return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
   }
 
 };
 
 class MetaspaceAux : AllStatic {
-  static size_t free_chunks_total(Metaspace::MetadataType mdtype);
-
- public:
-  // Statistics for class space and data space in metaspace.
+  static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
 
   // These methods iterate over the classloader data graph
   // for the given Metaspace type.  These are slow.
   static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
-  static size_t free_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t free_bytes_slow(Metaspace::MetadataType mdtype);
   static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
-
-  // Iterates over the virtual space list.
-  static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t capacity_bytes_slow();
 
   // Running sum of space in all Metachunks that has been
   // allocated to a Metaspace.  This is used instead of
@@ -263,17 +257,16 @@
   }
 
   // Used by MetaspaceCounters
-  static size_t free_chunks_total();
-  static size_t free_chunks_total_in_bytes();
-  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t free_chunks_total_words();
+  static size_t free_chunks_total_bytes();
+  static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
 
   static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
     return _allocated_capacity_words[mdtype];
   }
   static size_t allocated_capacity_words() {
-    return _allocated_capacity_words[Metaspace::NonClassType] +
-           (Metaspace::using_class_space() ?
-           _allocated_capacity_words[Metaspace::ClassType] : 0);
+    return allocated_capacity_words(Metaspace::NonClassType) +
+           allocated_capacity_words(Metaspace::ClassType);
   }
   static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
     return allocated_capacity_words(mdtype) * BytesPerWord;
@@ -286,9 +279,8 @@
     return _allocated_used_words[mdtype];
   }
   static size_t allocated_used_words() {
-    return _allocated_used_words[Metaspace::NonClassType] +
-           (Metaspace::using_class_space() ?
-           _allocated_used_words[Metaspace::ClassType] : 0);
+    return allocated_used_words(Metaspace::NonClassType) +
+           allocated_used_words(Metaspace::ClassType);
   }
   static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
     return allocated_used_words(mdtype) * BytesPerWord;
@@ -300,31 +292,22 @@
   static size_t free_bytes();
   static size_t free_bytes(Metaspace::MetadataType mdtype);
 
-  // Total capacity in all Metaspaces
-  static size_t capacity_bytes_slow() {
-#ifdef PRODUCT
-    // Use allocated_capacity_bytes() in PRODUCT instead of this function.
-    guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
-#endif
-    size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
-    size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
-    assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
-           err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
-             " class_capacity + non_class_capacity " SIZE_FORMAT
-             " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
-             allocated_capacity_bytes(), class_capacity + non_class_capacity,
-             class_capacity, non_class_capacity));
-
-    return class_capacity + non_class_capacity;
+  static size_t reserved_bytes(Metaspace::MetadataType mdtype);
+  static size_t reserved_bytes() {
+    return reserved_bytes(Metaspace::ClassType) +
+           reserved_bytes(Metaspace::NonClassType);
   }
 
-  // Total space reserved in all Metaspaces
-  static size_t reserved_in_bytes() {
-    return reserved_in_bytes(Metaspace::ClassType) +
-           reserved_in_bytes(Metaspace::NonClassType);
+  static size_t committed_bytes(Metaspace::MetadataType mdtype);
+  static size_t committed_bytes() {
+    return committed_bytes(Metaspace::ClassType) +
+           committed_bytes(Metaspace::NonClassType);
   }
 
-  static size_t min_chunk_size();
+  static size_t min_chunk_size_words();
+  static size_t min_chunk_size_bytes() {
+    return min_chunk_size_words() * BytesPerWord;
+  }
 
   // Print change in used metadata.
   static void print_metaspace_change(size_t prev_metadata_used);
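
A hypothetical caller of the renamed byte-granularity accessors, for illustration only; the helper below is not part of this changeset.

// Sketch: composes the new MetaspaceAux byte accessors declared above.
static double metaspace_committed_ratio() {
  size_t reserved  = MetaspaceAux::reserved_bytes();    // ClassType + NonClassType
  size_t committed = MetaspaceAux::committed_bytes();
  return (reserved == 0) ? 0.0 : (double)committed / (double)reserved;
}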
--- a/hotspot/src/share/vm/memory/metaspaceCounters.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -65,26 +65,25 @@
 
 MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
 
-size_t MetaspaceCounters::calculate_capacity() {
-  // The total capacity is the sum of
-  //   1) capacity of Metachunks in use by all Metaspaces
-  //   2) unused space at the end of each Metachunk
-  //   3) space in the freelist
-  size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
-    + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
-  return total_capacity;
+size_t MetaspaceCounters::used() {
+  return MetaspaceAux::allocated_used_bytes();
+}
+
+size_t MetaspaceCounters::capacity() {
+  return MetaspaceAux::committed_bytes();
+}
+
+size_t MetaspaceCounters::max_capacity() {
+  return MetaspaceAux::reserved_bytes();
 }
 
 void MetaspaceCounters::initialize_performance_counters() {
   if (UsePerfData) {
     assert(_perf_counters == NULL, "Should only be initialized once");
 
-    size_t min_capacity = MetaspaceAux::min_chunk_size();
-    size_t capacity = calculate_capacity();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t used = MetaspaceAux::allocated_used_bytes();
-
-    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
+    size_t min_capacity = 0;
+    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity,
+                                               capacity(), max_capacity(), used());
   }
 }
 
@@ -92,31 +91,29 @@
   if (UsePerfData) {
     assert(_perf_counters != NULL, "Should be initialized");
 
-    size_t capacity = calculate_capacity();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t used = MetaspaceAux::allocated_used_bytes();
-
-    _perf_counters->update(capacity, max_capacity, used);
+    _perf_counters->update(capacity(), max_capacity(), used());
   }
 }
 
 MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
 
-size_t CompressedClassSpaceCounters::calculate_capacity() {
-    return MetaspaceAux::allocated_capacity_bytes(_class_type) +
-           MetaspaceAux::free_bytes(_class_type) +
-           MetaspaceAux::free_chunks_total_in_bytes(_class_type);
+size_t CompressedClassSpaceCounters::used() {
+  return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+}
+
+size_t CompressedClassSpaceCounters::capacity() {
+  return MetaspaceAux::committed_bytes(Metaspace::ClassType);
+}
+
+size_t CompressedClassSpaceCounters::max_capacity() {
+  return MetaspaceAux::reserved_bytes(Metaspace::ClassType);
 }
 
 void CompressedClassSpaceCounters::update_performance_counters() {
-  if (UsePerfData && UseCompressedKlassPointers) {
+  if (UsePerfData && UseCompressedClassPointers) {
     assert(_perf_counters != NULL, "Should be initialized");
 
-    size_t capacity = calculate_capacity();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
-    size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
-
-    _perf_counters->update(capacity, max_capacity, used);
+    _perf_counters->update(capacity(), max_capacity(), used());
   }
 }
 
@@ -125,13 +122,10 @@
     assert(_perf_counters == NULL, "Should only be initialized once");
     const char* ns = "compressedclassspace";
 
-    if (UseCompressedKlassPointers) {
-      size_t min_capacity = MetaspaceAux::min_chunk_size();
-      size_t capacity = calculate_capacity();
-      size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
-      size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
-
-      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
+    if (UseCompressedClassPointers) {
+      size_t min_capacity = 0;
+      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity(),
+                                                 max_capacity(), used());
     } else {
       _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
     }
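
The rewritten counters now map directly onto MetaspaceAux: used() reports allocated_used_bytes(), capacity() reports committed_bytes(), and max_capacity() reports reserved_bytes(). Below is a monitoring sketch that reads the same sources; the helper is hypothetical and not part of this changeset.

// Sketch: prints the same three values the perf counters publish.
static void print_metaspace_counter_view(outputStream* out) {
  out->print_cr("metaspace: used=" SIZE_FORMAT "K committed=" SIZE_FORMAT "K reserved=" SIZE_FORMAT "K",
                MetaspaceAux::allocated_used_bytes()/K,
                MetaspaceAux::committed_bytes()/K,
                MetaspaceAux::reserved_bytes()/K);
}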
--- a/hotspot/src/share/vm/memory/metaspaceCounters.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -25,13 +25,15 @@
 #ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
 #define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
 
-#include "memory/metaspace.hpp"
+#include "memory/allocation.hpp"
 
 class MetaspacePerfCounters;
 
 class MetaspaceCounters: public AllStatic {
   static MetaspacePerfCounters* _perf_counters;
-  static size_t calculate_capacity();
+  static size_t used();
+  static size_t capacity();
+  static size_t max_capacity();
 
  public:
   static void initialize_performance_counters();
@@ -40,8 +42,9 @@
 
 class CompressedClassSpaceCounters: public AllStatic {
   static MetaspacePerfCounters* _perf_counters;
-  static size_t calculate_capacity();
-  static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
+  static size_t used();
+  static size_t capacity();
+  static size_t max_capacity();
 
  public:
   static void initialize_performance_counters();
--- a/hotspot/src/share/vm/memory/universe.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -602,7 +602,7 @@
   }
 }
 
-static intptr_t non_oop_bits = 0;
+intptr_t Universe::_non_oop_bits = 0;
 
 void* Universe::non_oop_word() {
   // Neither the high bits nor the low bits of this value is allowed
@@ -616,11 +616,11 @@
   // Using the OS-supplied non-memory-address word (usually 0 or -1)
   // will take care of the high bits, however many there are.
 
-  if (non_oop_bits == 0) {
-    non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
+  if (_non_oop_bits == 0) {
+    _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
   }
 
-  return (void*)non_oop_bits;
+  return (void*)_non_oop_bits;
 }
 
 jint universe_init() {
@@ -872,13 +872,16 @@
 
 // Reserve the Java heap, which is now the same for all GCs.
 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
+  assert(alignment <= Arguments::conservative_max_heap_alignment(),
+      err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
+          alignment, Arguments::conservative_max_heap_alignment()));
   size_t total_reserved = align_size_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
 
   bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
   assert(!UseLargePages
-      || UseParallelOldGC
+      || UseParallelGC
       || use_large_pages, "Wrong alignment to use large pages");
 
   char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
@@ -1028,7 +1031,7 @@
 
     msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
-    msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false);
+    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
 
     msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
--- a/hotspot/src/share/vm/memory/universe.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/memory/universe.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -179,9 +179,11 @@
   // The particular choice of collected heap.
   static CollectedHeap* _collectedHeap;
 
+  static intptr_t _non_oop_bits;
+
   // For UseCompressedOops.
   static struct NarrowPtrStruct _narrow_oop;
-  // For UseCompressedKlassPointers.
+  // For UseCompressedClassPointers.
   static struct NarrowPtrStruct _narrow_klass;
   static address _narrow_ptrs_base;
 
@@ -229,7 +231,7 @@
     _narrow_oop._base    = base;
   }
   static void     set_narrow_klass_base(address base) {
-    assert(UseCompressedKlassPointers, "no compressed klass ptrs?");
+    assert(UseCompressedClassPointers, "no compressed klass ptrs?");
     _narrow_klass._base   = base;
   }
   static void     set_narrow_oop_use_implicit_null_checks(bool use) {
@@ -353,7 +355,7 @@
   static int      narrow_oop_shift()                      { return  _narrow_oop._shift; }
   static bool     narrow_oop_use_implicit_null_checks()   { return  _narrow_oop._use_implicit_null_checks; }
 
-  // For UseCompressedKlassPointers
+  // For UseCompressedClassPointers
   static address  narrow_klass_base()                     { return  _narrow_klass._base; }
   static bool  is_narrow_klass_base(void* addr)           { return (narrow_klass_base() == (address)addr); }
   static int      narrow_klass_shift()                    { return  _narrow_klass._shift; }
--- a/hotspot/src/share/vm/oops/arrayOop.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/arrayOop.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -65,7 +65,7 @@
   // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
   // it occupies the second half of the _klass field in oopDesc.
   static int length_offset_in_bytes() {
-    return UseCompressedKlassPointers ? klass_gap_offset_in_bytes() :
+    return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
                                sizeof(arrayOopDesc);
   }
 
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -396,32 +396,6 @@
 }
 
 
-// This is an interface for the compiler that allows accessing non-resolved entries
-// in the constant pool - but still performs the validations tests. Must be used
-// in a pre-parse of the compiler - to determine what it can do and not do.
-// Note: We cannot update the ConstantPool from the vm_thread.
-Klass* ConstantPool::klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int index, TRAPS) {
-  int which = this_oop->klass_ref_index_at(index);
-  CPSlot entry = this_oop->slot_at(which);
-  if (entry.is_resolved()) {
-    assert(entry.get_klass()->is_klass(), "must be");
-    return entry.get_klass();
-  } else {
-    assert(entry.is_unresolved(), "must be either symbol or klass");
-    Symbol*  name  = entry.get_symbol();
-    oop loader = this_oop->pool_holder()->class_loader();
-    oop protection_domain = this_oop->pool_holder()->protection_domain();
-    Handle h_loader(THREAD, loader);
-    Handle h_prot  (THREAD, protection_domain);
-    KlassHandle k(THREAD, SystemDictionary::find(name, h_loader, h_prot, THREAD));
-
-    // Do access check for klasses
-    if( k.not_null() ) verify_constant_pool_resolve(this_oop, k, CHECK_NULL);
-    return k();
-  }
-}
-
-
 Method* ConstantPool::method_at_if_loaded(constantPoolHandle cpool,
                                                    int which) {
   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
--- a/hotspot/src/share/vm/oops/constantPool.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -730,8 +730,6 @@
   static oop         method_type_at_if_loaded      (constantPoolHandle this_oop, int which);
   static Klass*            klass_at_if_loaded      (constantPoolHandle this_oop, int which);
   static Klass*        klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
-  // Same as above - but does LinkResolving.
-  static Klass*        klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
 
   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
   // future by other Java code. These take constant pool indices rather than
--- a/hotspot/src/share/vm/oops/cpCache.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/cpCache.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -140,9 +140,10 @@
             err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
 }
 
-void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
-                                        methodHandle method,
-                                        int vtable_index) {
+void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
+                                                       methodHandle method,
+                                                       int vtable_index) {
+  bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
   assert(method->interpreter_entry() != NULL, "should have been set at this point");
   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
 
@@ -160,7 +161,8 @@
       // ...and fall through as if we were handling invokevirtual:
     case Bytecodes::_invokevirtual:
       {
-        if (method->can_be_statically_bound()) {
+        if (!is_vtable_call) {
+          assert(method->can_be_statically_bound(), "");
           // set_f2_as_vfinal_method checks if is_vfinal flag is true.
           set_method_flags(as_TosState(method->result_type()),
                            (                             1      << is_vfinal_shift) |
@@ -169,6 +171,7 @@
                            method()->size_of_parameters());
           set_f2_as_vfinal_method(method());
         } else {
+          assert(!method->can_be_statically_bound(), "");
           assert(vtable_index >= 0, "valid index");
           assert(!method->is_final_method(), "sanity");
           set_method_flags(as_TosState(method->result_type()),
@@ -182,6 +185,7 @@
 
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokestatic:
+      assert(!is_vtable_call, "");
       // Note:  Read and preserve the value of the is_vfinal flag on any
       // invokevirtual bytecode shared with this constant pool cache entry.
       // It is cheap and safe to consult is_vfinal() at all times.
@@ -232,8 +236,22 @@
   NOT_PRODUCT(verify(tty));
 }
 
+void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method) {
+  int index = Method::nonvirtual_vtable_index;
+  // index < 0; FIXME: inline and customize set_direct_or_vtable_call
+  set_direct_or_vtable_call(invoke_code, method, index);
+}
 
-void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
+void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
+  // either the method is a miranda or its holder should accept the given index
+  assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
+  // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
+  set_direct_or_vtable_call(invoke_code, method, index);
+}
+
+void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
+  assert(method->method_holder()->verify_itable_index(index), "");
+  assert(invoke_code == Bytecodes::_invokeinterface, "");
   InstanceKlass* interf = method->method_holder();
   assert(interf->is_interface(), "must be an interface");
   assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
--- a/hotspot/src/share/vm/oops/cpCache.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/cpCache.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -219,15 +219,29 @@
     Klass*          root_klass                   // needed by the GC to dirty the klass
   );
 
-  void set_method(                               // sets entry to resolved method entry
+ private:
+  void set_direct_or_vtable_call(
     Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
     methodHandle    method,                      // the method/prototype if any (NULL, otherwise)
     int             vtable_index                 // the vtable index if any, else negative
   );
 
-  void set_interface_call(
-    methodHandle method,                         // Resolved method
-    int index                                    // Method index into interface
+ public:
+  void set_direct_call(                          // sets entry to exact concrete method entry
+    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
+    methodHandle    method                       // the method to call
+  );
+
+  void set_vtable_call(                          // sets entry to vtable index
+    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
+    methodHandle    method,                      // resolved method which declares the vtable index
+    int             vtable_index                 // the vtable index
+  );
+
+  void set_itable_call(
+    Bytecodes::Code invoke_code,                 // the bytecode used; must be invokeinterface
+    methodHandle method,                         // the resolved interface method
+    int itable_index                             // index into itable for the method
   );
 
   void set_method_handle(
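
To illustrate how the split API is meant to be consumed, here is a hedged sketch of the selection a resolver would perform when filling a cache entry; the helper and its parameters are hypothetical, not code from this changeset.

// Sketch: pick the entry point that matches how the call site was resolved.
static void fill_cpcache_entry(ConstantPoolCacheEntry* e, Bytecodes::Code code,
                               methodHandle resolved, int vtable_index, int itable_index) {
  if (code == Bytecodes::_invokeinterface) {
    e->set_itable_call(code, resolved, itable_index);   // dispatch through the interface itable
  } else if (vtable_index >= 0) {
    e->set_vtable_call(code, resolved, vtable_index);   // virtual dispatch through the vtable
  } else {
    e->set_direct_call(code, resolved);                 // statically bound: final/special/static
  }
}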
--- a/hotspot/src/share/vm/oops/fieldStreams.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/fieldStreams.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "oops/instanceKlass.hpp"
 #include "oops/fieldInfo.hpp"
+#include "runtime/fieldDescriptor.hpp"
 
 // This is the base class for iteration over the fields array
 // describing the declared fields in the class.  Several subclasses
@@ -43,8 +44,10 @@
   int                 _index;
   int                 _limit;
   int                 _generic_signature_slot;
+  fieldDescriptor     _fd_buf;
 
   FieldInfo* field() const { return FieldInfo::from_field_array(_fields, _index); }
+  InstanceKlass* field_holder() const { return _constants->pool_holder(); }
 
   int init_generic_signature_start_slot() {
     int length = _fields->length();
@@ -102,6 +105,7 @@
     _index = 0;
     _limit = klass->java_fields_count();
     init_generic_signature_start_slot();
+    assert(klass == field_holder(), "");
   }
   FieldStreamBase(instanceKlassHandle klass) {
     _fields = klass->fields();
@@ -109,6 +113,7 @@
     _index = 0;
     _limit = klass->java_fields_count();
     init_generic_signature_start_slot();
+    assert(klass == field_holder(), "");
   }
 
   // accessors
@@ -180,6 +185,12 @@
     return field()->contended_group();
   }
 
+  // bridge to a heavier API:
+  fieldDescriptor& field_descriptor() const {
+    fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
+    field.reinitialize(field_holder(), _index);
+    return field;
+  }
 };
 
 // Iterate over only the internal fields
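
A usage sketch for the new field_descriptor() bridge (the loop body is hypothetical); note that the returned reference aliases the stream-owned _fd_buf, so it is only valid until the next call on the same stream.

// Sketch: walk the declared Java fields of an InstanceKlass* ik without
// constructing a fresh fieldDescriptor per field.
for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
  fieldDescriptor& fd = fs.field_descriptor();   // reinitializes the shared buffer in place
  if (!fd.is_static()) {
    tty->print_cr("instance field at offset %d", fd.offset());
  }
}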
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -286,7 +286,6 @@
   init_previous_versions();
   set_generic_signature_index(0);
   release_set_methods_jmethod_ids(NULL);
-  release_set_methods_cached_itable_indices(NULL);
   set_annotations(NULL);
   set_jvmti_cached_class_field_map(NULL);
   set_initial_method_idnum(0);
@@ -1149,7 +1148,7 @@
     Symbol* f_name = fs.name();
     Symbol* f_sig  = fs.signature();
     if (f_name == name && f_sig == sig) {
-      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
       return true;
     }
   }
@@ -1218,7 +1217,7 @@
 bool InstanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
     if (fs.offset() == offset) {
-      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
       if (fd->is_static() == is_static) return true;
     }
   }
@@ -1251,8 +1250,7 @@
 void InstanceKlass::do_local_static_fields(FieldClosure* cl) {
   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
-      fieldDescriptor fd;
-      fd.initialize(this, fs.index());
+      fieldDescriptor& fd = fs.field_descriptor();
       cl->do_field(&fd);
     }
   }
@@ -1268,8 +1266,7 @@
 void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
   for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
-      fieldDescriptor fd;
-      fd.initialize(this_oop(), fs.index());
+      fieldDescriptor& fd = fs.field_descriptor();
       f(&fd, CHECK);
     }
   }
@@ -1291,7 +1288,7 @@
   int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
   int j = 0;
   for (int i = 0; i < length; i += 1) {
-    fd.initialize(this, i);
+    fd.reinitialize(this, i);
     if (!fd.is_static()) {
       fields_sorted[j + 0] = fd.offset();
       fields_sorted[j + 1] = i;
@@ -1303,7 +1300,7 @@
     // _sort_Fn is defined in growableArray.hpp.
     qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
     for (int i = 0; i < length; i += 2) {
-      fd.initialize(this, fields_sorted[i + 1]);
+      fd.reinitialize(this, fields_sorted[i + 1]);
       assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
       cl->do_field(&fd);
     }
@@ -1686,87 +1683,6 @@
 }
 
 
-// Cache an itable index
-void InstanceKlass::set_cached_itable_index(size_t idnum, int index) {
-  int* indices = methods_cached_itable_indices_acquire();
-  int* to_dealloc_indices = NULL;
-
-  // We use a double-check locking idiom here because this cache is
-  // performance sensitive. In the normal system, this cache only
-  // transitions from NULL to non-NULL which is safe because we use
-  // release_set_methods_cached_itable_indices() to advertise the
-  // new cache. A partially constructed cache should never be seen
-  // by a racing thread. Cache reads and writes proceed without a
-  // lock, but creation of the cache itself requires no leaks so a
-  // lock is generally acquired in that case.
-  //
-  // If the RedefineClasses() API has been used, then this cache can
-  // grow and we'll have transitions from non-NULL to bigger non-NULL.
-  // Cache creation requires no leaks and we require safety between all
-  // cache accesses and freeing of the old cache so a lock is generally
-  // acquired when the RedefineClasses() API has been used.
-
-  if (indices == NULL || idnum_can_increment()) {
-    // we need a cache or the cache can grow
-    MutexLocker ml(JNICachedItableIndex_lock);
-    // reacquire the cache to see if another thread already did the work
-    indices = methods_cached_itable_indices_acquire();
-    size_t length = 0;
-    // cache size is stored in element[0], other elements offset by one
-    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
-      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
-      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass);
-      new_indices[0] = (int)size;
-      // copy any existing entries
-      size_t i;
-      for (i = 0; i < length; i++) {
-        new_indices[i+1] = indices[i+1];
-      }
-      // Set all the rest to -1
-      for (i = length; i < size; i++) {
-        new_indices[i+1] = -1;
-      }
-      if (indices != NULL) {
-        // We have an old cache to delete so save it for after we
-        // drop the lock.
-        to_dealloc_indices = indices;
-      }
-      release_set_methods_cached_itable_indices(indices = new_indices);
-    }
-
-    if (idnum_can_increment()) {
-      // this cache can grow so we have to write to it safely
-      indices[idnum+1] = index;
-    }
-  } else {
-    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
-  }
-
-  if (!idnum_can_increment()) {
-    // The cache cannot grow and this JNI itable index value does not
-    // have to be unique like a jmethodID. If there is a race to set it,
-    // it doesn't matter.
-    indices[idnum+1] = index;
-  }
-
-  if (to_dealloc_indices != NULL) {
-    // we allocated a new cache so free the old one
-    FreeHeap(to_dealloc_indices);
-  }
-}
-
-
-// Retrieve a cached itable index
-int InstanceKlass::cached_itable_index(size_t idnum) {
-  int* indices = methods_cached_itable_indices_acquire();
-  if (indices != NULL && ((size_t)indices[0]) > idnum) {
-     // indices exist and are long enough, retrieve possible cached
-    return indices[idnum+1];
-  }
-  return -1;
-}
-
-
 //
 // Walk the list of dependent nmethods searching for nmethods which
 // are dependent on the changes that were passed in and mark them for
@@ -2326,12 +2242,6 @@
     }
   }
 
-  int* indices = methods_cached_itable_indices_acquire();
-  if (indices != (int*)NULL) {
-    release_set_methods_cached_itable_indices(NULL);
-    FreeHeap(indices);
-  }
-
   // release dependencies
   nmethodBucket* b = _dependencies;
   _dependencies = NULL;
@@ -2782,6 +2692,18 @@
   "allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error"
 };
 
+static void print_vtable(intptr_t* start, int len, outputStream* st) {
+  for (int i = 0; i < len; i++) {
+    intptr_t e = start[i];
+    st->print("%d : " INTPTR_FORMAT, i, e);
+    if (e != 0 && ((Metadata*)e)->is_metaspace_object()) {
+      st->print(" ");
+      ((Metadata*)e)->print_value_on(st);
+    }
+    st->cr();
+  }
+}
+
 void InstanceKlass::print_on(outputStream* st) const {
   assert(is_klass(), "must be klass");
   Klass::print_on(st);
@@ -2816,7 +2738,7 @@
 
   st->print(BULLET"arrays:            "); array_klasses()->print_value_on_maybe_null(st); st->cr();
   st->print(BULLET"methods:           "); methods()->print_value_on(st);                  st->cr();
-  if (Verbose) {
+  if (Verbose || WizardMode) {
     Array<Method*>* method_array = methods();
     for(int i = 0; i < method_array->length(); i++) {
       st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
@@ -2874,7 +2796,9 @@
   st->print(BULLET"inner classes:     "); inner_classes()->print_value_on(st);     st->cr();
   st->print(BULLET"java mirror:       "); java_mirror()->print_value_on(st);       st->cr();
   st->print(BULLET"vtable length      %d  (start addr: " INTPTR_FORMAT ")", vtable_length(), start_of_vtable());  st->cr();
+  if (vtable_length() > 0 && (Verbose || WizardMode))  print_vtable(start_of_vtable(), vtable_length(), st);
   st->print(BULLET"itable length      %d (start addr: " INTPTR_FORMAT ")", itable_length(), start_of_itable()); st->cr();
+  if (itable_length() > 0 && (Verbose || WizardMode))  print_vtable(start_of_itable(), itable_length(), st);
   st->print_cr(BULLET"---- static fields (%d words):", static_field_size());
   FieldPrinter print_static_field(st);
   ((InstanceKlass*)this)->do_local_static_fields(&print_static_field);
@@ -2896,6 +2820,7 @@
 
 void InstanceKlass::print_value_on(outputStream* st) const {
   assert(is_klass(), "must be klass");
+  if (Verbose || WizardMode)  access_flags().print_on(st);
   name()->print_value_on(st);
 }
 
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -245,7 +245,6 @@
   MemberNameTable* _member_names;        // Member names
   JNIid*          _jni_ids;              // First JNI identifier for static fields in this class
   jmethodID*      _methods_jmethod_ids;  // jmethodIDs corresponding to method_idnum, or NULL if none
-  int*            _methods_cached_itable_indices;  // itable_index cache for JNI invoke corresponding to methods idnum, or NULL
   nmethodBucket*  _dependencies;         // list of dependent nmethods
   nmethod*        _osr_nmethods_head;    // Head of list of on-stack replacement nmethods for this class
   BreakpointInfo* _breakpoints;          // bpt lists, managed by Method*
@@ -690,10 +689,6 @@
                 size_t *length_p, jmethodID* id_p);
   jmethodID jmethod_id_or_null(Method* method);
 
-  // cached itable index support
-  void set_cached_itable_index(size_t idnum, int index);
-  int cached_itable_index(size_t idnum);
-
   // annotations support
   Annotations* annotations() const          { return _annotations; }
   void set_annotations(Annotations* anno)   { _annotations = anno; }
@@ -994,11 +989,6 @@
   void release_set_methods_jmethod_ids(jmethodID* jmeths)
          { OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths); }
 
-  int* methods_cached_itable_indices_acquire() const
-         { return (int*)OrderAccess::load_ptr_acquire(&_methods_cached_itable_indices); }
-  void release_set_methods_cached_itable_indices(int* indices)
-         { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); }
-
   // Lock during initialization
 public:
   // Lock for (1) initialization; (2) access to the ConstantPool of this class.
--- a/hotspot/src/share/vm/oops/instanceOop.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/instanceOop.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -37,9 +37,9 @@
 
   // If compressed, the offset of the fields of the instance may not be aligned.
   static int base_offset_in_bytes() {
-    // offset computation code breaks if UseCompressedKlassPointers
+    // offset computation code breaks if UseCompressedClassPointers
     // only is true
-    return (UseCompressedOops && UseCompressedKlassPointers) ?
+    return (UseCompressedOops && UseCompressedClassPointers) ?
              klass_gap_offset_in_bytes() :
              sizeof(instanceOopDesc);
   }
--- a/hotspot/src/share/vm/oops/klass.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/klass.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -674,13 +674,23 @@
 
 #ifndef PRODUCT
 
-void Klass::verify_vtable_index(int i) {
+bool Klass::verify_vtable_index(int i) {
   if (oop_is_instance()) {
-    assert(i>=0 && i<((InstanceKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
+    int limit = ((InstanceKlass*)this)->vtable_length()/vtableEntry::size();
+    assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
   } else {
     assert(oop_is_array(), "Must be");
-    assert(i>=0 && i<((ArrayKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
+    int limit = ((ArrayKlass*)this)->vtable_length()/vtableEntry::size();
+    assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
   }
+  return true;
+}
+
+bool Klass::verify_itable_index(int i) {
+  assert(oop_is_instance(), "");
+  int method_count = klassItable::method_count_for_interface(this);
+  assert(i >= 0 && i < method_count, "index out of bounds");
+  return true;
 }
 
 #endif
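
Returning bool instead of void lets callers fold the check into an assert, so the verification disappears from PRODUCT builds along with the assert itself. A small usage sketch follows; the helper is hypothetical, and k is assumed to be suitable for each check.

// Sketch: both checks compile away in PRODUCT because assert() does.
static void check_dispatch_indexes(Klass* k, int vtable_i, int itable_i) {
  assert(k->verify_vtable_index(vtable_i), "vtable index out of range");
  assert(k->verify_itable_index(itable_i), "itable index out of range");  // k must be an interface
}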
--- a/hotspot/src/share/vm/oops/klass.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -699,7 +699,8 @@
   void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
 
 #ifndef PRODUCT
-  void verify_vtable_index(int index);
+  bool verify_vtable_index(int index);
+  bool verify_itable_index(int index);
 #endif
 
   virtual void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/klassVtable.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/klassVtable.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -47,11 +47,12 @@
 
 
 // this function computes the vtable size (including the size needed for miranda
-// methods) and the number of miranda methods in this class
+// methods) and the number of miranda methods in this class.
 // Note on Miranda methods: Let's say there is a class C that implements
-// interface I.  Let's say there is a method m in I that neither C nor any
-// of its super classes implement (i.e there is no method of any access, with
-// the same name and signature as m), then m is a Miranda method which is
+// interface I, and none of C's superclasses implements I.
+// Let's say there is an abstract method m in I that neither C
+// nor any of its super classes implement (i.e. there is no method of any access,
+// with the same name and signature as m), then m is a Miranda method which is
 // entered as a public abstract method in C's vtable.  From then on it should
 // be treated as any other public method in C for method override purposes.
 void klassVtable::compute_vtable_size_and_num_mirandas(
@@ -111,10 +112,13 @@
 }
 
 int klassVtable::index_of(Method* m, int len) const {
-  assert(m->vtable_index() >= 0, "do not ask this of non-vtable methods");
+  assert(m->has_vtable_index(), "do not ask this of non-vtable methods");
   return m->vtable_index();
 }
 
+// Copy super class's vtable to the first part (prefix) of this class's vtable,
+// and return the number of entries copied.  Expects that 'super' is the Java
+// super class (arrays can have "array" super classes that must be skipped).
 int klassVtable::initialize_from_super(KlassHandle super) {
   if (super.is_null()) {
     return 0;
@@ -139,14 +143,14 @@
   }
 }
 
-// Revised lookup semantics   introduced 1.3 (Kestral beta)
+//
+// Revised lookup semantics   introduced 1.3 (Kestrel beta)
 void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
 
   // Note:  Arrays can have intermediate array supers.  Use java_super to skip them.
   KlassHandle super (THREAD, klass()->java_super());
   int nofNewEntries = 0;
 
-
   if (PrintVtables && !klass()->oop_is_array()) {
     ResourceMark rm(THREAD);
     tty->print_cr("Initializing: %s", _klass->name()->as_C_string());
@@ -174,8 +178,10 @@
     int len = methods->length();
     int initialized = super_vtable_len;
 
-    // update_inherited_vtable can stop for gc - ensure using handles
+    // Check each of this class's methods against super;
+    // if override, replace in copy of super vtable, otherwise append to end
     for (int i = 0; i < len; i++) {
+      // update_inherited_vtable can stop for gc - ensure using handles
       HandleMark hm(THREAD);
       assert(methods->at(i)->is_method(), "must be a Method*");
       methodHandle mh(THREAD, methods->at(i));
@@ -189,11 +195,11 @@
       }
     }
 
-    // add miranda methods; it will also update the value of initialized
-    fill_in_mirandas(&initialized);
+    // add miranda methods to end of vtable.
+    initialized = fill_in_mirandas(initialized);
 
     // In class hierarchies where the accessibility is not increasing (i.e., going from private ->
-    // package_private -> publicprotected), the vtable might actually be smaller than our initial
+    // package_private -> public/protected), the vtable might actually be smaller than our initial
     // calculation.
     assert(initialized <= _length, "vtable initialization failed");
     for(;initialized < _length; initialized++) {
@@ -248,14 +254,8 @@
   return superk;
 }
 
-// Methods that are "effectively" final don't need vtable entries.
-bool method_is_effectively_final(
-    AccessFlags klass_flags, methodHandle target) {
-  return target->is_final() || klass_flags.is_final() && !target->is_overpass();
-}
-
 // Update child's copy of super vtable for overrides
-// OR return true if a new vtable entry is required
+// OR return true if a new vtable entry is required.
 // Only called for InstanceKlass's, i.e. not for arrays
 // If that changed, could not use _klass as handle for klass
 bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len,
@@ -263,6 +263,7 @@
   ResourceMark rm;
   bool allocate_new = true;
   assert(klass->oop_is_instance(), "must be InstanceKlass");
+  assert(klass == target_method()->method_holder(), "caller resp.");
 
   // Initialize the method's vtable index to "nonvirtual".
   // If we allocate a vtable entry, we will update it to a non-negative number.
@@ -273,11 +274,17 @@
     return false;
   }
 
-  if (method_is_effectively_final(klass->access_flags(), target_method)) {
+  if (target_method->is_final_method(klass->access_flags())) {
     // a final method never needs a new entry; final methods can be statically
     // resolved and they have to be present in the vtable only if they override
     // a super's method, in which case they re-use its entry
     allocate_new = false;
+  } else if (klass->is_interface()) {
+    allocate_new = false;  // see note below in needs_new_vtable_entry
+    // An interface never allocates new vtable slots, only inherits old ones.
+    // This method will either be assigned its own itable index later,
+    // or be assigned an inherited vtable index in the loop below.
+    target_method()->set_vtable_index(Method::pending_itable_index);
   }
 
   // we need a new entry if there is no superclass
@@ -411,8 +418,14 @@
                                          Symbol* classname,
                                          AccessFlags class_flags,
                                          TRAPS) {
+  if (class_flags.is_interface()) {
+    // Interfaces do not use vtables, so there is no point to assigning
+    // a vtable index to any of their methods.  If we refrain from doing this,
+    // we can use Method::_vtable_index to hold the itable index
+    return false;
+  }
 
-  if (method_is_effectively_final(class_flags, target_method) ||
+  if (target_method->is_final_method(class_flags) ||
       // a final method never needs a new entry; final methods can be statically
       // resolved and they have to be present in the vtable only if they override
       // a super's method, in which case they re-use its entry
@@ -500,7 +513,8 @@
   return Method::invalid_vtable_index;
 }
 
-// check if an entry is miranda
+// check if an entry at an index is miranda
+// requires that method m at entry be declared ("held") by an interface.
 bool klassVtable::is_miranda_entry_at(int i) {
   Method* m = method_at(i);
   Klass* method_holder = m->method_holder();
@@ -516,7 +530,9 @@
   return false;
 }
 
-// check if a method is a miranda method, given a class's methods table and it's super
+// check if a method is a miranda method, given a class's methods table and its super
+// "miranda" means not static, not defined by this class, and not defined
+// in super unless it is private and therefore inaccessible to this class.
 // the caller must make sure that the method belongs to an interface implemented by the class
 bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) {
   if (m->is_static()) {
@@ -541,6 +557,14 @@
   return false;
 }
 
+// Scans current_interface_methods for miranda methods that do not
+// already appear in new_mirandas and are also not defined-and-non-private
+// in super (superclass).  These mirandas are added to all_mirandas if it is
+// not null; in addition, those that are not duplicates of miranda methods
+// inherited by super from its interfaces are added to new_mirandas.
+// Thus, new_mirandas will be the set of mirandas that this class introduces,
+// all_mirandas will be the set of all mirandas applicable to this class
+// including all defined in superclasses.
 void klassVtable::add_new_mirandas_to_lists(
     GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
     Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
@@ -599,17 +623,22 @@
   }
 }
 
-// fill in mirandas
-void klassVtable::fill_in_mirandas(int* initialized) {
+// Discover miranda methods ("miranda" = "interface abstract, no binding"),
+// and append them into the vtable starting at index initialized,
+// return the new value of initialized.
+int klassVtable::fill_in_mirandas(int initialized) {
   GrowableArray<Method*> mirandas(20);
   get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
                ik()->local_interfaces());
   for (int i = 0; i < mirandas.length(); i++) {
-    put_method_at(mirandas.at(i), *initialized);
-    ++(*initialized);
+    put_method_at(mirandas.at(i), initialized);
+    ++initialized;
   }
+  return initialized;
 }
 
+// Copy this class's vtable to the vtable beginning at start.
+// Used to copy superclass vtable to prefix of subclass's vtable.
 void klassVtable::copy_vtable_to(vtableEntry* start) {
   Copy::disjoint_words((HeapWord*)table(), (HeapWord*)start, _length * vtableEntry::size());
 }
@@ -723,6 +752,12 @@
 
 // Initialization
 void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
+  if (_klass->is_interface()) {
+    // This needs to go after vtable indexes are assigned but
+    // before implementors need to know the number of itable indexes.
+    assign_itable_indexes_for_interface(_klass());
+  }
+
   // Cannot be setup doing bootstrapping, interfaces don't have
   // itables, and klass with only ones entry have empty itables
   if (Universe::is_bootstrapping() ||
@@ -754,45 +789,89 @@
 }
 
 
+inline bool interface_method_needs_itable_index(Method* m) {
+  if (m->is_static())           return false;   // e.g., Stream.empty
+  if (m->is_initializer())      return false;   // <init> or <clinit>
+  // If an interface redeclares a method from java.lang.Object,
+  // it should already have a vtable index, don't touch it.
+  // e.g., CharSequence.toString (from initialize_vtable)
+  // if (m->has_vtable_index())  return false; // NO!
+  return true;
+}
+
+int klassItable::assign_itable_indexes_for_interface(Klass* klass) {
+  // an interface does not have an itable, but its methods need to be numbered
+  if (TraceItables) tty->print_cr("%3d: Initializing itable indexes for interface %s", ++initialize_count,
+                                  klass->name()->as_C_string());
+  Array<Method*>* methods = InstanceKlass::cast(klass)->methods();
+  int nof_methods = methods->length();
+  int ime_num = 0;
+  for (int i = 0; i < nof_methods; i++) {
+    Method* m = methods->at(i);
+    if (interface_method_needs_itable_index(m)) {
+      assert(!m->is_final_method(), "no final interface methods");
+      // If m is already assigned a vtable index, do not disturb it.
+      if (!m->has_vtable_index()) {
+        assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable");
+        m->set_itable_index(ime_num);
+        // Progress to next itable entry
+        ime_num++;
+      }
+    }
+  }
+  assert(ime_num == method_count_for_interface(klass), "proper sizing");
+  return ime_num;
+}
+
+int klassItable::method_count_for_interface(Klass* interf) {
+  assert(interf->oop_is_instance(), "must be");
+  assert(interf->is_interface(), "must be");
+  Array<Method*>* methods = InstanceKlass::cast(interf)->methods();
+  int nof_methods = methods->length();
+  while (nof_methods > 0) {
+    Method* m = methods->at(nof_methods-1);
+    if (m->has_itable_index()) {
+      int length = m->itable_index() + 1;
+#ifdef ASSERT
+      while (nof_methods > 0) {
+        m = methods->at(--nof_methods);
+        assert(!m->has_itable_index() || m->itable_index() < length, "");
+      }
+#endif //ASSERT
+      return length;  // return the rightmost itable index, plus one
+    }
+    nof_methods -= 1;
+  }
+  // no methods have itable indexes
+  return 0;
+}
+
+
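// (Illustrative sketch, not part of this changeset.)  The two helpers above establish
// a dense numbering: every non-static, non-initializer interface method that lacks a
// vtable index gets consecutive itable slots starting at 0, so the interface's method
// count equals the largest assigned index plus one.  A hypothetical dump of that contract:
static void print_itable_indexes(InstanceKlass* interf, outputStream* out) {
  ResourceMark rm;   // for as_C_string()
  Array<Method*>* methods = interf->methods();
  for (int i = 0; i < methods->length(); i++) {
    Method* m = methods->at(i);
    if (m->has_itable_index()) {
      out->print_cr("%s -> itable slot %d", m->name()->as_C_string(), m->itable_index());
    }
  }
}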
 void klassItable::initialize_itable_for_interface(int method_table_offset, KlassHandle interf_h, bool checkconstraints, TRAPS) {
   Array<Method*>* methods = InstanceKlass::cast(interf_h())->methods();
   int nof_methods = methods->length();
   HandleMark hm;
-  KlassHandle klass = _klass;
   assert(nof_methods > 0, "at least one method must exist for interface to be in vtable");
   Handle interface_loader (THREAD, InstanceKlass::cast(interf_h())->class_loader());
-  int ime_num = 0;
 
-  // Skip first Method* if it is a class initializer
-  int i = methods->at(0)->is_static_initializer() ? 1 : 0;
-
-  // m, method_name, method_signature, klass reset each loop so they
-  // don't need preserving across check_signature_loaders call
-  // methods needs a handle in case of gc from check_signature_loaders
-  for(; i < nof_methods; i++) {
+  int ime_count = method_count_for_interface(interf_h());
+  for (int i = 0; i < nof_methods; i++) {
     Method* m = methods->at(i);
-    Symbol* method_name = m->name();
-    Symbol* method_signature = m->signature();
-
-    // This is same code as in Linkresolver::lookup_instance_method_in_klasses
-    Method* target = klass->uncached_lookup_method(method_name, method_signature);
-    while (target != NULL && target->is_static()) {
-      // continue with recursive lookup through the superclass
-      Klass* super = target->method_holder()->super();
-      target = (super == NULL) ? (Method*)NULL : super->uncached_lookup_method(method_name, method_signature);
+    methodHandle target;
+    if (m->has_itable_index()) {
+      LinkResolver::lookup_instance_method_in_klasses(target, _klass, m->name(), m->signature(), CHECK);
     }
     if (target == NULL || !target->is_public() || target->is_abstract()) {
       // Entry does not resolve. Leave it empty.
     } else {
       // Entry did resolve, check loader constraints before initializing
       // if checkconstraints requested
-      methodHandle  target_h (THREAD, target); // preserve across gc
       if (checkconstraints) {
         Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
         if (method_holder_loader() != interface_loader()) {
           ResourceMark rm(THREAD);
           Symbol* failed_type_symbol =
-            SystemDictionary::check_signature_loaders(method_signature,
+            SystemDictionary::check_signature_loaders(m->signature(),
                                                       method_holder_loader,
                                                       interface_loader,
                                                       true, CHECK);
@@ -803,9 +882,9 @@
               "and the class loader (instance of %s) for interface "
               "%s have different Class objects for the type %s "
               "used in the signature";
-            char* sig = target_h()->name_and_sig_as_C_string();
+            char* sig = target()->name_and_sig_as_C_string();
             const char* loader1 = SystemDictionary::loader_name(method_holder_loader());
-            char* current = klass->name()->as_C_string();
+            char* current = _klass->name()->as_C_string();
             const char* loader2 = SystemDictionary::loader_name(interface_loader());
             char* iface = InstanceKlass::cast(interf_h())->name()->as_C_string();
             char* failed_type_name = failed_type_symbol->as_C_string();
@@ -821,10 +900,10 @@
       }
 
       // ime may have moved during GC so recalculate address
-      itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target_h());
+      int ime_num = m->itable_index();
+      assert(ime_num < ime_count, "oob");
+      itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target());
     }
-    // Progress to next entry
-    ime_num++;
   }
 }
 
@@ -913,20 +992,22 @@
   virtual void doit(Klass* intf, int method_count) = 0;
 };
 
-// Visit all interfaces with at-least one method (excluding <clinit>)
+// Visit all interfaces with at least one itable method
 void visit_all_interfaces(Array<Klass*>* transitive_intf, InterfaceVisiterClosure *blk) {
   // Handle array argument
   for(int i = 0; i < transitive_intf->length(); i++) {
     Klass* intf = transitive_intf->at(i);
     assert(intf->is_interface(), "sanity check");
 
-    // Find no. of methods excluding a <clinit>
-    int method_count = InstanceKlass::cast(intf)->methods()->length();
-    if (method_count > 0) {
-      Method* m = InstanceKlass::cast(intf)->methods()->at(0);
-      assert(m != NULL && m->is_method(), "sanity check");
-      if (m->name() == vmSymbols::object_initializer_name()) {
-        method_count--;
+    // Find no. of itable methods
+    int method_count = 0;
+    // method_count = klassItable::method_count_for_interface(intf);
+    Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
+    if (methods->length() > 0) {
+      for (int i = methods->length(); --i >= 0; ) {
+        if (interface_method_needs_itable_index(methods->at(i))) {
+          method_count++;
+        }
       }
     }
 
@@ -1024,40 +1105,26 @@
 }
 
 
-// m must be a method in an interface
-int klassItable::compute_itable_index(Method* m) {
-  InstanceKlass* intf = m->method_holder();
-  assert(intf->is_interface(), "sanity check");
-  Array<Method*>* methods = intf->methods();
-  int index = 0;
-  while(methods->at(index) != m) {
-    index++;
-    assert(index < methods->length(), "should find index for resolve_invoke");
-  }
-  // Adjust for <clinit>, which is left out of table if first method
-  if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
-    index--;
-  }
-  return index;
-}
-
-
-// inverse to compute_itable_index
+// inverse to itable_index
 Method* klassItable::method_for_itable_index(Klass* intf, int itable_index) {
   assert(InstanceKlass::cast(intf)->is_interface(), "sanity check");
+  assert(intf->verify_itable_index(itable_index), "");
   Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
 
+  if (itable_index < 0 || itable_index >= method_count_for_interface(intf))
+    return NULL;                // help caller defend against bad indexes
+
   int index = itable_index;
-  // Adjust for <clinit>, which is left out of table if first method
-  if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
-    index++;
+  Method* m = methods->at(index);
+  int index2 = -1;
+  while (!m->has_itable_index() ||
+         (index2 = m->itable_index()) != itable_index) {
+    assert(index2 < itable_index, "monotonic");
+    if (++index == methods->length())
+      return NULL;
+    m = methods->at(index);
   }
-
-  if (itable_index < 0 || index >= methods->length())
-    return NULL;                // help caller defend against bad indexes
-
-  Method* m = methods->at(index);
-  assert(compute_itable_index(m) == itable_index, "correct inverse");
+  assert(m->itable_index() == itable_index, "correct inverse");
 
   return m;
 }
--- a/hotspot/src/share/vm/oops/klassVtable.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/klassVtable.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -124,7 +124,7 @@
 
   // support for miranda methods
   bool is_miranda_entry_at(int i);
-  void fill_in_mirandas(int* initialized);
+  int fill_in_mirandas(int initialized);
   static bool is_miranda(Method* m, Array<Method*>* class_methods, Klass* super);
   static void add_new_mirandas_to_lists(
       GrowableArray<Method*>* new_mirandas,
@@ -150,6 +150,8 @@
 //      from_compiled_code_entry_point -> nmethod entry point
 //      from_interpreter_entry_point   -> i2cadapter
 class vtableEntry VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+
  public:
   // size in words
   static int size() {
@@ -288,12 +290,12 @@
 #endif // INCLUDE_JVMTI
 
   // Setup of itable
+  static int assign_itable_indexes_for_interface(Klass* klass);
+  static int method_count_for_interface(Klass* klass);
   static int compute_itable_size(Array<Klass*>* transitive_interfaces);
   static void setup_itable_offset_table(instanceKlassHandle klass);
 
   // Resolving of method to index
-  static int compute_itable_index(Method* m);
-  // ...and back again:
   static Method* method_for_itable_index(Klass* klass, int itable_index);
 
   // Debugging/Statistics
--- a/hotspot/src/share/vm/oops/method.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -509,24 +509,31 @@
   return _access_flags.has_loops();
 }
 
-
-bool Method::is_final_method() const {
-  // %%% Should return true for private methods also,
-  // since there is no way to override them.
-  return is_final() || method_holder()->is_final();
+bool Method::is_final_method(AccessFlags class_access_flags) const {
+  // or "does_not_require_vtable_entry"
+  // overpass can occur, is not final (reuses vtable entry)
+  // private methods get vtable entries for backward class compatibility.
+  if (is_overpass())  return false;
+  return is_final() || class_access_flags.is_final();
 }
 
-
-bool Method::is_strict_method() const {
-  return is_strict();
+bool Method::is_final_method() const {
+  return is_final_method(method_holder()->access_flags());
 }
 
-
-bool Method::can_be_statically_bound() const {
-  if (is_final_method())  return true;
+bool Method::can_be_statically_bound(AccessFlags class_access_flags) const {
+  if (is_final_method(class_access_flags))  return true;
+#ifdef ASSERT
+  bool is_nonv = (vtable_index() == nonvirtual_vtable_index);
+  if (class_access_flags.is_interface())  assert(is_nonv == is_static(), err_msg("is_nonv=%d", is_nonv));
+#endif
+  assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question");
   return vtable_index() == nonvirtual_vtable_index;
 }
 
+bool Method::can_be_statically_bound() const {
+  return can_be_statically_bound(method_holder()->access_flags());
+}
 
 bool Method::is_accessor() const {
   if (code_size() != 5) return false;
@@ -967,7 +974,7 @@
 
   assert(ik->is_subclass_of(method_holder()), "should be subklass");
   assert(ik->vtable() != NULL, "vtable should exist");
-  if (vtable_index() == nonvirtual_vtable_index) {
+  if (!has_vtable_index()) {
     return false;
   } else {
     Method* vt_m = ik->method_at_vtable(vtable_index());
@@ -1959,7 +1966,7 @@
 
 void Method::print_value_on(outputStream* st) const {
   assert(is_method(), "must be method");
-  st->print_cr(internal_name());
+  st->print(internal_name());
   print_address_on(st);
   st->print(" ");
   name()->print_value_on(st);
@@ -1967,6 +1974,7 @@
   signature()->print_value_on(st);
   st->print(" in ");
   method_holder()->print_value_on(st);
+  if (WizardMode) st->print("#%d", _vtable_index);
   if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals());
   if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code());
 }
--- a/hotspot/src/share/vm/oops/method.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -448,16 +448,22 @@
   enum VtableIndexFlag {
     // Valid vtable indexes are non-negative (>= 0).
     // These few negative values are used as sentinels.
-    highest_unused_vtable_index_value = -5,
+    itable_index_max        = -10, // first itable index, growing downward
+    pending_itable_index    = -9,  // itable index will be assigned
     invalid_vtable_index    = -4,  // distinct from any valid vtable index
     garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
     nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
     // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
   };
   DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
-  int  vtable_index() const                      { assert(valid_vtable_index(), "");
-                                                   return _vtable_index; }
+  bool has_vtable_index() const                  { return _vtable_index >= 0; }
+  int  vtable_index() const                      { return _vtable_index; }
   void set_vtable_index(int index)               { _vtable_index = index; }
+  DEBUG_ONLY(bool valid_itable_index() const     { return _vtable_index <= pending_itable_index; })
+  bool has_itable_index() const                  { return _vtable_index <= itable_index_max; }
+  int  itable_index() const                      { assert(valid_itable_index(), "");
+                                                   return itable_index_max - _vtable_index; }
+  void set_itable_index(int index)               { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
 
   // interpreter entry
   address interpreter_entry() const              { return _i2i_entry; }
@@ -560,10 +566,11 @@
 
   // checks method and its method holder
   bool is_final_method() const;
-  bool is_strict_method() const;
+  bool is_final_method(AccessFlags class_access_flags) const;
 
   // true if method needs no dynamic dispatch (final and/or no vtable entry)
   bool can_be_statically_bound() const;
+  bool can_be_statically_bound(AccessFlags class_access_flags) const;
 
   // returns true if the method has any backward branches.
   bool has_loops() {
@@ -740,10 +747,6 @@
   // so handles are not used to avoid deadlock.
   jmethodID find_jmethod_id_or_null()               { return method_holder()->jmethod_id_or_null(this); }
 
-  // JNI static invoke cached itable index accessors
-  int cached_itable_index()                         { return method_holder()->cached_itable_index(method_idnum()); }
-  void set_cached_itable_index(int index)           { method_holder()->set_cached_itable_index(method_idnum(), index); }
-
   // Support for inlining of intrinsic methods
   vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id;           }
   void     set_intrinsic_id(vmIntrinsics::ID id) {                           _intrinsic_id = (u1) id; }
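(Aside, not part of the patch: the method.hpp hunk above overloads the single _vtable_index field, so non-negative values stay vtable indexes while itable indexes grow downward from itable_index_max (-10). A minimal standalone sketch of that encoding and its inverse, in plain C++ rather than HotSpot code; the constant name simply mirrors the enum above.)

#include <cassert>

// Mirrors Method::itable_index_max from the hunk above.
const int itable_index_max = -10;

// set_itable_index(): itable index 0 is stored as -10, 1 as -11, and so on.
int encode_itable_index(int index)  { return itable_index_max - index; }

// itable_index(): recover the original index from the stored sentinel value.
int decode_itable_index(int stored) { return itable_index_max - stored; }

int main() {
  for (int i = 0; i < 8; i++) {
    int stored = encode_itable_index(i);
    assert(stored <= itable_index_max);       // corresponds to has_itable_index()
    assert(decode_itable_index(stored) == i); // the transform is its own inverse
  }
  return 0;
}

(The same arithmetic appears in set_itable_index() and itable_index() above; because the mapping is an involution, one expression serves both directions.)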
--- a/hotspot/src/share/vm/oops/methodData.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/methodData.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -72,6 +72,8 @@
 //
 // Overlay for generic profiling data.
 class DataLayout VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+
 private:
   // Every data layout begins with a header.  This header
   // contains a tag, which is used to indicate the size/layout
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -69,7 +69,7 @@
 }
 
 inline Klass* oopDesc::klass() const {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     return Klass::decode_klass_not_null(_metadata._compressed_klass);
   } else {
     return _metadata._klass;
@@ -78,7 +78,7 @@
 
 inline Klass* oopDesc::klass_or_null() const volatile {
   // can be NULL in CMS
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     return Klass::decode_klass(_metadata._compressed_klass);
   } else {
     return _metadata._klass;
@@ -86,19 +86,19 @@
 }
 
 inline int oopDesc::klass_gap_offset_in_bytes() {
-  assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
+  assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
   return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
 }
 
 inline Klass** oopDesc::klass_addr() {
   // Only used internally and with CMS and will not work with
   // UseCompressedOops
-  assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
+  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
   return (Klass**) &_metadata._klass;
 }
 
 inline narrowKlass* oopDesc::compressed_klass_addr() {
-  assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
+  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
   return &_metadata._compressed_klass;
 }
 
@@ -106,7 +106,7 @@
   // since klasses are promoted no store check is needed
   assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
   assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     *compressed_klass_addr() = Klass::encode_klass_not_null(k);
   } else {
     *klass_addr() = k;
@@ -118,7 +118,7 @@
 }
 
 inline void oopDesc::set_klass_gap(int v) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
   }
 }
@@ -126,7 +126,7 @@
 inline void oopDesc::set_klass_to_list_ptr(oop k) {
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
   } else {
     _metadata._klass = (Klass*)(address)k;
@@ -135,7 +135,7 @@
 
 inline oop oopDesc::list_ptr_from_klass() {
   // This is only to be used during GC, for from-space objects.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     return decode_heap_oop((narrowOop)_metadata._compressed_klass);
   } else {
     // Special case for GC
--- a/hotspot/src/share/vm/oops/symbol.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/oops/symbol.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -45,7 +45,7 @@
 // in the SymbolTable bucket (the _literal field in HashtableEntry)
 // that points to the Symbol.  All other stores of a Symbol*
 // to a field of a persistent variable (e.g., the _name filed in
-// FieldAccessInfo or _ptr in a CPSlot) is reference counted.
+// fieldDescriptor or _ptr in a CPSlot) is reference counted.
 //
 // 1) The lookup of a "name" in the SymbolTable either creates a Symbol F for
 // "name" and returns a pointer to F or finds a pre-existing Symbol F for
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1932,7 +1932,7 @@
 #ifdef _LP64
   // Push DecodeN/DecodeNKlass down through phi.
   // The rest of phi graph will transform by split EncodeP node though phis up.
-  if ((UseCompressedOops || UseCompressedKlassPointers) && can_reshape && progress == NULL) {
+  if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) {
     bool may_push = true;
     bool has_decodeN = false;
     bool is_decodeN = false;
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -122,40 +122,23 @@
   return score;
 }
 
-LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
-  memset( _lidxs, 0, sizeof(uint)*max );
-}
-
-void LRG_List::extend( uint nidx, uint lidx ) {
-  _nesting.check();
-  if( nidx >= _max ) {
-    uint size = 16;
-    while( size <= nidx ) size <<=1;
-    _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
-    _max = size;
-  }
-  while( _cnt <= nidx )
-    _lidxs[_cnt++] = 0;
-  _lidxs[nidx] = lidx;
-}
-
 #define NUMBUCKS 3
 
 // Straight out of Tarjan's union-find algorithm
 uint LiveRangeMap::find_compress(uint lrg) {
   uint cur = lrg;
-  uint next = _uf_map[cur];
+  uint next = _uf_map.at(cur);
   while (next != cur) { // Scan chain of equivalences
     assert( next < cur, "always union smaller");
     cur = next; // until find a fixed-point
-    next = _uf_map[cur];
+    next = _uf_map.at(cur);
   }
 
   // Core of union-find algorithm: update chain of
   // equivalences to be equal to the root.
   while (lrg != next) {
-    uint tmp = _uf_map[lrg];
-    _uf_map.map(lrg, next);
+    uint tmp = _uf_map.at(lrg);
+    _uf_map.at_put(lrg, next);
     lrg = tmp;
   }
   return lrg;
@@ -165,10 +148,10 @@
 void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
   _max_lrg_id= max_lrg_id;
   // Force the Union-Find mapping to be at least this large
-  _uf_map.extend(_max_lrg_id, 0);
+  _uf_map.at_put_grow(_max_lrg_id, 0);
   // Initialize it to be the ID mapping.
   for (uint i = 0; i < _max_lrg_id; ++i) {
-    _uf_map.map(i, i);
+    _uf_map.at_put(i, i);
   }
 }
 
@@ -176,12 +159,12 @@
 // the Union-Find mapping after this call.
 void LiveRangeMap::compress_uf_map_for_nodes() {
   // For all Nodes, compress mapping
-  uint unique = _names.Size();
+  uint unique = _names.length();
   for (uint i = 0; i < unique; ++i) {
-    uint lrg = _names[i];
+    uint lrg = _names.at(i);
     uint compressed_lrg = find(lrg);
     if (lrg != compressed_lrg) {
-      _names.map(i, compressed_lrg);
+      _names.at_put(i, compressed_lrg);
     }
   }
 }
@@ -198,11 +181,11 @@
     return lrg;
   }
 
-  uint next = _uf_map[lrg];
+  uint next = _uf_map.at(lrg);
   while (next != lrg) { // Scan chain of equivalences
     assert(next < lrg, "always union smaller");
     lrg = next; // until find a fixed-point
-    next = _uf_map[lrg];
+    next = _uf_map.at(lrg);
   }
   return next;
 }
@@ -215,7 +198,7 @@
        NULL
 #endif
        )
-  , _lrg_map(unique)
+  , _lrg_map(Thread::current()->resource_area(), unique)
   , _live(0)
   , _spilled_once(Thread::current()->resource_area())
   , _spilled_twice(Thread::current()->resource_area())
@@ -692,6 +675,7 @@
       _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
     }
   }
+
   // Reset the Union-Find mapping to be identity
   _lrg_map.reset_uf_map(lr_counter);
 }
--- a/hotspot/src/share/vm/opto/chaitin.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -283,8 +283,8 @@
 
   // Straight out of Tarjan's union-find algorithm
   uint find_compress(const Node *node) {
-    uint lrg_id = find_compress(_names[node->_idx]);
-    _names.map(node->_idx, lrg_id);
+    uint lrg_id = find_compress(_names.at(node->_idx));
+    _names.at_put(node->_idx, lrg_id);
     return lrg_id;
   }
 
@@ -305,40 +305,40 @@
   }
 
   uint size() const {
-    return _names.Size();
+    return _names.length();
   }
 
   uint live_range_id(uint idx) const {
-    return _names[idx];
+    return _names.at(idx);
   }
 
   uint live_range_id(const Node *node) const {
-    return _names[node->_idx];
+    return _names.at(node->_idx);
   }
 
   uint uf_live_range_id(uint lrg_id) const {
-    return _uf_map[lrg_id];
+    return _uf_map.at(lrg_id);
   }
 
   void map(uint idx, uint lrg_id) {
-    _names.map(idx, lrg_id);
+    _names.at_put(idx, lrg_id);
   }
 
   void uf_map(uint dst_lrg_id, uint src_lrg_id) {
-    _uf_map.map(dst_lrg_id, src_lrg_id);
+    _uf_map.at_put(dst_lrg_id, src_lrg_id);
   }
 
   void extend(uint idx, uint lrg_id) {
-    _names.extend(idx, lrg_id);
+    _names.at_put_grow(idx, lrg_id);
   }
 
   void uf_extend(uint dst_lrg_id, uint src_lrg_id) {
-    _uf_map.extend(dst_lrg_id, src_lrg_id);
+    _uf_map.at_put_grow(dst_lrg_id, src_lrg_id);
   }
 
-  LiveRangeMap(uint unique)
-  : _names(unique)
-  , _uf_map(unique)
+  LiveRangeMap(Arena* arena, uint unique)
+  : _names(arena, unique, unique, 0)
+  , _uf_map(arena, unique, unique, 0)
   , _max_lrg_id(0) {}
 
   uint find_id( const Node *n ) {
@@ -355,14 +355,14 @@
   void compress_uf_map_for_nodes();
 
   uint find(uint lidx) {
-    uint uf_lidx = _uf_map[lidx];
+    uint uf_lidx = _uf_map.at(lidx);
     return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx);
   }
 
   // Convert a Node into a Live Range Index - a lidx
   uint find(const Node *node) {
     uint lidx = live_range_id(node);
-    uint uf_lidx = _uf_map[lidx];
+    uint uf_lidx = _uf_map.at(lidx);
     return (uf_lidx == lidx) ? uf_lidx : find_compress(node);
   }
 
@@ -371,10 +371,10 @@
 
   // Like Find above, but no path compress, so bad asymptotic behavior
   uint find_const(const Node *node) const {
-    if(node->_idx >= _names.Size()) {
+    if(node->_idx >= (uint)_names.length()) {
       return 0; // not mapped, usual for debug dump
     }
-    return find_const(_names[node->_idx]);
+    return find_const(_names.at(node->_idx));
   }
 };
 
--- a/hotspot/src/share/vm/opto/coalesce.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/coalesce.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -29,7 +29,6 @@
 
 class LoopTree;
 class LRG;
-class LRG_List;
 class Matcher;
 class PhaseIFG;
 class PhaseCFG;
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -2646,7 +2646,7 @@
             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
             "Base pointers must match" );
 #ifdef _LP64
-    if ((UseCompressedOops || UseCompressedKlassPointers) &&
+    if ((UseCompressedOops || UseCompressedClassPointers) &&
         addp->Opcode() == Op_ConP &&
         addp == n->in(AddPNode::Base) &&
         n->in(AddPNode::Offset)->is_Con()) {
@@ -3033,7 +3033,7 @@
 
   // Skip next transformation if compressed oops are not used.
   if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
-      (!UseCompressedOops && !UseCompressedKlassPointers))
+      (!UseCompressedOops && !UseCompressedClassPointers))
     return;
 
   // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
--- a/hotspot/src/share/vm/opto/connode.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/connode.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -630,7 +630,7 @@
   if (t == Type::TOP) return Type::TOP;
   assert (t != TypePtr::NULL_PTR, "null klass?");
 
-  assert(UseCompressedKlassPointers && t->isa_klassptr(), "only klass ptr here");
+  assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
   return t->make_narrowklass();
 }
 
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -3734,6 +3734,8 @@
                                              RegionNode* slow_region) {
   ciMethod* method = callee();
   int vtable_index = method->vtable_index();
+  assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+         err_msg_res("bad index %d", vtable_index));
   // Get the Method* out of the appropriate vtable entry.
   int entry_offset  = (InstanceKlass::vtable_start_offset() +
                      vtable_index*vtableEntry::size()) * wordSize +
@@ -3784,6 +3786,8 @@
       // so the vtable index is fixed.
       // No need to use the linkResolver to get it.
        vtable_index = method->vtable_index();
+       assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+              err_msg_res("bad index %d", vtable_index));
     }
     slow_call = new(C) CallDynamicJavaNode(tf,
                           SharedRuntime::get_resolve_virtual_call_stub(),
@@ -4204,7 +4208,7 @@
   // 12 - 64-bit VM, compressed klass
   // 16 - 64-bit VM, normal klass
   if (base_off % BytesPerLong != 0) {
-    assert(UseCompressedKlassPointers, "");
+    assert(UseCompressedClassPointers, "");
     if (is_array) {
       // Exclude length to copy by 8 bytes words.
       base_off += sizeof(int);
--- a/hotspot/src/share/vm/opto/live.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/live.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -91,7 +91,7 @@
         break;
       }
 
-      uint r = _names[n->_idx];
+      uint r = _names.at(n->_idx);
       assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
       def->insert( r );
       use->remove( r );
@@ -100,7 +100,7 @@
         Node *nk = n->in(k);
         uint nkidx = nk->_idx;
         if (_cfg.get_block_for_node(nk) != block) {
-          uint u = _names[nkidx];
+          uint u = _names.at(nkidx);
           use->insert(u);
           DEBUG_ONLY(def_outside->insert(u);)
         }
@@ -112,7 +112,7 @@
 #endif
     // Remove anything defined by Phis and the block start instruction
     for (uint k = i; k > 0; k--) {
-      uint r = _names[block->get_node(k - 1)->_idx];
+      uint r = _names.at(block->get_node(k - 1)->_idx);
       def->insert(r);
       use->remove(r);
     }
@@ -124,7 +124,7 @@
 
       // PhiNode uses go in the live-out set of prior blocks.
       for (uint k = i; k > 0; k--) {
-        add_liveout(p, _names[block->get_node(k-1)->in(l)->_idx], first_pass);
+        add_liveout(p, _names.at(block->get_node(k-1)->in(l)->_idx), first_pass);
       }
     }
     freeset(block);
@@ -256,7 +256,7 @@
   tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
   uint cnt = b->number_of_nodes();
   for( uint i=0; i<cnt; i++ ) {
-    tty->print("L%d/", _names[b->get_node(i)->_idx] );
+    tty->print("L%d/", _names.at(b->get_node(i)->_idx));
     b->get_node(i)->dump();
   }
   tty->print("\n");
@@ -321,7 +321,7 @@
 #ifdef _LP64
                       UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
                       UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
-                      UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
+                      UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
 #endif
                       check->as_Mach()->ideal_Opcode() == Op_LoadP ||
                       check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
--- a/hotspot/src/share/vm/opto/live.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/live.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -40,27 +40,7 @@
 //------------------------------LRG_List---------------------------------------
 // Map Node indices to Live RanGe indices.
 // Array lookup in the optimized case.
-class LRG_List : public ResourceObj {
-  friend class VMStructs;
-  uint _cnt, _max;
-  uint* _lidxs;
-  ReallocMark _nesting;         // assertion check for reallocations
-public:
-  LRG_List( uint max );
-
-  uint lookup( uint nidx ) const {
-    return _lidxs[nidx];
-  }
-  uint operator[] (uint nidx) const { return lookup(nidx); }
-
-  void map( uint nidx, uint lidx ) {
-    assert( nidx < _cnt, "oob" );
-    _lidxs[nidx] = lidx;
-  }
-  void extend( uint nidx, uint lidx );
-
-  uint Size() const { return _cnt; }
-};
+typedef GrowableArray<uint> LRG_List;
 
 //------------------------------PhaseLive--------------------------------------
 // Compute live-in/live-out
--- a/hotspot/src/share/vm/opto/macro.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -2191,7 +2191,7 @@
       Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
       klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
 #ifdef _LP64
-      if (UseCompressedKlassPointers && klass_node->is_DecodeNKlass()) {
+      if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
         assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
         klass_node->in(1)->init_req(0, ctrl);
       } else
--- a/hotspot/src/share/vm/opto/memnode.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -2031,7 +2031,7 @@
   assert(adr_type != NULL, "expecting TypeKlassPtr");
 #ifdef _LP64
   if (adr_type->is_ptr_to_narrowklass()) {
-    assert(UseCompressedKlassPointers, "no compressed klasses");
+    assert(UseCompressedClassPointers, "no compressed klasses");
     Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
     return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
   }
@@ -2369,7 +2369,7 @@
       val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
       return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
-               (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() &&
+               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                 adr->bottom_type()->isa_rawptr())) {
       val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
       return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
--- a/hotspot/src/share/vm/opto/type.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/opto/type.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -2416,7 +2416,7 @@
 #ifdef _LP64
   if (_offset != 0) {
     if (_offset == oopDesc::klass_offset_in_bytes()) {
-      _is_ptr_to_narrowklass = UseCompressedKlassPointers;
+      _is_ptr_to_narrowklass = UseCompressedClassPointers;
     } else if (klass() == NULL) {
       // Array with unknown body type
       assert(this->isa_aryptr(), "only arrays without klass");
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1336,6 +1336,7 @@
       if (call_type == JNI_VIRTUAL) {
         // jni_GetMethodID makes sure class is linked and initialized
         // so m should have a valid vtable index.
+        assert(!m->has_itable_index(), "");
         int vtbl_index = m->vtable_index();
         if (vtbl_index != Method::nonvirtual_vtable_index) {
           Klass* k = h_recv->klass();
@@ -1355,12 +1356,7 @@
       // interface call
       KlassHandle h_holder(THREAD, holder);
 
-      int itbl_index = m->cached_itable_index();
-      if (itbl_index == -1) {
-        itbl_index = klassItable::compute_itable_index(m);
-        m->set_cached_itable_index(itbl_index);
-        // the above may have grabbed a lock, 'm' and anything non-handlized can't be used again
-      }
+      int itbl_index = m->itable_index();
       Klass* k = h_recv->klass();
       selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
     }
@@ -5037,6 +5033,7 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif
 #include "utilities/quickSort.hpp"
+#include "utilities/ostream.hpp"
 #if INCLUDE_VM_STRUCTS
 #include "runtime/vmStructs.hpp"
 #endif
@@ -5048,18 +5045,23 @@
 // Forward declaration
 void TestReservedSpace_test();
 void TestReserveMemorySpecial_test();
+void TestVirtualSpace_test();
+void MetaspaceAux_test();
 
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
     run_unit_test(TestReservedSpace_test());
     run_unit_test(TestReserveMemorySpecial_test());
+    run_unit_test(TestVirtualSpace_test());
+    run_unit_test(MetaspaceAux_test());
     run_unit_test(GlobalDefinitions::test_globals());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
     run_unit_test(CollectedHeap::test_is_in());
     run_unit_test(QuickSort::test_quick_sort());
     run_unit_test(AltHashing::test_alt_hash());
+    run_unit_test(test_loggc_filename());
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
--- a/hotspot/src/share/vm/prims/jvm.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1824,7 +1824,7 @@
     }
 
     if (!publicOnly || fs.access_flags().is_public()) {
-      fd.initialize(k(), fs.index());
+      fd.reinitialize(k(), fs.index());
       oop field = Reflection::new_field(&fd, UseNewReflection, CHECK_NULL);
       result->obj_at_put(out_idx, field);
       ++out_idx;
--- a/hotspot/src/share/vm/prims/jvmti.xml	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmti.xml	Fri Sep 20 11:09:25 2013 -0700
@@ -458,8 +458,10 @@
       the same name from being loaded dynamically.
 <p/>
       The VM will invoke the Agent_OnUnload_L function of the agent, if such
-      a function is exported, at the same point during startup as it would
-      have called the dynamic entry point Agent_OnUnLoad.
+      a function is exported, at the same point during VM execution as it would
+      have called the dynamic entry point Agent_OnUnLoad. A statically loaded
+      agent cannot be unloaded. The Agent_OnUnload_L function will still be
+      called to do any other agent shutdown related tasks. 
       If a <i>statically linked</i> agent L exports a function called
       Agent_OnUnLoad_L and a function called Agent_OnUnLoad, the Agent_OnUnLoad
       function will be ignored.
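(Aside, for the jvmti.xml paragraph above; an illustration only, not part of this changeset. A statically linked agent named L exports the _L-suffixed hooks, and the VM still calls the unload hook for shutdown work even though the library image itself is never unloaded. A minimal sketch in C++, assuming a library prefix of L.)

#include <jvmti.h>

// Called by the VM at the point where a dynamically loaded agent's
// Agent_OnUnload would run; the statically linked library stays in the process.
extern "C" JNIEXPORT void JNICALL Agent_OnUnload_L(JavaVM* vm) {
  // Release agent-owned resources (threads, buffers, open files) here;
  // do not expect the library itself to be removed from the process.
}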
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1072,8 +1072,17 @@
     }
 
     res = merge_cp_and_rewrite(the_class, scratch_class, THREAD);
-    if (res != JVMTI_ERROR_NONE) {
-      return res;
+    if (HAS_PENDING_EXCEPTION) {
+      Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
+      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+      RC_TRACE_WITH_THREAD(0x00000002, THREAD,
+        ("merge_cp_and_rewrite exception: '%s'", ex_name->as_C_string()));
+      CLEAR_PENDING_EXCEPTION;
+      if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) {
+        return JVMTI_ERROR_OUT_OF_MEMORY;
+      } else {
+        return JVMTI_ERROR_INTERNAL;
+      }
     }
 
     if (VerifyMergedCPBytecodes) {
@@ -1105,6 +1114,9 @@
     }
     if (HAS_PENDING_EXCEPTION) {
       Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
+      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+      RC_TRACE_WITH_THREAD(0x00000002, THREAD,
+        ("Rewriter::rewrite or link_methods exception: '%s'", ex_name->as_C_string()));
       CLEAR_PENDING_EXCEPTION;
       if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) {
         return JVMTI_ERROR_OUT_OF_MEMORY;
@@ -1395,8 +1407,8 @@
   ClassLoaderData* loader_data = the_class->class_loader_data();
   ConstantPool* merge_cp_oop =
     ConstantPool::allocate(loader_data,
-                                  merge_cp_length,
-                                  THREAD);
+                           merge_cp_length,
+                           CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
   MergeCPCleaner cp_cleaner(loader_data, merge_cp_oop);
 
   HandleMark hm(THREAD);  // make sure handles are cleared before
@@ -1472,7 +1484,8 @@
 
       // Replace the new constant pool with a shrunken copy of the
       // merged constant pool
-      set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length, THREAD);
+      set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length,
+                            CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
       // The new constant pool replaces scratch_cp so have cleaner clean it up.
       // It can't be cleaned up while there are handles to it.
       cp_cleaner.add_scratch_cp(scratch_cp());
@@ -1502,7 +1515,8 @@
     // merged constant pool so now the rewritten bytecodes have
     // valid references; the previous new constant pool will get
     // GCed.
-    set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length, THREAD);
+    set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length,
+                          CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
     // The new constant pool replaces scratch_cp so have cleaner clean it up.
     // It can't be cleaned up while there are handles to it.
     cp_cleaner.add_scratch_cp(scratch_cp());
@@ -1590,11 +1604,23 @@
   for (int i = methods->length() - 1; i >= 0; i--) {
     methodHandle method(THREAD, methods->at(i));
     methodHandle new_method;
-    rewrite_cp_refs_in_method(method, &new_method, CHECK_false);
+    rewrite_cp_refs_in_method(method, &new_method, THREAD);
     if (!new_method.is_null()) {
       // the method has been replaced so save the new method version
+      // even in the case of an exception.  original method is on the
+      // deallocation list.
       methods->at_put(i, new_method());
     }
+    if (HAS_PENDING_EXCEPTION) {
+      Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
+      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+      RC_TRACE_WITH_THREAD(0x00000002, THREAD,
+        ("rewrite_cp_refs_in_method exception: '%s'", ex_name->as_C_string()));
+      // Need to clear pending exception here as the super caller sets
+      // the JVMTI_ERROR_INTERNAL if the returned value is false.
+      CLEAR_PENDING_EXCEPTION;
+      return false;
+    }
   }
 
   return true;
@@ -1674,10 +1700,7 @@
               Pause_No_Safepoint_Verifier pnsv(&nsv);
 
               // ldc is 2 bytes and ldc_w is 3 bytes
-              m = rc.insert_space_at(bci, 3, inst_buffer, THREAD);
-              if (m.is_null() || HAS_PENDING_EXCEPTION) {
-                guarantee(false, "insert_space_at() failed");
-              }
+              m = rc.insert_space_at(bci, 3, inst_buffer, CHECK);
             }
 
             // return the new method so that the caller can update
@@ -2487,8 +2510,8 @@
   // scratch_cp is a merged constant pool and has enough space for a
   // worst case merge situation. We want to associate the minimum
   // sized constant pool with the klass to save space.
-  constantPoolHandle smaller_cp(THREAD,
-          ConstantPool::allocate(loader_data, scratch_cp_length, THREAD));
+  ConstantPool* cp = ConstantPool::allocate(loader_data, scratch_cp_length, CHECK);
+  constantPoolHandle smaller_cp(THREAD, cp);
 
   // preserve version() value in the smaller copy
   int version = scratch_cp->version();
@@ -2500,6 +2523,11 @@
   smaller_cp->set_pool_holder(scratch_class());
 
   scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    // Exception is handled in the caller
+    loader_data->add_to_deallocate_list(smaller_cp());
+    return;
+  }
   scratch_cp = smaller_cp;
 
   // attach new constant pool to klass
@@ -2930,7 +2958,7 @@
   for (int i = 0; i < _deleted_methods_length; ++i) {
     Method* old_method = _deleted_methods[i];
 
-    assert(old_method->vtable_index() < 0,
+    assert(!old_method->has_vtable_index(),
            "cannot delete methods with vtable entries");;
 
     // Mark all deleted methods as old and obsolete
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -127,25 +127,37 @@
 }
 
 oop MethodHandles::init_MemberName(Handle mname, Handle target) {
+  // This method is used from java.lang.invoke.MemberName constructors.
+  // It fills in the new MemberName from a java.lang.reflect.Member.
   Thread* thread = Thread::current();
   oop target_oop = target();
   Klass* target_klass = target_oop->klass();
   if (target_klass == SystemDictionary::reflect_Field_klass()) {
     oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder()
     int slot  = java_lang_reflect_Field::slot(target_oop);  // fd.index()
-    int mods  = java_lang_reflect_Field::modifiers(target_oop);
-    oop type  = java_lang_reflect_Field::type(target_oop);
-    oop name  = java_lang_reflect_Field::name(target_oop);
     KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
-    intptr_t offset = InstanceKlass::cast(k())->field_offset(slot);
-    return init_field_MemberName(mname, k, accessFlags_from(mods), type, name, offset);
+    if (!k.is_null() && k->oop_is_instance()) {
+      fieldDescriptor fd(InstanceKlass::cast(k()), slot);
+      oop mname2 = init_field_MemberName(mname, fd);
+      if (mname2 != NULL) {
+        // Since we have the reified name and type handy, add them to the result.
+        if (java_lang_invoke_MemberName::name(mname2) == NULL)
+          java_lang_invoke_MemberName::set_name(mname2, java_lang_reflect_Field::name(target_oop));
+        if (java_lang_invoke_MemberName::type(mname2) == NULL)
+          java_lang_invoke_MemberName::set_type(mname2, java_lang_reflect_Field::type(target_oop));
+      }
+      return mname2;
+    }
   } else if (target_klass == SystemDictionary::reflect_Method_klass()) {
     oop clazz  = java_lang_reflect_Method::clazz(target_oop);
     int slot   = java_lang_reflect_Method::slot(target_oop);
     KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
     if (!k.is_null() && k->oop_is_instance()) {
       Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
-      return init_method_MemberName(mname, m, true, k);
+      if (m == NULL || is_signature_polymorphic(m->intrinsic_id()))
+        return NULL;            // do not resolve unless there is a concrete signature
+      CallInfo info(m, k());
+      return init_method_MemberName(mname, info);
     }
   } else if (target_klass == SystemDictionary::reflect_Constructor_klass()) {
     oop clazz  = java_lang_reflect_Constructor::clazz(target_oop);
@@ -153,65 +165,50 @@
     KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
     if (!k.is_null() && k->oop_is_instance()) {
       Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
-      return init_method_MemberName(mname, m, false, k);
-    }
-  } else if (target_klass == SystemDictionary::MemberName_klass()) {
-    // Note: This only works if the MemberName has already been resolved.
-    oop clazz        = java_lang_invoke_MemberName::clazz(target_oop);
-    int flags        = java_lang_invoke_MemberName::flags(target_oop);
-    Metadata* vmtarget=java_lang_invoke_MemberName::vmtarget(target_oop);
-    intptr_t vmindex = java_lang_invoke_MemberName::vmindex(target_oop);
-    KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
-    int ref_kind     = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK;
-    if (vmtarget == NULL)  return NULL;  // not resolved
-    if ((flags & IS_FIELD) != 0) {
-      assert(vmtarget->is_klass(), "field vmtarget is Klass*");
-      int basic_mods = (ref_kind_is_static(ref_kind) ? JVM_ACC_STATIC : 0);
-      // FIXME:  how does k (receiver_limit) contribute?
-      KlassHandle k_vmtarget(thread, (Klass*)vmtarget);
-      return init_field_MemberName(mname, k_vmtarget, accessFlags_from(basic_mods), NULL, NULL, vmindex);
-    } else if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) {
-      assert(vmtarget->is_method(), "method or constructor vmtarget is Method*");
-      return init_method_MemberName(mname, (Method*)vmtarget, ref_kind_does_dispatch(ref_kind), k);
-    } else {
-      return NULL;
+      if (m == NULL)  return NULL;
+      CallInfo info(m, k());
+      return init_method_MemberName(mname, info);
     }
   }
   return NULL;
 }
 
-oop MethodHandles::init_method_MemberName(Handle mname, Method* m, bool do_dispatch,
-                                          KlassHandle receiver_limit_h) {
-  Klass* receiver_limit = receiver_limit_h();
-  AccessFlags mods = m->access_flags();
-  int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
-  int vmindex = Method::nonvirtual_vtable_index; // implies never any dispatch
-  Klass* mklass = m->method_holder();
-  if (receiver_limit == NULL)
-    receiver_limit = mklass;
-  if (m->is_initializer()) {
-    flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
-  } else if (mods.is_static()) {
-    flags |= IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT);
-  } else if (receiver_limit != mklass &&
-             !receiver_limit->is_subtype_of(mklass)) {
-    return NULL;  // bad receiver limit
-  } else if (do_dispatch && receiver_limit->is_interface() &&
-             mklass->is_interface()) {
+oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
+  assert(info.resolved_appendix().is_null(), "only normal methods here");
+  KlassHandle receiver_limit = info.resolved_klass();
+  methodHandle m = info.resolved_method();
+  int flags = (jushort)( m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
+  int vmindex = Method::invalid_vtable_index;
+
+  switch (info.call_kind()) {
+  case CallInfo::itable_call:
+    vmindex = info.itable_index();
+    // More importantly, the itable index only works with the method holder.
+    receiver_limit = m->method_holder();
+    assert(receiver_limit->verify_itable_index(vmindex), "");
     flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT);
-    receiver_limit = mklass;  // ignore passed-in limit; interfaces are interconvertible
-    vmindex = klassItable::compute_itable_index(m);
-  } else if (do_dispatch && mklass != receiver_limit && mklass->is_interface()) {
+    break;
+
+  case CallInfo::vtable_call:
+    vmindex = info.vtable_index();
     flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
-    // it is a miranda method, so m->vtable_index is not what we want
-    ResourceMark rm;
-    klassVtable* vt = InstanceKlass::cast(receiver_limit)->vtable();
-    vmindex = vt->index_of_miranda(m->name(), m->signature());
-  } else if (!do_dispatch || m->can_be_statically_bound()) {
-    flags |= IS_METHOD | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
-  } else {
-    flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
-    vmindex = m->vtable_index();
+    assert(receiver_limit->is_subtype_of(m->method_holder()), "virtual call must be type-safe");
+    break;
+
+  case CallInfo::direct_call:
+    vmindex = Method::nonvirtual_vtable_index;
+    if (m->is_static()) {
+      flags |= IS_METHOD      | (JVM_REF_invokeStatic  << REFERENCE_KIND_SHIFT);
+    } else if (m->is_initializer()) {
+      flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
+      assert(receiver_limit == m->method_holder(), "constructor call must be exactly typed");
+    } else {
+      flags |= IS_METHOD      | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
+      assert(receiver_limit->is_subtype_of(m->method_holder()), "special call must be type-safe");
+    }
+    break;
+
+  default:  assert(false, "bad CallInfo");  return NULL;
   }
 
   // @CallerSensitive annotation detected
@@ -221,7 +218,7 @@
 
   oop mname_oop = mname();
   java_lang_invoke_MemberName::set_flags(   mname_oop, flags);
-  java_lang_invoke_MemberName::set_vmtarget(mname_oop, m);
+  java_lang_invoke_MemberName::set_vmtarget(mname_oop, m());
   java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex);   // vtable/itable index
   java_lang_invoke_MemberName::set_clazz(   mname_oop, receiver_limit->java_mirror());
   // Note:  name and type can be lazily computed by resolve_MemberName,
@@ -237,59 +234,19 @@
   return mname();
 }
 
-Handle MethodHandles::init_method_MemberName(Handle mname, CallInfo& info, TRAPS) {
-  Handle empty;
-  if (info.resolved_appendix().not_null()) {
-    // The resolved MemberName must not be accompanied by an appendix argument,
-    // since there is no way to bind this value into the MemberName.
-    // Caller is responsible to prevent this from happening.
-    THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty);
-  }
-  methodHandle m = info.resolved_method();
-  KlassHandle defc = info.resolved_klass();
-  int vmindex = Method::invalid_vtable_index;
-  if (defc->is_interface() && m->method_holder()->is_interface()) {
-    // static interface methods do not reference vtable or itable
-    if (m->is_static()) {
-      vmindex = Method::nonvirtual_vtable_index;
-    }
-    // interface methods invoked via invokespecial also
-    // do not reference vtable or itable.
-    int ref_kind = ((java_lang_invoke_MemberName::flags(mname()) >>
-                     REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK);
-    if (ref_kind == JVM_REF_invokeSpecial) {
-      vmindex = Method::nonvirtual_vtable_index;
-    }
-    // If neither m is static nor ref_kind is invokespecial,
-    // set it to itable index.
-    if (vmindex == Method::invalid_vtable_index) {
-      // LinkResolver does not report itable indexes!  (fix this?)
-      vmindex = klassItable::compute_itable_index(m());
-    }
-  } else if (m->can_be_statically_bound()) {
-    // LinkResolver reports vtable index even for final methods!
-    vmindex = Method::nonvirtual_vtable_index;
-  } else {
-    vmindex = info.vtable_index();
-  }
-  oop res = init_method_MemberName(mname, m(), (vmindex >= 0), defc());
-  assert(res == NULL || (java_lang_invoke_MemberName::vmindex(res) == vmindex), "");
-  return Handle(THREAD, res);
-}
-
-oop MethodHandles::init_field_MemberName(Handle mname, KlassHandle field_holder,
-                                         AccessFlags mods, oop type, oop name,
-                                         intptr_t offset, bool is_setter) {
-  int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS );
-  flags |= IS_FIELD | ((mods.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT);
+oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) {
+  int flags = (jushort)( fd.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS );
+  flags |= IS_FIELD | ((fd.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT);
   if (is_setter)  flags += ((JVM_REF_putField - JVM_REF_getField) << REFERENCE_KIND_SHIFT);
-  Metadata* vmtarget = field_holder();
-  int vmindex  = offset;  // determines the field uniquely when combined with static bit
+  Metadata* vmtarget = fd.field_holder();
+  int vmindex        = fd.offset();  // determines the field uniquely when combined with static bit
   oop mname_oop = mname();
   java_lang_invoke_MemberName::set_flags(mname_oop,    flags);
   java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget);
   java_lang_invoke_MemberName::set_vmindex(mname_oop,  vmindex);
-  java_lang_invoke_MemberName::set_clazz(mname_oop,    field_holder->java_mirror());
+  java_lang_invoke_MemberName::set_clazz(mname_oop,    fd.field_holder()->java_mirror());
+  oop type = field_signature_type_or_null(fd.signature());
+  oop name = field_name_or_null(fd.name());
   if (name != NULL)
     java_lang_invoke_MemberName::set_name(mname_oop,   name);
   if (type != NULL)
@@ -305,19 +262,6 @@
   return mname();
 }
 
-Handle MethodHandles::init_field_MemberName(Handle mname, FieldAccessInfo& info, TRAPS) {
-  return Handle();
-#if 0 // FIXME
-  KlassHandle field_holder = info.klass();
-  intptr_t    field_offset = info.field_offset();
-  return init_field_MemberName(mname_oop, field_holder(),
-                               info.access_flags(),
-                               type, name,
-                               field_offset, false /*is_setter*/);
-#endif
-}
-
-
 // JVM 2.9 Special Methods:
 // A method is signature polymorphic if and only if all of the following conditions hold :
 // * It is declared in the java.lang.invoke.MethodHandle class.
@@ -573,12 +517,12 @@
   return SystemDictionary::Object_klass()->java_mirror();
 }
 
-static oop field_name_or_null(Symbol* s) {
+oop MethodHandles::field_name_or_null(Symbol* s) {
   if (s == NULL)  return NULL;
   return StringTable::lookup(s);
 }
 
-static oop field_signature_type_or_null(Symbol* s) {
+oop MethodHandles::field_signature_type_or_null(Symbol* s) {
   if (s == NULL)  return NULL;
   BasicType bt = FieldType::basic_type(s);
   if (is_java_primitive(bt)) {
@@ -701,7 +645,14 @@
           return empty;
         }
       }
-      return init_method_MemberName(mname, result, THREAD);
+      if (result.resolved_appendix().not_null()) {
+        // The resolved MemberName must not be accompanied by an appendix argument,
+        // since there is no way to bind this value into the MemberName.
+        // Caller is responsible to prevent this from happening.
+        THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty);
+      }
+      oop mname2 = init_method_MemberName(mname, result);
+      return Handle(THREAD, mname2);
     }
   case IS_CONSTRUCTOR:
     {
@@ -719,22 +670,21 @@
         }
       }
       assert(result.is_statically_bound(), "");
-      return init_method_MemberName(mname, result, THREAD);
+      oop mname2 = init_method_MemberName(mname, result);
+      return Handle(THREAD, mname2);
     }
   case IS_FIELD:
     {
-      // This is taken from LinkResolver::resolve_field, sans access checks.
-      fieldDescriptor fd; // find_field initializes fd if found
-      KlassHandle sel_klass(THREAD, InstanceKlass::cast(defc())->find_field(name, type, &fd));
-      // check if field exists; i.e., if a klass containing the field def has been selected
-      if (sel_klass.is_null())  return empty;  // should not happen
-      oop type = field_signature_type_or_null(fd.signature());
-      oop name = field_name_or_null(fd.name());
-      bool is_setter = (ref_kind_is_valid(ref_kind) && ref_kind_is_setter(ref_kind));
-      mname = Handle(THREAD,
-                     init_field_MemberName(mname, sel_klass,
-                                           fd.access_flags(), type, name, fd.offset(), is_setter));
-      return mname;
+      fieldDescriptor result; // find_field initializes fd if found
+      {
+        assert(!HAS_PENDING_EXCEPTION, "");
+        LinkResolver::resolve_field(result, defc, name, type, KlassHandle(), Bytecodes::_nop, false, false, THREAD);
+        if (HAS_PENDING_EXCEPTION) {
+          return empty;
+        }
+      }
+      oop mname2 = init_field_MemberName(mname, result, ref_kind_is_setter(ref_kind));
+      return Handle(THREAD, mname2);
     }
   default:
     THROW_MSG_(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format", empty);
@@ -793,7 +743,6 @@
     }
   case IS_FIELD:
     {
-      // This is taken from LinkResolver::resolve_field, sans access checks.
       assert(vmtarget->is_klass(), "field vmtarget is Klass*");
       if (!((Klass*) vmtarget)->oop_is_instance())  break;
       instanceKlassHandle defc(THREAD, (Klass*) vmtarget);
@@ -872,11 +821,7 @@
         Handle result(thread, results->obj_at(rfill++));
         if (!java_lang_invoke_MemberName::is_instance(result()))
           return -99;  // caller bug!
-        oop type = field_signature_type_or_null(st.signature());
-        oop name = field_name_or_null(st.name());
-        oop saved = MethodHandles::init_field_MemberName(result, st.klass(),
-                                                         st.access_flags(), type, name,
-                                                         st.offset());
+        oop saved = MethodHandles::init_field_MemberName(result, st.field_descriptor());
         if (saved != result())
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
@@ -926,7 +871,8 @@
         Handle result(thread, results->obj_at(rfill++));
         if (!java_lang_invoke_MemberName::is_instance(result()))
           return -99;  // caller bug!
-        oop saved = MethodHandles::init_method_MemberName(result, m, true, NULL);
+        CallInfo info(m);
+        oop saved = MethodHandles::init_method_MemberName(result, info);
         if (saved != result())
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
@@ -1227,7 +1173,8 @@
     x = ((Klass*) vmtarget)->java_mirror();
   } else if (vmtarget->is_method()) {
     Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL);
-    x = MethodHandles::init_method_MemberName(mname2, (Method*)vmtarget, false, NULL);
+    CallInfo info((Method*)vmtarget);
+    x = MethodHandles::init_method_MemberName(mname2, info);
   }
   result->obj_at_put(1, x);
   return JNIHandles::make_local(env, result());
--- a/hotspot/src/share/vm/prims/methodHandles.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -49,19 +49,18 @@
   // Adapters.
   static MethodHandlesAdapterBlob* _adapter_code;
 
+  // utility functions for reifying names and types
+  static oop field_name_or_null(Symbol* s);
+  static oop field_signature_type_or_null(Symbol* s);
+
  public:
   // working with member names
   static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
   static Handle new_MemberName(TRAPS);  // must be followed by init_MemberName
   static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target
-  static oop init_method_MemberName(Handle mname_h, Method* m, bool do_dispatch,
-                                    KlassHandle receiver_limit_h);
-  static oop init_field_MemberName(Handle mname_h, KlassHandle field_holder_h,
-                                   AccessFlags mods, oop type, oop name,
-                                   intptr_t offset, bool is_setter = false);
-  static Handle init_method_MemberName(Handle mname_h, CallInfo& info, TRAPS);
-  static Handle init_field_MemberName(Handle mname_h, FieldAccessInfo& info, TRAPS);
+  static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false);
+  static oop init_method_MemberName(Handle mname_h, CallInfo& info);
   static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true);
   static int find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig,
                               int mflags, KlassHandle caller,
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -33,6 +33,7 @@
 #include "prims/whitebox.hpp"
 #include "prims/wbtestmethods/parserTests.hpp"
 
+#include "runtime/arguments.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
 #include "utilities/debug.hpp"
@@ -94,6 +95,11 @@
   return closure.found();
 WB_END
 
+WB_ENTRY(jlong, WB_GetCompressedOopsMaxHeapSize(JNIEnv* env, jobject o)) {
+  return (jlong)Arguments::max_heap_for_compressed_oops();
+}
+WB_END
+
 WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
   CollectorPolicy * p = Universe::heap()->collector_policy();
   gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
@@ -436,6 +442,8 @@
       CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
       (void*) &WB_ParseCommandLine
   },
+  {CC"getCompressedOopsMaxHeapSize", CC"()J",
+      (void*)&WB_GetCompressedOopsMaxHeapSize},
   {CC"printHeapSizes",     CC"()V",                   (void*)&WB_PrintHeapSizes    },
 #if INCLUDE_ALL_GCS
   {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -28,6 +28,7 @@
 #include "compiler/compilerOracle.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/cardTableRS.hpp"
+#include "memory/genCollectedHeap.hpp"
 #include "memory/referenceProcessor.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -54,6 +55,8 @@
 #endif
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #endif // INCLUDE_ALL_GCS
 
 // Note: This is a special bug reporting site for the JVM
@@ -90,6 +93,7 @@
 SystemProperty* Arguments::_system_properties   = NULL;
 const char*  Arguments::_gc_log_filename        = NULL;
 bool   Arguments::_has_profile                  = false;
+size_t Arguments::_conservative_max_heap_alignment = 0;
 uintx  Arguments::_min_heap_size                = 0;
 Arguments::Mode Arguments::_mode                = _mixed;
 bool   Arguments::_java_compiler                = false;
@@ -1391,10 +1395,17 @@
   return true;
 }
 
-inline uintx max_heap_for_compressed_oops() {
+uintx Arguments::max_heap_for_compressed_oops() {
   // Avoid sign flip.
   assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
-  LP64_ONLY(return OopEncodingHeapMax - os::vm_page_size());
+  // We need to fit both the NULL page and the heap into the memory budget, while
+  // keeping alignment constraints of the heap. To guarantee the latter, as the
+  // NULL page is located before the heap, we pad the NULL page to the conservative
+  // maximum alignment that the GC may ever impose upon the heap.
+  size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
+    Arguments::conservative_max_heap_alignment());
+
+  LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
   NOT_LP64(ShouldNotReachHere(); return 0);
 }
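For orientation, the displacement computed above is just the NULL page rounded up to the conservative heap alignment. A minimal standalone sketch, assuming a hypothetical 4 KB page size, a hypothetical 32 MB conservative alignment, and the usual 32 GB OopEncodingHeapMax for 8-byte object alignment (a plain align-up helper stands in for HotSpot's align_size_up_):

    #include <cstdint>
    #include <cstdio>

    // Round value up to the next multiple of a power-of-two alignment.
    static uint64_t align_up(uint64_t value, uint64_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const uint64_t page_size             = 4 * 1024;                   // hypothetical os::vm_page_size()
      const uint64_t heap_alignment        = 32 * 1024 * 1024;           // hypothetical conservative alignment
      const uint64_t oop_encoding_heap_max = 32ULL * 1024 * 1024 * 1024; // 4G oops * 8-byte scaling

      // The NULL page sits below the heap, so it is padded to the heap alignment.
      const uint64_t displacement = align_up(page_size, heap_alignment);  // 32 MB, not 4 KB
      printf("max heap for compressed oops: %llu bytes\n",
             (unsigned long long)(oop_encoding_heap_max - displacement));
      return 0;
    }

With these assumed numbers the limit becomes 32 GB minus 32 MB rather than 32 GB minus one page, which is what keeps an aligned heap plus the padded NULL page inside the encodable range.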
 
@@ -1439,7 +1450,7 @@
     if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
       warning("Max heap size too large for Compressed Oops");
       FLAG_SET_DEFAULT(UseCompressedOops, false);
-      FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+      FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
     }
   }
 #endif // _LP64
@@ -1452,22 +1463,22 @@
 void Arguments::set_use_compressed_klass_ptrs() {
 #ifndef ZERO
 #ifdef _LP64
-  // UseCompressedOops must be on for UseCompressedKlassPointers to be on.
+  // UseCompressedOops must be on for UseCompressedClassPointers to be on.
   if (!UseCompressedOops) {
-    if (UseCompressedKlassPointers) {
-      warning("UseCompressedKlassPointers requires UseCompressedOops");
+    if (UseCompressedClassPointers) {
+      warning("UseCompressedClassPointers requires UseCompressedOops");
     }
-    FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+    FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
   } else {
-    // Turn on UseCompressedKlassPointers too
-    if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
-      FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
+    // Turn on UseCompressedClassPointers too
+    if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
+      FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
     }
-    // Check the ClassMetaspaceSize to make sure we use compressed klass ptrs.
-    if (UseCompressedKlassPointers) {
-      if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
-        warning("Class metaspace size is too large for UseCompressedKlassPointers");
-        FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+    // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
+    if (UseCompressedClassPointers) {
+      if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
+        warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
+        FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
       }
     }
   }
@@ -1475,6 +1486,23 @@
 #endif // !ZERO
 }
 
+void Arguments::set_conservative_max_heap_alignment() {
+  // The conservative maximum required alignment for the heap is the maximum of
+  // the alignments imposed by several sources: any requirements from the heap
+  // itself, the collector policy and the maximum page size we may run the VM
+  // with.
+  size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
+#if INCLUDE_ALL_GCS
+  if (UseParallelGC) {
+    heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
+  } else if (UseG1GC) {
+    heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
+  }
+#endif // INCLUDE_ALL_GCS
+  _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
+    CollectorPolicy::compute_max_alignment());
+}
+
 void Arguments::set_ergonomics_flags() {
 
   if (os::is_server_class_machine()) {
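The selection above is a plain three-way maximum over alignment constraints. A minimal sketch with made-up inputs (the real values come from the chosen collector, os::max_page_size() and CollectorPolicy::compute_max_alignment()):

    #include <algorithm>
    #include <cstddef>

    // Stand-in for MAX3: the conservative alignment is the largest constraint imposed
    // by the GC, the largest usable page size, and the collector policy.
    size_t conservative_max_heap_alignment(size_t gc_alignment,
                                           size_t max_page_size,
                                           size_t policy_alignment) {
      return std::max(gc_alignment, std::max(max_page_size, policy_alignment));
    }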
@@ -1503,6 +1531,8 @@
     }
   }
 
+  set_conservative_max_heap_alignment();
+
 #ifndef ZERO
 #ifdef _LP64
   set_use_compressed_oops();
@@ -1839,7 +1869,7 @@
         (NumberOfGCLogFiles == 0)  ||
         (GCLogFileSize == 0)) {
       jio_fprintf(defaultStream::output_stream(),
-                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>\n"
+                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>[k|K|m|M|g|G]\n"
                   "where num_of_file > 0 and num_of_size > 0\n"
                   "GC log rotation is turned off\n");
       UseGCLogFileRotation = false;
@@ -1853,6 +1883,51 @@
   }
 }
 
+// This function is called for -Xloggc:<filename>; it checks whether the given
+// file name (or string) conforms to the following specification:
+// a valid name may only contain the characters [A-Z][a-z][0-9].-_ plus the
+// sequences %p and %t, each of which may appear at most once. Only the file
+// name itself is restricted, not any leading path components.
+bool is_filename_valid(const char *file_name) {
+  const char* p = file_name;
+  char file_sep = os::file_separator()[0];
+  const char* cp;
+  // skip prefix path
+  for (cp = file_name; *cp != '\0'; cp++) {
+    if (*cp == '/' || *cp == file_sep) {
+      p = cp + 1;
+    }
+  }
+
+  int count_p = 0;
+  int count_t = 0;
+  while (*p != '\0') {
+    if ((*p >= '0' && *p <= '9') ||
+        (*p >= 'A' && *p <= 'Z') ||
+        (*p >= 'a' && *p <= 'z') ||
+         *p == '-'               ||
+         *p == '_'               ||
+         *p == '.') {
+       p++;
+       continue;
+    }
+    if (*p == '%') {
+      if (*(p + 1) == 'p') {
+        p += 2;
+        count_p++;
+        continue;
+      }
+      if (*(p + 1) == 't') {
+        p += 2;
+        count_t++;
+        continue;
+      }
+    }
+    return false;
+  }
+  return count_p < 2 && count_t < 2;
+}
+
 // Check consistency of GC selection
 bool Arguments::check_gc_consistency() {
   check_gclog_consistency();
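The filename rules above are easiest to see on concrete names. A hypothetical driver (assuming is_filename_valid from this hunk is visible) and the outcomes the code yields:

    #include <cassert>

    bool is_filename_valid(const char* file_name);  // defined in arguments.cpp above

    static void check_gc_log_names() {
      assert( is_filename_valid("gc_%p.log"));       // %p used once: accepted
      assert( is_filename_valid("logs/gc-%t.txt"));  // leading path is skipped, %t once: accepted
      assert(!is_filename_valid("gc_%p_%p.log"));    // %p used twice: rejected
      assert(!is_filename_valid("gc log.txt"));      // space is not an allowed character: rejected
    }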
@@ -2148,8 +2223,8 @@
 
   status = status && verify_object_alignment();
 
-  status = status && verify_interval(ClassMetaspaceSize, 1*M, 3*G,
-                                      "ClassMetaspaceSize");
+  status = status && verify_interval(CompressedClassSpaceSize, 1*M, 3*G,
+                                      "CompressedClassSpaceSize");
 
   status = status && verify_interval(MarkStackSizeMax,
                                   1, (max_jint - 1), "MarkStackSizeMax");
@@ -2806,6 +2881,13 @@
       // ostream_init_log(), when called will use this filename
       // to initialize a fileStream.
       _gc_log_filename = strdup(tail);
+      if (!is_filename_valid(_gc_log_filename)) {
+        jio_fprintf(defaultStream::output_stream(),
+                    "Invalid file name for use with -Xloggc: %s\n"
+                    "A file name may only contain the characters [A-Z][a-z][0-9]-_. "
+                    "plus %%p and %%t, and %%p or %%t may each be used at most once\n", _gc_log_filename);
+        return JNI_EINVAL;
+      }
       FLAG_SET_CMDLINE(bool, PrintGC, true);
       FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
 
@@ -3274,13 +3356,13 @@
     }
     UseSharedSpaces = false;
 #ifdef _LP64
-    if (!UseCompressedOops || !UseCompressedKlassPointers) {
+    if (!UseCompressedOops || !UseCompressedClassPointers) {
       vm_exit_during_initialization(
-        "Cannot dump shared archive when UseCompressedOops or UseCompressedKlassPointers is off.", NULL);
+        "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
     }
   } else {
-    // UseCompressedOops and UseCompressedKlassPointers must be on for UseSharedSpaces.
-    if (!UseCompressedOops || !UseCompressedKlassPointers) {
+    // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.
+    if (!UseCompressedOops || !UseCompressedClassPointers) {
       no_shared_spaces();
     }
 #endif
@@ -3326,6 +3408,33 @@
   return shared_archive_path;
 }
 
+#ifndef PRODUCT
+// Determine whether LogVMOutput should be implicitly turned on.
+static bool use_vm_log() {
+  if (LogCompilation || !FLAG_IS_DEFAULT(LogFile) ||
+      PrintCompilation || PrintInlining || PrintDependencies || PrintNativeNMethods ||
+      PrintDebugInfo || PrintRelocations || PrintNMethods || PrintExceptionHandlers ||
+      PrintAssembly || TraceDeoptimization || TraceDependencies ||
+      (VerifyDependencies && FLAG_IS_CMDLINE(VerifyDependencies))) {
+    return true;
+  }
+
+#ifdef COMPILER1
+  if (PrintC1Statistics) {
+    return true;
+  }
+#endif // COMPILER1
+
+#ifdef COMPILER2
+  if (PrintOptoAssembly || PrintOptoStatistics) {
+    return true;
+  }
+#endif // COMPILER2
+
+  return false;
+}
+#endif // PRODUCT
+
 // Parse entry point called from JNI_CreateJavaVM
 
 jint Arguments::parse(const JavaVMInitArgs* args) {
@@ -3506,6 +3615,11 @@
   no_shared_spaces();
 #endif // INCLUDE_CDS
 
+  return JNI_OK;
+}
+
+jint Arguments::apply_ergo() {
+
   // Set flags based on ergonomics.
   set_ergonomics_flags();
 
@@ -3581,7 +3695,7 @@
   FLAG_SET_DEFAULT(ProfileInterpreter, false);
   FLAG_SET_DEFAULT(UseBiasedLocking, false);
   LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
-  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedKlassPointers, false));
+  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
 #endif // CC_INTERP
 
 #ifdef COMPILER2
@@ -3610,6 +3724,10 @@
     DebugNonSafepoints = true;
   }
 
+  if (FLAG_IS_CMDLINE(CompressedClassSpaceSize) && !UseCompressedClassPointers) {
+    warning("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used");
+  }
+
 #ifndef PRODUCT
   if (CompileTheWorld) {
     // Force NmethodSweeper to sweep whole CodeCache each time.
@@ -3617,7 +3735,13 @@
       NmethodSweepFraction = 1;
     }
   }
-#endif
+
+  if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) {
+    if (use_vm_log()) {
+      LogVMOutput = true;
+    }
+  }
+#endif // PRODUCT
 
   if (PrintCommandLineFlags) {
     CommandLineFlags::printSetFlags(tty);
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -144,7 +144,7 @@
   void set_os_lib(void* os_lib)             { _os_lib = os_lib; }
   AgentLibrary* next() const                { return _next; }
   bool is_static_lib() const                { return _is_static_lib; }
-  void set_static_lib(bool static_lib)      { _is_static_lib = static_lib; }
+  void set_static_lib(bool is_static_lib)   { _is_static_lib = is_static_lib; }
   bool valid()                              { return (_state == agent_valid); }
   void set_valid()                          { _state = agent_valid; }
   void set_invalid()                        { _state = agent_invalid; }
@@ -280,6 +280,9 @@
   // Option flags
   static bool   _has_profile;
   static const char*  _gc_log_filename;
+  // Value of the conservative maximum heap alignment needed
+  static size_t  _conservative_max_heap_alignment;
+
   static uintx  _min_heap_size;
 
   // -Xrun arguments
@@ -327,6 +330,7 @@
   // Garbage-First (UseG1GC)
   static void set_g1_gc_flags();
   // GC ergonomics
+  static void set_conservative_max_heap_alignment();
   static void set_use_compressed_oops();
   static void set_use_compressed_klass_ptrs();
   static void set_ergonomics_flags();
@@ -430,8 +434,10 @@
   static char*  SharedArchivePath;
 
  public:
-  // Parses the arguments
+  // Parses the arguments, first phase
   static jint parse(const JavaVMInitArgs* args);
+  // Apply ergonomics
+  static jint apply_ergo();
   // Adjusts the arguments after the OS has adjusted the arguments
   static jint adjust_after_os();
   // Check for consistency in the selection of the garbage collector.
@@ -445,6 +451,10 @@
   // Used by os_solaris
   static bool process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized);
 
+  static size_t conservative_max_heap_alignment() { return _conservative_max_heap_alignment; }
+  // Return the maximum size a heap with compressed oops can take
+  static size_t max_heap_for_compressed_oops();
+
   // return a char* array containing all options
   static char** jvm_flags_array()          { return _jvm_flags_array; }
   static char** jvm_args_array()           { return _jvm_args_array; }
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1751,7 +1751,7 @@
   else    return trap_state & ~DS_RECOMPILE_BIT;
 }
 //---------------------------format_trap_state---------------------------------
-// This is used for debugging and diagnostics, including hotspot.log output.
+// This is used for debugging and diagnostics, including LogFile output.
 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                               int trap_state) {
   DeoptReason reason      = trap_state_reason(trap_state);
@@ -1828,7 +1828,7 @@
   return buf;
 }
 
-// This is used for debugging and diagnostics, including hotspot.log output.
+// This is used for debugging and diagnostics, including LogFile output.
 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
                                                 int trap_request) {
   jint unloaded_class_index = trap_request_index(trap_request);
--- a/hotspot/src/share/vm/runtime/fieldDescriptor.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/fieldDescriptor.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -97,18 +97,32 @@
   return constants()->uncached_string_at(initial_value_index(), CHECK_0);
 }
 
-void fieldDescriptor::initialize(InstanceKlass* ik, int index) {
-  _cp = ik->constants();
+void fieldDescriptor::reinitialize(InstanceKlass* ik, int index) {
+  if (_cp.is_null() || field_holder() != ik) {
+    _cp = constantPoolHandle(Thread::current(), ik->constants());
+    // _cp should now reference ik's constant pool; i.e., ik is now field_holder.
+    assert(field_holder() == ik, "must be already initialized to this class");
+  }
   FieldInfo* f = ik->field(index);
   assert(!f->is_internal(), "regular Java fields only");
 
   _access_flags = accessFlags_from(f->access_flags());
   guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor");
   _index = index;
+  verify();
 }
 
 #ifndef PRODUCT
 
+void fieldDescriptor::verify() const {
+  if (_cp.is_null()) {
+    assert(_index == badInt, "constructor must be called");  // see constructor
+  } else {
+    assert(_index >= 0, "good index");
+    assert(_index < field_holder()->java_fields_count(), "oob");
+  }
+}
+
 void fieldDescriptor::print_on(outputStream* st) const {
   access_flags().print_on(st);
   name()->print_value_on(st);
--- a/hotspot/src/share/vm/runtime/fieldDescriptor.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/fieldDescriptor.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,13 @@
   }
 
  public:
+  fieldDescriptor() {
+    DEBUG_ONLY(_index = badInt);
+  }
+  fieldDescriptor(InstanceKlass* ik, int index) {
+    DEBUG_ONLY(_index = badInt);
+    reinitialize(ik, index);
+  }
   Symbol* name() const {
     return field()->name(_cp);
   }
@@ -112,12 +119,13 @@
   }
 
   // Initialization
-  void initialize(InstanceKlass* ik, int index);
+  void reinitialize(InstanceKlass* ik, int index);
 
   // Print
   void print() { print_on(tty); }
   void print_on(outputStream* st) const         PRODUCT_RETURN;
   void print_on_for(outputStream* st, oop obj)  PRODUCT_RETURN;
+  void verify() const                           PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_RUNTIME_FIELDDESCRIPTOR_HPP
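A minimal sketch of the constructor/reinitialize pairing introduced above, assuming HotSpot internals (InstanceKlass, tty) and a hypothetical caller; the debug-only badInt/verify() bookkeeping comes from the hunks above:

    // One descriptor is reused across the loop; reinitialize() only refreshes the
    // constant pool handle when the field holder actually changes.
    void print_java_fields(InstanceKlass* ik) {
      fieldDescriptor fd;                              // debug builds set _index to badInt
      for (int i = 0; i < ik->java_fields_count(); i++) {
        fd.reinitialize(ik, i);                        // verify() checks the index range in debug builds
        tty->print_cr("  %s", fd.name()->as_C_string());
      }
    }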
--- a/hotspot/src/share/vm/runtime/frame.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -652,7 +652,7 @@
 // Return whether the frame is in the VM or os indicating a Hotspot problem.
 // Otherwise, it's likely a bug in the native library that the Java code calls,
 // hopefully indicating where to submit bugs.
-static void print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
+void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
   // C/C++ frame
   bool in_vm = os::address_is_in_vm(pc);
   st->print(in_vm ? "V" : "C");
--- a/hotspot/src/share/vm/runtime/frame.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/frame.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -406,6 +406,7 @@
   void print_on(outputStream* st) const;
   void interpreter_frame_print_on(outputStream* st) const;
   void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const;
+  static void print_C_frame(outputStream* st, char* buf, int buflen, address pc);
 
   // Add annotated descriptions of memory locations belonging to this frame to values
   void describe(FrameValues& values, int frame_no);
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -443,8 +443,8 @@
             "Use 32-bit object references in 64-bit VM  "                   \
             "lp64_product means flag is always constant in 32 bit VM")      \
                                                                             \
-  lp64_product(bool, UseCompressedKlassPointers, false,                     \
-            "Use 32-bit klass pointers in 64-bit VM  "                      \
+  lp64_product(bool, UseCompressedClassPointers, false,                     \
+            "Use 32-bit class pointers in 64-bit VM  "                      \
             "lp64_product means flag is always constant in 32 bit VM")      \
                                                                             \
   notproduct(bool, CheckCompressedOops, true,                               \
@@ -880,7 +880,7 @@
           "stay alive at the expense of JVM performance")                   \
                                                                             \
   diagnostic(bool, LogCompilation, false,                                   \
-          "Log compilation activity in detail to hotspot.log or LogFile")   \
+          "Log compilation activity in detail to LogFile")                  \
                                                                             \
   product(bool, PrintCompilation, false,                                    \
           "Print compilations")                                             \
@@ -2498,16 +2498,17 @@
          "Print all VM flags with default values and descriptions and exit")\
                                                                             \
   diagnostic(bool, SerializeVMOutput, true,                                 \
-         "Use a mutex to serialize output to tty and hotspot.log")          \
+         "Use a mutex to serialize output to tty and LogFile")              \
                                                                             \
   diagnostic(bool, DisplayVMOutput, true,                                   \
          "Display all VM output on the tty, independently of LogVMOutput")  \
                                                                             \
-  diagnostic(bool, LogVMOutput, trueInDebug,                                \
-         "Save VM output to hotspot.log, or to LogFile")                    \
+  diagnostic(bool, LogVMOutput, false,                                      \
+         "Save VM output to LogFile")                                       \
                                                                             \
   diagnostic(ccstr, LogFile, NULL,                                          \
-         "If LogVMOutput is on, save VM output to this file [hotspot.log]") \
+         "If LogVMOutput or LogCompilation is on, save VM output to "       \
+         "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)") \
                                                                             \
   product(ccstr, ErrorFile, NULL,                                           \
          "If an error occurs, save the error data to this file "            \
@@ -3039,9 +3040,9 @@
   product(uintx, MaxMetaspaceSize, max_uintx,                               \
           "Maximum size of Metaspaces (in bytes)")                          \
                                                                             \
-  product(uintx, ClassMetaspaceSize, 1*G,                                   \
-          "Maximum size of InstanceKlass area in Metaspace used for "       \
-          "UseCompressedKlassPointers")                                     \
+  product(uintx, CompressedClassSpaceSize, 1*G,                             \
+          "Maximum size of class area in Metaspace when compressed "        \
+          "class pointers are used")                                        \
                                                                             \
   product(uintx, MinHeapFreeRatio,    40,                                   \
           "Min percentage of heap free after GC to avoid expansion")        \
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -45,7 +45,6 @@
 Mutex*   VMStatistic_lock             = NULL;
 Mutex*   JNIGlobalHandle_lock         = NULL;
 Mutex*   JNIHandleBlockFreeList_lock  = NULL;
-Mutex*   JNICachedItableIndex_lock    = NULL;
 Mutex*   MemberNameTable_lock         = NULL;
 Mutex*   JmethodIdCreation_lock       = NULL;
 Mutex*   JfieldIdCreation_lock        = NULL;
@@ -253,7 +252,6 @@
   }
   def(Heap_lock                    , Monitor, nonleaf+1,   false);
   def(JfieldIdCreation_lock        , Mutex  , nonleaf+1,   true ); // jfieldID, Used in VM_Operation
-  def(JNICachedItableIndex_lock    , Mutex  , nonleaf+1,   false); // Used to cache an itable index during JNI invoke
   def(MemberNameTable_lock         , Mutex  , nonleaf+1,   false); // Used to protect MemberNameTable
 
   def(CompiledIC_lock              , Mutex  , nonleaf+2,   false); // locks VtableStubs_lock, InlineCacheBuffer_lock
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,6 @@
 extern Mutex*   VMStatistic_lock;                // a lock used to guard statistics count increment
 extern Mutex*   JNIGlobalHandle_lock;            // a lock on creating JNI global handles
 extern Mutex*   JNIHandleBlockFreeList_lock;     // a lock on the JNI handle block free list
-extern Mutex*   JNICachedItableIndex_lock;       // a lock on caching an itable index during JNI invoke
 extern Mutex*   MemberNameTable_lock;            // a lock on the MemberNameTable updates
 extern Mutex*   JmethodIdCreation_lock;          // a lock on creating JNI method identifiers
 extern Mutex*   JfieldIdCreation_lock;           // a lock on creating JNI static field identifiers
--- a/hotspot/src/share/vm/runtime/os.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -314,6 +314,11 @@
   }
 }
 
+void os::init_before_ergo() {
+  // We need to initialize large page support here because ergonomics makes
+  // decisions based on large page support and the calculated large page size.
+  large_page_init();
+}
 
 void os::signal_init() {
   if (!ReduceSignalUsage) {
@@ -454,6 +459,7 @@
  */
 void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib,
                               const char *syms[], size_t syms_len) {
+  assert(agent_lib != NULL, "sanity check");
   const char *lib_name;
   void *handle = agent_lib->os_lib();
   void *entryName = NULL;
@@ -484,6 +490,7 @@
   void *proc_handle;
   void *save_handle;
 
+  assert(agent_lib != NULL, "sanity check");
   if (agent_lib->name() == NULL) {
     return false;
   }
@@ -493,14 +500,13 @@
   // We want to look in this process' symbol table.
   agent_lib->set_os_lib(proc_handle);
   ret = find_agent_function(agent_lib, true, syms, syms_len);
-  agent_lib->set_os_lib(save_handle);
   if (ret != NULL) {
     // Found an entry point like Agent_OnLoad_lib_name so we have a static agent
-    agent_lib->set_os_lib(proc_handle);
     agent_lib->set_valid();
     agent_lib->set_static_lib(true);
     return true;
   }
+  agent_lib->set_os_lib(save_handle);
   return false;
 }
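For orientation, init_before_ergo() slots in between the two new argument-processing phases; condensed from the thread.cpp hunk further down, using only names that appear in this changeset:

    // Inside Threads::create_vm (see the thread.cpp hunk below):
    jint parse_result = Arguments::parse(args);        // phase 1: read command-line flags
    if (parse_result != JNI_OK) return parse_result;

    os::init_before_ergo();                            // large pages are sized before ergonomics runs

    jint ergo_result = Arguments::apply_ergo();        // phase 2: apply ergonomic flag decisions
    if (ergo_result != JNI_OK) return ergo_result;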
 
--- a/hotspot/src/share/vm/runtime/os.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/os.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -91,6 +91,8 @@
 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
 
 class os: AllStatic {
+  friend class VMStructs;
+
  public:
   enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
 
@@ -139,7 +141,10 @@
 
  public:
   static void init(void);                      // Called before command line parsing
+  static void init_before_ergo(void);          // Called after command line parsing
+                                               // before VM ergonomics processing.
   static jint init_2(void);                    // Called after command line parsing
+                                               // and VM ergonomics processing
   static void init_globals(void) {             // Called from init_globals() in init.cpp
     init_globals_ext();
   }
@@ -254,6 +259,11 @@
   static size_t page_size_for_region(size_t region_min_size,
                                      size_t region_max_size,
                                      uint min_pages);
+  // Return the largest page size that can be used
+  static size_t max_page_size() {
+    // The _page_sizes array is sorted in descending order.
+    return _page_sizes[0];
+  }
 
   // Methods for tracing page sizes returned by the above method; enabled by
   // TracePageSizes.  The region_{min,max}_size parameters should be the values
@@ -795,6 +805,14 @@
 #endif
 
  public:
+#ifndef PLATFORM_PRINT_NATIVE_STACK
+  // No platform-specific code for printing the native stack.
+  static bool platform_print_native_stack(outputStream* st, void* context,
+                                          char *buf, int buf_size) {
+    return false;
+  }
+#endif
+
   // debugging support (mostly used by debug.cpp but also fatal error handler)
   static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address
 
--- a/hotspot/src/share/vm/runtime/reflection.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/reflection.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -952,7 +952,8 @@
         }
       }  else {
         // if the method can be overridden, we resolve using the vtable index.
-        int index  = reflected_method->vtable_index();
+        assert(!reflected_method->has_itable_index(), "");
+        int index = reflected_method->vtable_index();
         method = reflected_method;
         if (index != Method::nonvirtual_vtable_index) {
           // target_klass might be an arrayKlassOop but all vtables start at
--- a/hotspot/src/share/vm/runtime/reflectionUtils.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/reflectionUtils.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -109,6 +109,8 @@
  private:
   int length() const                { return _klass->java_fields_count(); }
 
+  fieldDescriptor _fd_buf;
+
  public:
   FieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
     : KlassStream(klass, local_only, classes_only) {
@@ -134,6 +136,12 @@
   int offset() const {
     return _klass->field_offset( index() );
   }
+  // bridge to a heavier API:
+  fieldDescriptor& field_descriptor() const {
+    fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
+    field.reinitialize(_klass(), _index);
+    return field;
+  }
 };
 
 class FilteredField : public CHeapObj<mtInternal>  {
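A minimal sketch of how the new bridge is meant to be consumed (hypothetical caller, assuming the usual HotSpot FieldStream iteration; compare the find_MemberNames change in the methodHandles.cpp hunk above). The returned reference aliases the stream's internal _fd_buf, whose contents are overwritten by the next field_descriptor() call:

    // Walk the fields of klass and hand each one to an API that now takes a fieldDescriptor&.
    void visit_fields(instanceKlassHandle klass) {
      for (FieldStream st(klass, false /*local_only*/, false /*classes_only*/); !st.eos(); st.next()) {
        fieldDescriptor& fd = st.field_descriptor();   // reinitializes and returns the shared _fd_buf
        // e.g. MethodHandles::init_field_MemberName(result, fd);
      }
    }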
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -269,6 +269,7 @@
   // the number of nmethods changes during the sweep so the final
   // stage must iterate until there are no more nmethods.
   int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
+  int swept_count = 0;
 
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   assert(!CodeCache_lock->owned_by_self(), "just checking");
@@ -278,6 +279,7 @@
 
     // The last invocation iterates until there are no more nmethods
     for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
+      swept_count++;
       if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
         if (PrintMethodFlushing && Verbose) {
           tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
@@ -331,7 +333,7 @@
     event.set_endtime(sweep_end_counter);
     event.set_sweepIndex(_traversals);
     event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
-    event.set_sweptCount(todo);
+    event.set_sweptCount(swept_count);
     event.set_flushedCount(_flushed_count);
     event.set_markedCount(_marked_count);
     event.set_zombifiedCount(_zombified_count);
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -333,6 +333,8 @@
   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
   ObjectSynchronizer::omFlush (this) ;
 
+  EVENT_THREAD_DESTRUCT(this);
+
   // stack_base can be NULL if the thread is never started or exited before
   // record_stack_base_and_size called. Although, we would like to ensure
   // that all started threads do call record_stack_base_and_size(), there is
@@ -3329,6 +3331,11 @@
   jint parse_result = Arguments::parse(args);
   if (parse_result != JNI_OK) return parse_result;
 
+  os::init_before_ergo();
+
+  jint ergo_result = Arguments::apply_ergo();
+  if (ergo_result != JNI_OK) return ergo_result;
+
   if (PauseAtStartup) {
     os::pause();
   }
@@ -3714,7 +3721,7 @@
     const char *name = agent->name();
     const char *msg = "Could not find agent library ";
 
-    // First check to see if agent is statcally linked into executable
+    // First check to see if agent is statically linked into executable
     if (os::find_builtin_agent(agent, on_load_symbols, num_symbol_entries)) {
       library = agent->os_lib();
     } else if (agent->is_absolute_path()) {
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -453,6 +453,42 @@
   return reserved_size() - committed_size();
 }
 
+size_t VirtualSpace::actual_committed_size() const {
+  // Special VirtualSpaces commit all reserved space up front.
+  if (special()) {
+    return reserved_size();
+  }
+
+  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
+  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
+  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
+
+#ifdef ASSERT
+  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
+  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
+  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
+
+  if (committed_high > 0) {
+    assert(committed_low == lower, "Must be");
+    assert(committed_middle == middle, "Must be");
+  }
+
+  if (committed_middle > 0) {
+    assert(committed_low == lower, "Must be");
+  }
+  if (committed_middle < middle) {
+    assert(committed_high == 0, "Must be");
+  }
+
+  if (committed_low < lower) {
+    assert(committed_high == 0, "Must be");
+    assert(committed_middle == 0, "Must be");
+  }
+#endif
+
+  return committed_low + committed_middle + committed_high;
+}
+
 
 bool VirtualSpace::contains(const void* p) const {
   return low() <= (const char*) p && (const char*) p < high();
@@ -718,16 +754,19 @@
   assert(high() <= upper_high(), "upper high");
 }
 
-void VirtualSpace::print() {
-  tty->print   ("Virtual space:");
-  if (special()) tty->print(" (pinned in memory)");
-  tty->cr();
-  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
-  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
-  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
-  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
+void VirtualSpace::print_on(outputStream* out) {
+  out->print   ("Virtual space:");
+  if (special()) out->print(" (pinned in memory)");
+  out->cr();
+  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
+  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
+  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
+  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
 }
 
+void VirtualSpace::print() {
+  print_on(tty);
+}
 
 /////////////// Unit tests ///////////////
 
@@ -910,6 +949,109 @@
   TestReservedSpace::test_reserved_space();
 }
 
+#define assert_equals(actual, expected)     \
+  assert(actual == expected,                \
+    err_msg("Got " SIZE_FORMAT " expected " \
+      SIZE_FORMAT, actual, expected));
+
+#define assert_ge(value1, value2)                  \
+  assert(value1 >= value2,                         \
+    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
+      #value2 "': " SIZE_FORMAT, value1, value2));
+
+#define assert_lt(value1, value2)                  \
+  assert(value1 < value2,                          \
+    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
+      #value2 "': " SIZE_FORMAT, value1, value2));
+
+
+class TestVirtualSpace : AllStatic {
+ public:
+  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) {
+    size_t granularity = os::vm_allocation_granularity();
+    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
+
+    ReservedSpace reserved(reserve_size_aligned);
+
+    assert(reserved.is_reserved(), "Must be");
+
+    VirtualSpace vs;
+    bool initialized = vs.initialize(reserved, 0);
+    assert(initialized, "Failed to initialize VirtualSpace");
+
+    vs.expand_by(commit_size, false);
+
+    if (vs.special()) {
+      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
+    } else {
+      assert_ge(vs.actual_committed_size(), commit_size);
+      // Approximate the commit granularity.
+      size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size();
+      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
+    }
+
+    reserved.release();
+  }
+
+  static void test_virtual_space_actual_committed_space_one_large_page() {
+    if (!UseLargePages) {
+      return;
+    }
+
+    size_t large_page_size = os::large_page_size();
+
+    ReservedSpace reserved(large_page_size, large_page_size, true, false);
+
+    assert(reserved.is_reserved(), "Must be");
+
+    VirtualSpace vs;
+    bool initialized = vs.initialize(reserved, 0);
+    assert(initialized, "Failed to initialize VirtualSpace");
+
+    vs.expand_by(large_page_size, false);
+
+    assert_equals(vs.actual_committed_size(), large_page_size);
+
+    reserved.release();
+  }
+
+  static void test_virtual_space_actual_committed_space() {
+    test_virtual_space_actual_committed_space(4 * K, 0);
+    test_virtual_space_actual_committed_space(4 * K, 4 * K);
+    test_virtual_space_actual_committed_space(8 * K, 0);
+    test_virtual_space_actual_committed_space(8 * K, 4 * K);
+    test_virtual_space_actual_committed_space(8 * K, 8 * K);
+    test_virtual_space_actual_committed_space(12 * K, 0);
+    test_virtual_space_actual_committed_space(12 * K, 4 * K);
+    test_virtual_space_actual_committed_space(12 * K, 8 * K);
+    test_virtual_space_actual_committed_space(12 * K, 12 * K);
+    test_virtual_space_actual_committed_space(64 * K, 0);
+    test_virtual_space_actual_committed_space(64 * K, 32 * K);
+    test_virtual_space_actual_committed_space(64 * K, 64 * K);
+    test_virtual_space_actual_committed_space(2 * M, 0);
+    test_virtual_space_actual_committed_space(2 * M, 4 * K);
+    test_virtual_space_actual_committed_space(2 * M, 64 * K);
+    test_virtual_space_actual_committed_space(2 * M, 1 * M);
+    test_virtual_space_actual_committed_space(2 * M, 2 * M);
+    test_virtual_space_actual_committed_space(10 * M, 0);
+    test_virtual_space_actual_committed_space(10 * M, 4 * K);
+    test_virtual_space_actual_committed_space(10 * M, 8 * K);
+    test_virtual_space_actual_committed_space(10 * M, 1 * M);
+    test_virtual_space_actual_committed_space(10 * M, 2 * M);
+    test_virtual_space_actual_committed_space(10 * M, 5 * M);
+    test_virtual_space_actual_committed_space(10 * M, 10 * M);
+  }
+
+  static void test_virtual_space() {
+    test_virtual_space_actual_committed_space();
+    test_virtual_space_actual_committed_space_one_large_page();
+  }
+};
+
+void TestVirtualSpace_test() {
+  TestVirtualSpace::test_virtual_space();
+}
+
 #endif // PRODUCT
 
 #endif
--- a/hotspot/src/share/vm/runtime/virtualspace.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/virtualspace.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -183,11 +183,16 @@
   // Destruction
   ~VirtualSpace();
 
-  // Testers (all sizes are byte sizes)
-  size_t committed_size()   const;
-  size_t reserved_size()    const;
+  // Reserved memory
+  size_t reserved_size() const;
+  // Actually committed OS memory
+  size_t actual_committed_size() const;
+  // Memory used/expanded in this virtual space
+  size_t committed_size() const;
+  // Memory left to use/expand in this virtual space
   size_t uncommitted_size() const;
-  bool   contains(const void* p)  const;
+
+  bool   contains(const void* p) const;
 
   // Operations
   // returns true on success, false otherwise
@@ -198,7 +203,8 @@
   void check_for_contiguity() PRODUCT_RETURN;
 
   // Debugging
-  void print() PRODUCT_RETURN;
+  void print_on(outputStream* out) PRODUCT_RETURN;
+  void print();
 };
 
 #endif // SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -315,7 +315,6 @@
   nonstatic_field(InstanceKlass,               _breakpoints,                                  BreakpointInfo*)                       \
   nonstatic_field(InstanceKlass,               _generic_signature_index,                           u2)                               \
   nonstatic_field(InstanceKlass,               _methods_jmethod_ids,                          jmethodID*)                            \
-  nonstatic_field(InstanceKlass,               _methods_cached_itable_indices,                int*)                                  \
   volatile_nonstatic_field(InstanceKlass,      _idnum_allocated_count,                        u2)                                    \
   nonstatic_field(InstanceKlass,               _annotations,                                  Annotations*)                          \
   nonstatic_field(InstanceKlass,               _dependencies,                                 nmethodBucket*)                        \
@@ -330,11 +329,13 @@
   nonstatic_field(Klass,                       _java_mirror,                                  oop)                                   \
   nonstatic_field(Klass,                       _modifier_flags,                               jint)                                  \
   nonstatic_field(Klass,                       _super,                                        Klass*)                                \
+  nonstatic_field(Klass,                       _subklass,                                     Klass*)                                \
   nonstatic_field(Klass,                       _layout_helper,                                jint)                                  \
   nonstatic_field(Klass,                       _name,                                         Symbol*)                               \
   nonstatic_field(Klass,                       _access_flags,                                 AccessFlags)                           \
-  nonstatic_field(Klass,                       _subklass,                                     Klass*)                                \
+  nonstatic_field(Klass,                       _prototype_header,                             markOop)                               \
   nonstatic_field(Klass,                       _next_sibling,                                 Klass*)                                \
+  nonstatic_field(vtableEntry,                 _method,                                       Method*)                               \
   nonstatic_field(MethodData,           _size,                                         int)                                   \
   nonstatic_field(MethodData,           _method,                                       Method*)                               \
   nonstatic_field(MethodData,           _data_size,                                    int)                                   \
@@ -342,10 +343,15 @@
   nonstatic_field(MethodData,           _nof_decompiles,                               uint)                                  \
   nonstatic_field(MethodData,           _nof_overflow_recompiles,                      uint)                                  \
   nonstatic_field(MethodData,           _nof_overflow_traps,                           uint)                                  \
+  nonstatic_field(MethodData,           _trap_hist._array[0],                          u1)                                    \
   nonstatic_field(MethodData,           _eflags,                                       intx)                                  \
   nonstatic_field(MethodData,           _arg_local,                                    intx)                                  \
   nonstatic_field(MethodData,           _arg_stack,                                    intx)                                  \
   nonstatic_field(MethodData,           _arg_returned,                                 intx)                                  \
+  nonstatic_field(DataLayout,           _header._struct._tag,                          u1)                                    \
+  nonstatic_field(DataLayout,           _header._struct._flags,                        u1)                                    \
+  nonstatic_field(DataLayout,           _header._struct._bci,                          u2)                                    \
+  nonstatic_field(DataLayout,           _cells[0],                                     intptr_t)                              \
   nonstatic_field(MethodCounters,       _interpreter_invocation_count,                 int)                                   \
   nonstatic_field(MethodCounters,       _interpreter_throwout_count,                   u2)                                    \
   nonstatic_field(MethodCounters,       _number_of_breakpoints,                        u2)                                    \
@@ -357,6 +363,7 @@
   nonstatic_field(Method,               _access_flags,                                 AccessFlags)                           \
   nonstatic_field(Method,               _vtable_index,                                 int)                                   \
   nonstatic_field(Method,               _method_size,                                  u2)                                    \
+  nonstatic_field(Method,               _intrinsic_id,                                 u1)                                    \
   nonproduct_nonstatic_field(Method,    _compiled_invocation_count,                    int)                                   \
   volatile_nonstatic_field(Method,      _code,                                         nmethod*)                              \
   nonstatic_field(Method,               _i2i_entry,                                    address)                               \
@@ -443,12 +450,19 @@
      static_field(Universe,                    _bootstrapping,                                bool)                                  \
      static_field(Universe,                    _fully_initialized,                            bool)                                  \
      static_field(Universe,                    _verify_count,                                 int)                                   \
+     static_field(Universe,                    _non_oop_bits,                                 intptr_t)                              \
      static_field(Universe,                    _narrow_oop._base,                             address)                               \
      static_field(Universe,                    _narrow_oop._shift,                            int)                                   \
      static_field(Universe,                    _narrow_oop._use_implicit_null_checks,         bool)                                  \
      static_field(Universe,                    _narrow_klass._base,                           address)                               \
      static_field(Universe,                    _narrow_klass._shift,                          int)                                   \
                                                                                                                                      \
+  /******/                                                                                                                           \
+  /* os */                                                                                                                           \
+  /******/                                                                                                                           \
+                                                                                                                                     \
+     static_field(os,                          _polling_page,                                 address)                               \
+                                                                                                                                     \
   /**********************************************************************************/                                               \
   /* Generation and Space hierarchies                                               */                                               \
   /**********************************************************************************/                                               \
@@ -456,6 +470,7 @@
   unchecked_nonstatic_field(ageTable,          sizes,                                         sizeof(ageTable::sizes))               \
                                                                                                                                      \
   nonstatic_field(BarrierSet,                  _max_covered_regions,                          int)                                   \
+  nonstatic_field(BarrierSet,                  _kind,                                         BarrierSet::Name)                      \
   nonstatic_field(BlockOffsetTable,            _bottom,                                       HeapWord*)                             \
   nonstatic_field(BlockOffsetTable,            _end,                                          HeapWord*)                             \
                                                                                                                                      \
@@ -495,6 +510,7 @@
   nonstatic_field(CollectedHeap,               _barrier_set,                                  BarrierSet*)                           \
   nonstatic_field(CollectedHeap,               _defer_initial_card_mark,                      bool)                                  \
   nonstatic_field(CollectedHeap,               _is_gc_active,                                 bool)                                  \
+  nonstatic_field(CollectedHeap,               _total_collections,                            unsigned int)                          \
   nonstatic_field(CompactibleSpace,            _compaction_top,                               HeapWord*)                             \
   nonstatic_field(CompactibleSpace,            _first_dead,                                   HeapWord*)                             \
   nonstatic_field(CompactibleSpace,            _end_of_live,                                  HeapWord*)                             \
@@ -505,7 +521,7 @@
   nonstatic_field(ContiguousSpace,             _saved_mark_word,                              HeapWord*)                             \
                                                                                                                                      \
   nonstatic_field(DefNewGeneration,            _next_gen,                                     Generation*)                           \
-  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                   \
+  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                  \
   nonstatic_field(DefNewGeneration,            _age_table,                                    ageTable)                              \
   nonstatic_field(DefNewGeneration,            _eden_space,                                   EdenSpace*)                            \
   nonstatic_field(DefNewGeneration,            _from_space,                                   ContiguousSpace*)                      \
@@ -552,6 +568,11 @@
   nonstatic_field(ThreadLocalAllocBuffer,      _desired_size,                                 size_t)                                \
   nonstatic_field(ThreadLocalAllocBuffer,      _refill_waste_limit,                           size_t)                                \
      static_field(ThreadLocalAllocBuffer,      _target_refills,                               unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _number_of_refills,                            unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _fast_refill_waste,                            unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _slow_refill_waste,                            unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _gc_waste,                                     unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _slow_allocations,                             unsigned)                              \
   nonstatic_field(VirtualSpace,                _low_boundary,                                 char*)                                 \
   nonstatic_field(VirtualSpace,                _high_boundary,                                char*)                                 \
   nonstatic_field(VirtualSpace,                _low,                                          char*)                                 \
@@ -713,6 +734,13 @@
                                                                                                                                      \
   static_field(ClassLoaderDataGraph,           _head,                                         ClassLoaderData*)                      \
                                                                                                                                      \
+  /**********/                                                                                                                       \
+  /* Arrays */                                                                                                                       \
+  /**********/                                                                                                                       \
+                                                                                                                                     \
+  nonstatic_field(Array<Klass*>,               _length,                                       int)                                   \
+  nonstatic_field(Array<Klass*>,               _data[0],                                      Klass*)                                \
+                                                                                                                                     \
   /*******************/                                                                                                              \
   /* GrowableArrays  */                                                                                                              \
   /*******************/                                                                                                              \
@@ -720,7 +748,7 @@
   nonstatic_field(GenericGrowableArray,        _len,                                          int)                                   \
   nonstatic_field(GenericGrowableArray,        _max,                                          int)                                   \
   nonstatic_field(GenericGrowableArray,        _arena,                                        Arena*)                                \
-  nonstatic_field(GrowableArray<int>,               _data,                                         int*) \
+  nonstatic_field(GrowableArray<int>,          _data,                                         int*)                                  \
                                                                                                                                      \
   /********************************/                                                                                                 \
   /* CodeCache (NOTE: incomplete) */                                                                                                 \
@@ -763,7 +791,20 @@
   /* StubRoutines (NOTE: incomplete) */                                                                                              \
   /***********************************/                                                                                              \
                                                                                                                                      \
+     static_field(StubRoutines,                _verify_oop_count,                             jint)                                  \
      static_field(StubRoutines,                _call_stub_return_address,                     address)                               \
+     static_field(StubRoutines,                _aescrypt_encryptBlock,                        address)                               \
+     static_field(StubRoutines,                _aescrypt_decryptBlock,                        address)                               \
+     static_field(StubRoutines,                _cipherBlockChaining_encryptAESCrypt,          address)                               \
+     static_field(StubRoutines,                _cipherBlockChaining_decryptAESCrypt,          address)                               \
+     static_field(StubRoutines,                _updateBytesCRC32,                             address)                               \
+     static_field(StubRoutines,                _crc_table_adr,                                address)                               \
+                                                                                                                                     \
+  /*****************/                                                                                                                \
+  /* SharedRuntime */                                                                                                                \
+  /*****************/                                                                                                                \
+                                                                                                                                     \
+     static_field(SharedRuntime,               _ic_miss_blob,                                 RuntimeStub*)                          \
                                                                                                                                      \
   /***************************************/                                                                                          \
   /* PcDesc and other compiled code info */                                                                                          \
@@ -853,6 +894,7 @@
    volatile_nonstatic_field(Thread,            _suspend_flags,                                uint32_t)                              \
   nonstatic_field(Thread,                      _active_handles,                               JNIHandleBlock*)                       \
   nonstatic_field(Thread,                      _tlab,                                         ThreadLocalAllocBuffer)                \
+  nonstatic_field(Thread,                      _allocated_bytes,                              jlong)                                 \
   nonstatic_field(Thread,                      _current_pending_monitor,                      ObjectMonitor*)                        \
   nonstatic_field(Thread,                      _current_pending_monitor_is_from_java,         bool)                                  \
   nonstatic_field(Thread,                      _current_waiting_monitor,                      ObjectMonitor*)                        \
@@ -866,6 +908,7 @@
   nonstatic_field(JavaThread,                  _pending_async_exception,                      oop)                                   \
   volatile_nonstatic_field(JavaThread,         _exception_oop,                                oop)                                   \
   volatile_nonstatic_field(JavaThread,         _exception_pc,                                 address)                               \
+  volatile_nonstatic_field(JavaThread,         _is_method_handle_return,                      int)                                   \
   nonstatic_field(JavaThread,                  _is_compiling,                                 bool)                                  \
   nonstatic_field(JavaThread,                  _special_runtime_exit_condition,               JavaThread::AsyncRequests)             \
   nonstatic_field(JavaThread,                  _saved_exception_pc,                           address)                               \
@@ -875,6 +918,8 @@
   nonstatic_field(JavaThread,                  _stack_size,                                   size_t)                                \
   nonstatic_field(JavaThread,                  _vframe_array_head,                            vframeArray*)                          \
   nonstatic_field(JavaThread,                  _vframe_array_last,                            vframeArray*)                          \
+  nonstatic_field(JavaThread,                  _satb_mark_queue,                              ObjPtrQueue)                           \
+  nonstatic_field(JavaThread,                  _dirty_card_queue,                             DirtyCardQueue)                        \
   nonstatic_field(Thread,                      _resource_area,                                ResourceArea*)                         \
   nonstatic_field(CompilerThread,              _env,                                          ciEnv*)                                \
                                                                                                                                      \
@@ -1187,7 +1232,7 @@
   unchecked_nonstatic_field(Array<int>,            _data,                                     sizeof(int))                           \
   unchecked_nonstatic_field(Array<u1>,             _data,                                     sizeof(u1))                            \
   unchecked_nonstatic_field(Array<u2>,             _data,                                     sizeof(u2))                            \
-  unchecked_nonstatic_field(Array<Method*>, _data,                                     sizeof(Method*))                \
+  unchecked_nonstatic_field(Array<Method*>,        _data,                                     sizeof(Method*))                       \
   unchecked_nonstatic_field(Array<Klass*>,         _data,                                     sizeof(Klass*))                        \
                                                                                                                                      \
   /*********************************/                                                                                                \
@@ -1203,7 +1248,7 @@
   /* Miscellaneous fields */                                                                                                         \
   /************************/                                                                                                         \
                                                                                                                                      \
-  nonstatic_field(CompileTask,                 _method,                                      Method*)                         \
+  nonstatic_field(CompileTask,                 _method,                                      Method*)                                \
   nonstatic_field(CompileTask,                 _osr_bci,                                     int)                                    \
   nonstatic_field(CompileTask,                 _comp_level,                                  int)                                    \
   nonstatic_field(CompileTask,                 _compile_id,                                  uint)                                   \
@@ -1217,7 +1262,11 @@
                                                                                                                                      \
   nonstatic_field(vframeArrayElement,          _frame,                                       frame)                                  \
   nonstatic_field(vframeArrayElement,          _bci,                                         int)                                    \
-  nonstatic_field(vframeArrayElement,          _method,                                      Method*)                         \
+  nonstatic_field(vframeArrayElement,          _method,                                      Method*)                                \
+                                                                                                                                     \
+  nonstatic_field(PtrQueue,                    _active,                                      bool)                                   \
+  nonstatic_field(PtrQueue,                    _buf,                                         void**)                                 \
+  nonstatic_field(PtrQueue,                    _index,                                       size_t)                                 \
                                                                                                                                      \
   nonstatic_field(AccessFlags,                 _flags,                                       jint)                                   \
   nonstatic_field(elapsedTimer,                _counter,                                     jlong)                                  \
@@ -1363,7 +1412,7 @@
   /* MetadataOopDesc hierarchy (NOTE: some missing) */                    \
   /**************************************************/                    \
                                                                           \
-  declare_toplevel_type(CompiledICHolder)                          \
+  declare_toplevel_type(CompiledICHolder)                                 \
   declare_toplevel_type(MetaspaceObj)                                     \
     declare_type(Metadata, MetaspaceObj)                                  \
     declare_type(Klass, Metadata)                                         \
@@ -1374,17 +1423,20 @@
         declare_type(InstanceClassLoaderKlass, InstanceKlass)             \
         declare_type(InstanceMirrorKlass, InstanceKlass)                  \
         declare_type(InstanceRefKlass, InstanceKlass)                     \
-    declare_type(ConstantPool, Metadata)                           \
-    declare_type(ConstantPoolCache, MetaspaceObj)                  \
-    declare_type(MethodData, Metadata)                             \
-    declare_type(Method, Metadata)                                 \
-    declare_type(MethodCounters, MetaspaceObj)                     \
-    declare_type(ConstMethod, MetaspaceObj)                        \
+    declare_type(ConstantPool, Metadata)                                  \
+    declare_type(ConstantPoolCache, MetaspaceObj)                         \
+    declare_type(MethodData, Metadata)                                    \
+    declare_type(Method, Metadata)                                        \
+    declare_type(MethodCounters, MetaspaceObj)                            \
+    declare_type(ConstMethod, MetaspaceObj)                               \
+                                                                          \
+  declare_toplevel_type(vtableEntry)                                      \
                                                                           \
            declare_toplevel_type(Symbol)                                  \
            declare_toplevel_type(Symbol*)                                 \
   declare_toplevel_type(volatile Metadata*)                               \
                                                                           \
+  declare_toplevel_type(DataLayout)                                       \
   declare_toplevel_type(nmethodBucket)                                    \
                                                                           \
   /********/                                                              \
@@ -1432,6 +1484,7 @@
            declare_type(ModRefBarrierSet,             BarrierSet)         \
            declare_type(CardTableModRefBS,            ModRefBarrierSet)   \
            declare_type(CardTableModRefBSForCTRS,     CardTableModRefBS)  \
+  declare_toplevel_type(BarrierSet::Name)                                 \
   declare_toplevel_type(GenRemSet)                                        \
            declare_type(CardTableRS,                  GenRemSet)          \
   declare_toplevel_type(BlockOffsetSharedArray)                           \
@@ -1450,6 +1503,8 @@
   declare_toplevel_type(ThreadLocalAllocBuffer)                           \
   declare_toplevel_type(VirtualSpace)                                     \
   declare_toplevel_type(WaterMark)                                        \
+  declare_toplevel_type(ObjPtrQueue)                                      \
+  declare_toplevel_type(DirtyCardQueue)                                   \
                                                                           \
   /* Pointers to Garbage Collection types */                              \
                                                                           \
@@ -2068,6 +2123,7 @@
   declare_toplevel_type(StubQueue*)                                       \
   declare_toplevel_type(Thread*)                                          \
   declare_toplevel_type(Universe)                                         \
+  declare_toplevel_type(os)                                               \
   declare_toplevel_type(vframeArray)                                      \
   declare_toplevel_type(vframeArrayElement)                               \
   declare_toplevel_type(Annotations*)                                     \
@@ -2076,6 +2132,8 @@
   /* Miscellaneous types */                                               \
   /***************/                                                       \
                                                                           \
+  declare_toplevel_type(PtrQueue)                                         \
+                                                                          \
   /* freelist */                                                          \
   declare_toplevel_type(FreeChunk*)                                       \
   declare_toplevel_type(Metablock*)                                       \
@@ -2106,6 +2164,7 @@
   /* Useful globals */                                                    \
   /******************/                                                    \
                                                                           \
+  declare_preprocessor_constant("ASSERT", DEBUG_ONLY(1) NOT_DEBUG(0))     \
                                                                           \
   /**************/                                                        \
   /* Stack bias */                                                        \
@@ -2122,6 +2181,8 @@
   declare_constant(BytesPerWord)                                          \
   declare_constant(BytesPerLong)                                          \
                                                                           \
+  declare_constant(LogKlassAlignmentInBytes)                              \
+                                                                          \
   /********************************************/                          \
   /* Generation and Space Hierarchy Constants */                          \
   /********************************************/                          \
@@ -2130,6 +2191,9 @@
                                                                           \
   declare_constant(BarrierSet::ModRef)                                    \
   declare_constant(BarrierSet::CardTableModRef)                           \
+  declare_constant(BarrierSet::CardTableExtension)                        \
+  declare_constant(BarrierSet::G1SATBCT)                                  \
+  declare_constant(BarrierSet::G1SATBCTLogging)                           \
   declare_constant(BarrierSet::Other)                                     \
                                                                           \
   declare_constant(BlockOffsetSharedArray::LogN)                          \
@@ -2248,8 +2312,11 @@
   declare_constant(Klass::_primary_super_limit)                           \
   declare_constant(Klass::_lh_instance_slow_path_bit)                     \
   declare_constant(Klass::_lh_log2_element_size_shift)                    \
+  declare_constant(Klass::_lh_log2_element_size_mask)                     \
   declare_constant(Klass::_lh_element_type_shift)                         \
+  declare_constant(Klass::_lh_element_type_mask)                          \
   declare_constant(Klass::_lh_header_size_shift)                          \
+  declare_constant(Klass::_lh_header_size_mask)                           \
   declare_constant(Klass::_lh_array_tag_shift)                            \
   declare_constant(Klass::_lh_array_tag_type_value)                       \
   declare_constant(Klass::_lh_array_tag_obj_value)                        \
@@ -2268,6 +2335,12 @@
   declare_constant(ConstMethod::_has_default_annotations)                 \
   declare_constant(ConstMethod::_has_type_annotations)                    \
                                                                           \
+  /**************/                                                        \
+  /* DataLayout */                                                        \
+  /**************/                                                        \
+                                                                          \
+  declare_constant(DataLayout::cell_size)                                 \
+                                                                          \
   /*************************************/                                 \
   /* InstanceKlass enum                */                                 \
   /*************************************/                                 \
@@ -2402,6 +2475,13 @@
   declare_constant(Deoptimization::Reason_LIMIT)                          \
   declare_constant(Deoptimization::Reason_RECORDED_LIMIT)                 \
                                                                           \
+  declare_constant(Deoptimization::Action_none)                           \
+  declare_constant(Deoptimization::Action_maybe_recompile)                \
+  declare_constant(Deoptimization::Action_reinterpret)                    \
+  declare_constant(Deoptimization::Action_make_not_entrant)               \
+  declare_constant(Deoptimization::Action_make_not_compilable)            \
+  declare_constant(Deoptimization::Action_LIMIT)                          \
+                                                                          \
   /*********************/                                                 \
   /* Matcher (C2 only) */                                                 \
   /*********************/                                                 \
@@ -2468,6 +2548,16 @@
   declare_constant(vmSymbols::FIRST_SID)                                  \
   declare_constant(vmSymbols::SID_LIMIT)                                  \
                                                                           \
+  /****************/                                                      \
+  /* vmIntrinsics */                                                      \
+  /****************/                                                      \
+                                                                          \
+  declare_constant(vmIntrinsics::_invokeBasic)                            \
+  declare_constant(vmIntrinsics::_linkToVirtual)                          \
+  declare_constant(vmIntrinsics::_linkToStatic)                           \
+  declare_constant(vmIntrinsics::_linkToSpecial)                          \
+  declare_constant(vmIntrinsics::_linkToInterface)                        \
+                                                                          \
   /********************************/                                      \
   /* Calling convention constants */                                      \
   /********************************/                                      \
@@ -2515,6 +2605,8 @@
   declare_constant(markOopDesc::biased_lock_bit_in_place)                 \
   declare_constant(markOopDesc::age_mask)                                 \
   declare_constant(markOopDesc::age_mask_in_place)                        \
+  declare_constant(markOopDesc::epoch_mask)                               \
+  declare_constant(markOopDesc::epoch_mask_in_place)                      \
   declare_constant(markOopDesc::hash_mask)                                \
   declare_constant(markOopDesc::hash_mask_in_place)                       \
   declare_constant(markOopDesc::biased_lock_alignment)                    \
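
Note on the vmStructs.cpp hunk above: it exports additional VM internals to the serviceability agent's type database, among them the TLAB refill/waste counters, the Array<Klass*> layout, several StubRoutines entry points, the Deoptimization actions, the method-handle vmIntrinsics ids, and constants such as DataLayout::cell_size. Below is a minimal, hedged Java sketch of how an SA-based tool could read a few of the newly declared integer constants, assuming they are registered under these names in the agent's int-constant table; the class name and the particular constants chosen are illustrative only.

import sun.jvm.hotspot.HotSpotAgent;
import sun.jvm.hotspot.types.TypeDataBase;

// Illustrative sketch: attach to a live HotSpot VM and print a few of the
// constants that the change above makes visible to the serviceability agent.
public class DumpNewVmConstants {
    public static void main(String[] args) {
        HotSpotAgent agent = new HotSpotAgent();
        agent.attach(Integer.parseInt(args[0]));   // pid of a live HotSpot process
        try {
            TypeDataBase db = agent.getTypeDataBase();
            String[] names = {
                "DataLayout::cell_size",
                "Deoptimization::Action_reinterpret",
                "Klass::_lh_header_size_mask"
            };
            for (String n : names) {
                System.out.println(n + " = " + db.lookupIntConstant(n));
            }
        } finally {
            agent.detach();
        }
    }
}
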
--- a/hotspot/src/share/vm/services/gcNotifier.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/services/gcNotifier.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -209,7 +209,7 @@
   GCNotificationRequest *request = getRequest();
   if (request != NULL) {
     NotificationMark nm(request);
-    Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, THREAD);
+    Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, CHECK);
 
     Handle objName = java_lang_String::create_from_str(request->gcManager->name(), CHECK);
     Handle objAction = java_lang_String::create_from_str(request->gcAction, CHECK);
--- a/hotspot/src/share/vm/services/memPtr.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/services/memPtr.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,9 +34,9 @@
   jint seq = Atomic::add(1, &_seq_number);
   if (seq < 0) {
     MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
+  } else {
+    NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
   }
-  assert(seq > 0, "counter overflow");
-  NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
   return seq;
 }
 
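
The memPtr.cpp hunk replaces an assert on sequence-number overflow with an explicit branch: a negative result means the counter wrapped and tracking is shut down, otherwise a debug-only high-water mark is updated. Purely as an illustration of that pattern (not VM code), a small Java sketch with hypothetical names:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of the overflow-handling pattern above: a negative sequence number
// means the counter wrapped, so the subsystem shuts down instead of asserting.
final class SequenceGenerator {
    private static final AtomicInteger seqNumber = new AtomicInteger();
    private static final AtomicLong maxSeqNumber = new AtomicLong();   // debug-only high-water mark

    static int next() {
        int seq = seqNumber.incrementAndGet();
        if (seq < 0) {
            shutdownOnOverflow();                              // analogous to MemTracker::shutdown(...)
        } else {
            maxSeqNumber.accumulateAndGet(seq, Math::max);     // analogous to the NOT_PRODUCT update
        }
        return seq;
    }

    private static void shutdownOnOverflow() {
        throw new IllegalStateException("sequence number overflow");
    }
}
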
--- a/hotspot/src/share/vm/services/memoryPool.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/services/memoryPool.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -260,10 +260,10 @@
 }
 
 MetaspacePool::MetaspacePool() :
-  MemoryPool("Metaspace", NonHeap, capacity_in_bytes(), calculate_max_size(), true, false) { }
+  MemoryPool("Metaspace", NonHeap, 0, calculate_max_size(), true, false) { }
 
 MemoryUsage MetaspacePool::get_memory_usage() {
-  size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
+  size_t committed = MetaspaceAux::committed_bytes();
   return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
 }
 
@@ -271,26 +271,19 @@
   return MetaspaceAux::allocated_used_bytes();
 }
 
-size_t MetaspacePool::capacity_in_bytes() const {
-  return MetaspaceAux::allocated_capacity_bytes();
-}
-
 size_t MetaspacePool::calculate_max_size() const {
-  return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize : max_uintx;
+  return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize :
+                                             MemoryUsage::undefined_size();
 }
 
 CompressedKlassSpacePool::CompressedKlassSpacePool() :
-  MemoryPool("Compressed Class Space", NonHeap, capacity_in_bytes(), ClassMetaspaceSize, true, false) { }
+  MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
 
 size_t CompressedKlassSpacePool::used_in_bytes() {
   return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
 }
 
-size_t CompressedKlassSpacePool::capacity_in_bytes() const {
-  return MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
-}
-
 MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
-  size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
+  size_t committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
   return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
 }
--- a/hotspot/src/share/vm/services/memoryPool.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/services/memoryPool.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -224,7 +224,6 @@
 
 class MetaspacePool : public MemoryPool {
   size_t calculate_max_size() const;
-  size_t capacity_in_bytes() const;
  public:
   MetaspacePool();
   MemoryUsage get_memory_usage();
@@ -232,7 +231,6 @@
 };
 
 class CompressedKlassSpacePool : public MemoryPool {
-  size_t capacity_in_bytes() const;
  public:
   CompressedKlassSpacePool();
   MemoryUsage get_memory_usage();
--- a/hotspot/src/share/vm/services/memoryService.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/services/memoryService.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -409,7 +409,7 @@
   mgr->add_pool(_metaspace_pool);
   _pools_list->append(_metaspace_pool);
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     _compressed_class_pool = new CompressedKlassSpacePool();
     mgr->add_pool(_compressed_class_pool);
     _pools_list->append(_compressed_class_pool);
--- a/hotspot/src/share/vm/services/memoryUsage.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/services/memoryUsage.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -63,10 +63,12 @@
   size_t committed() const { return _committed; }
   size_t max_size()  const { return _maxSize; }
 
+  static size_t undefined_size() { return (size_t) -1; }
+
   inline static jlong convert_to_jlong(size_t val) {
     // In the 64-bit vm, a size_t can overflow a jlong (which is signed).
     jlong ret;
-    if (val == (size_t)-1) {
+    if (val == undefined_size()) {
       ret = -1L;
     } else {
       NOT_LP64(ret = val;)
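
With the memoryPool/memoryUsage changes above, the Metaspace and Compressed Class Space pools report committed bytes from MetaspaceAux and use MemoryUsage::undefined_size() (mapped to -1 by convert_to_jlong) when no maximum is configured. A short Java sketch of how these pools then appear through the standard management API; the pool names match the MemoryPool constructors above, and a reported max of -1 means "undefined":

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.MemoryUsage;

// Prints usage for the non-heap pools touched above. A max of -1 corresponds
// to MemoryUsage::undefined_size() on the VM side.
public class PrintMetaspacePools {
    public static void main(String[] args) {
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            String name = pool.getName();
            if (name.equals("Metaspace") || name.equals("Compressed Class Space")) {
                MemoryUsage u = pool.getUsage();
                System.out.printf("%s: used=%d committed=%d max=%d%n",
                                  name, u.getUsed(), u.getCommitted(), u.getMax());
            }
        }
    }
}
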
--- a/hotspot/src/share/vm/trace/traceMacros.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/trace/traceMacros.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_TRACE_TRACE_MACRO_HPP
 
 #define EVENT_THREAD_EXIT(thread)
+#define EVENT_THREAD_DESTRUCT(thread)
 
 #define TRACE_INIT_ID(k)
 #define TRACE_DATA TraceThreadData
--- a/hotspot/src/share/vm/utilities/bitMap.inline.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/bitMap.inline.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -52,16 +52,16 @@
 
 inline bool BitMap::par_set_bit(idx_t bit) {
   verify_index(bit);
-  volatile idx_t* const addr = word_addr(bit);
-  const idx_t mask = bit_mask(bit);
-  idx_t old_val = *addr;
+  volatile bm_word_t* const addr = word_addr(bit);
+  const bm_word_t mask = bit_mask(bit);
+  bm_word_t old_val = *addr;
 
   do {
-    const idx_t new_val = old_val | mask;
+    const bm_word_t new_val = old_val | mask;
     if (new_val == old_val) {
       return false;     // Someone else beat us to it.
     }
-    const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
                                                       (volatile void*) addr,
                                                       (void*) old_val);
     if (cur_val == old_val) {
@@ -73,16 +73,16 @@
 
 inline bool BitMap::par_clear_bit(idx_t bit) {
   verify_index(bit);
-  volatile idx_t* const addr = word_addr(bit);
-  const idx_t mask = ~bit_mask(bit);
-  idx_t old_val = *addr;
+  volatile bm_word_t* const addr = word_addr(bit);
+  const bm_word_t mask = ~bit_mask(bit);
+  bm_word_t old_val = *addr;
 
   do {
-    const idx_t new_val = old_val & mask;
+    const bm_word_t new_val = old_val & mask;
     if (new_val == old_val) {
       return false;     // Someone else beat us to it.
     }
-    const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
                                                       (volatile void*) addr,
                                                       (void*) old_val);
     if (cur_val == old_val) {
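
par_set_bit and par_clear_bit are compare-and-swap loops over bitmap words; the hunk above only corrects the operand type from the index type (idx_t) to the word type (bm_word_t). For reference, a hedged Java sketch of the same lock-free pattern using AtomicLongArray; the class and method names are illustrative:

import java.util.concurrent.atomic.AtomicLongArray;

// Minimal sketch of the par_set_bit pattern: CAS the containing word until the
// bit is set, or bail out if another thread set it first.
final class ParallelBitMap {
    private final AtomicLongArray words;

    ParallelBitMap(int bits) {
        words = new AtomicLongArray((bits + 63) >>> 6);
    }

    boolean parSetBit(int bit) {
        int index = bit >>> 6;
        long mask = 1L << (bit & 63);
        while (true) {
            long oldVal = words.get(index);
            long newVal = oldVal | mask;
            if (newVal == oldVal) {
                return false;            // someone else beat us to it
            }
            if (words.compareAndSet(index, oldVal, newVal)) {
                return true;             // we set the bit
            }
            // CAS lost the race: reload the word and retry
        }
    }
}
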
--- a/hotspot/src/share/vm/utilities/decoder.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/decoder.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "prims/jvm.h"
-#include "runtime/mutexLocker.hpp"
 #include "runtime/os.hpp"
 #include "utilities/decoder.hpp"
 #include "utilities/vmError.hpp"
@@ -80,6 +79,23 @@
   return decoder;
 }
 
+inline bool DecoderLocker::is_first_error_thread() {
+  return (os::current_thread_id() == VMError::get_first_error_tid());
+}
+
+DecoderLocker::DecoderLocker() :
+  MutexLockerEx(DecoderLocker::is_first_error_thread() ?
+                NULL : Decoder::shared_decoder_lock(), true) {
+  _decoder = is_first_error_thread() ?
+    Decoder::get_error_handler_instance() : Decoder::get_shared_instance();
+  assert(_decoder != NULL, "null decoder");
+}
+
+Mutex* Decoder::shared_decoder_lock() {
+  assert(_shared_decoder_lock != NULL, "Just check");
+  return _shared_decoder_lock;
+}
+
 bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
   assert(_shared_decoder_lock != NULL, "Just check");
   bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
--- a/hotspot/src/share/vm/utilities/decoder.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/decoder.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -28,6 +28,7 @@
 
 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
 
 class AbstractDecoder : public CHeapObj<mtInternal> {
 public:
@@ -124,6 +125,19 @@
 
 protected:
   static Mutex*               _shared_decoder_lock;
+  static Mutex* shared_decoder_lock();
+
+  friend class DecoderLocker;
+};
+
+class DecoderLocker : public MutexLockerEx {
+  AbstractDecoder* _decoder;
+  inline bool is_first_error_thread();
+public:
+  DecoderLocker();
+  AbstractDecoder* decoder() {
+    return _decoder;
+  }
 };
 
 #endif // SHARE_VM_UTILITIES_DECODER_HPP
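
DecoderLocker, added above, selects both the decoder instance and the lock based on whether the caller is the first-error (crash-reporting) thread: that thread skips the shared lock to avoid self-deadlock while an error is being reported. A rough Java sketch of the same idea, with hypothetical names and a simplified thread-id check:

import java.util.concurrent.locks.ReentrantLock;

// Sketch of the DecoderLocker idea: the crash-reporting thread bypasses the
// shared lock (which may already be held by a wedged thread); everyone else
// acquires it for the duration of the guard.
final class DecoderGuard implements AutoCloseable {
    private static final ReentrantLock SHARED_LOCK = new ReentrantLock();
    private static volatile long firstErrorThreadId = -1;   // would be set when error reporting starts

    private final boolean locked;

    DecoderGuard() {
        boolean firstErrorThread = Thread.currentThread().getId() == firstErrorThreadId;
        locked = !firstErrorThread;
        if (locked) {
            SHARED_LOCK.lock();
        }
    }

    @Override
    public void close() {
        if (locked) {
            SHARED_LOCK.unlock();
        }
    }
}
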
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -342,7 +342,7 @@
 }
 
 char* stringStream::as_string() {
-  char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos+1);
+  char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos + 1);
   strncpy(copy, buffer, buffer_pos);
   copy[buffer_pos] = 0;  // terminating null
   return copy;
@@ -355,14 +355,190 @@
 outputStream* gclog_or_tty;
 extern Mutex* tty_lock;
 
+#define EXTRACHARLEN   32
+#define CURRENTAPPX    ".current"
+#define FILENAMEBUFLEN  1024
+// convert YYYY-MM-DD HH:MM:SS to YYYY-MM-DD_HH-MM-SS
+char* get_datetime_string(char *buf, size_t len) {
+  os::local_time_string(buf, len);
+  int i = (int)strlen(buf);
+  while (--i >= 0) {
+    if (buf[i] == ' ') buf[i] = '_';
+    else if (buf[i] == ':') buf[i] = '-';
+  }
+  return buf;
+}
+
+static const char* make_log_name_internal(const char* log_name, const char* force_directory,
+                                                int pid, const char* tms) {
+  const char* basename = log_name;
+  char file_sep = os::file_separator()[0];
+  const char* cp;
+  char  pid_text[32];
+
+  for (cp = log_name; *cp != '\0'; cp++) {
+    if (*cp == '/' || *cp == file_sep) {
+      basename = cp + 1;
+    }
+  }
+  const char* nametail = log_name;
+  // Compute buffer length
+  size_t buffer_length;
+  if (force_directory != NULL) {
+    buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
+                    strlen(basename) + 1;
+  } else {
+    buffer_length = strlen(log_name) + 1;
+  }
+
+  // const char* star = strchr(basename, '*');
+  const char* pts = strstr(basename, "%p");
+  int pid_pos = (pts == NULL) ? -1 : (pts - nametail);
+
+  if (pid_pos >= 0) {
+    jio_snprintf(pid_text, sizeof(pid_text), "pid%u", pid);
+    buffer_length += strlen(pid_text);
+  }
+
+  pts = strstr(basename, "%t");
+  int tms_pos = (pts == NULL) ? -1 : (pts - nametail);
+  if (tms_pos >= 0) {
+    buffer_length += strlen(tms);
+  }
+
+  // Create big enough buffer.
+  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
+
+  strcpy(buf, "");
+  if (force_directory != NULL) {
+    strcat(buf, force_directory);
+    strcat(buf, os::file_separator());
+    nametail = basename;       // completely skip directory prefix
+  }
+
+  // who is first, %p or %t?
+  int first = -1, second = -1;
+  const char *p1st = NULL;
+  const char *p2nd = NULL;
+
+  if (pid_pos >= 0 && tms_pos >= 0) {
+    // contains both %p and %t
+    if (pid_pos < tms_pos) {
+      // case foo%pbar%tmonkey.log
+      first  = pid_pos;
+      p1st   = pid_text;
+      second = tms_pos;
+      p2nd   = tms;
+    } else {
+      // case foo%tbar%pmonkey.log
+      first  = tms_pos;
+      p1st   = tms;
+      second = pid_pos;
+      p2nd   = pid_text;
+    }
+  } else if (pid_pos >= 0) {
+    // contains %p only
+    first  = pid_pos;
+    p1st   = pid_text;
+  } else if (tms_pos >= 0) {
+    // contains %t only
+    first  = tms_pos;
+    p1st   = tms;
+  }
+
+  int buf_pos = (int)strlen(buf);
+  const char* tail = nametail;
+
+  if (first >= 0) {
+    tail = nametail + first + 2;
+    strncpy(&buf[buf_pos], nametail, first);
+    strcpy(&buf[buf_pos + first], p1st);
+    buf_pos = (int)strlen(buf);
+    if (second >= 0) {
+      strncpy(&buf[buf_pos], tail, second - first - 2);
+      strcpy(&buf[buf_pos + second - first - 2], p2nd);
+      tail = nametail + second + 2;
+    }
+  }
+  strcat(buf, tail);      // append rest of name, or all of name
+  return buf;
+}
+
+// log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
+// in log_name, %p => pid1234 and
+//              %t => YYYY-MM-DD_HH-MM-SS
+static const char* make_log_name(const char* log_name, const char* force_directory) {
+  char timestr[32];
+  get_datetime_string(timestr, sizeof(timestr));
+  return make_log_name_internal(log_name, force_directory, os::current_process_id(),
+                                timestr);
+}
+
+#ifndef PRODUCT
+void test_loggc_filename() {
+  int pid;
+  char  tms[32];
+  char  i_result[FILENAMEBUFLEN];
+  const char* o_result;
+  get_datetime_string(tms, sizeof(tms));
+  pid = os::current_process_id();
+
+  // test.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test.log", tms);
+  o_result = make_log_name_internal("test.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // test-%t-%p.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%s-pid%u.log", tms, pid);
+  o_result = make_log_name_internal("test-%t-%p.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t-%%p.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // test-%t%p.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%spid%u.log", tms, pid);
+  o_result = make_log_name_internal("test-%t%p.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t%%p.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // %p%t.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u%s.log", pid, tms);
+  o_result = make_log_name_internal("%p%t.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p%%t.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // %p-test.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u-test.log", pid);
+  o_result = make_log_name_internal("%p-test.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p-test.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // %t.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "%s.log", tms);
+  o_result = make_log_name_internal("%t.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%t.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+}
+#endif // PRODUCT
+
 fileStream::fileStream(const char* file_name) {
   _file = fopen(file_name, "w");
-  _need_close = true;
+  if (_file != NULL) {
+    _need_close = true;
+  } else {
+    warning("Cannot open file %s due to %s\n", file_name, strerror(errno));
+    _need_close = false;
+  }
 }
 
 fileStream::fileStream(const char* file_name, const char* opentype) {
   _file = fopen(file_name, opentype);
-  _need_close = true;
+  if (_file != NULL) {
+    _need_close = true;
+  } else {
+    warning("Cannot open file %s due to %s\n", file_name, strerror(errno));
+    _need_close = false;
+  }
 }
 
 void fileStream::write(const char* s, size_t len) {
@@ -423,34 +599,51 @@
   update_position(s, len);
 }
 
-rotatingFileStream::~rotatingFileStream() {
+// dump vm version, os version, platform info, build id,
+// memory usage and command line flags into header
+void gcLogFileStream::dump_loggc_header() {
+  if (is_open()) {
+    print_cr(Abstract_VM_Version::internal_vm_info_string());
+    os::print_memory_info(this);
+    print("CommandLine flags: ");
+    CommandLineFlags::printSetFlags(this);
+  }
+}
+
+gcLogFileStream::~gcLogFileStream() {
   if (_file != NULL) {
     if (_need_close) fclose(_file);
-    _file      = NULL;
+    _file = NULL;
+  }
+  if (_file_name != NULL) {
     FREE_C_HEAP_ARRAY(char, _file_name, mtInternal);
     _file_name = NULL;
   }
 }
 
-rotatingFileStream::rotatingFileStream(const char* file_name) {
+gcLogFileStream::gcLogFileStream(const char* file_name) {
   _cur_file_num = 0;
   _bytes_written = 0L;
-  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
-  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
-  _file = fopen(_file_name, "w");
-  _need_close = true;
+  _file_name = make_log_name(file_name, NULL);
+
+  // gc log file rotation
+  if (UseGCLogFileRotation && NumberOfGCLogFiles > 1) {
+    char tempbuf[FILENAMEBUFLEN];
+    jio_snprintf(tempbuf, sizeof(tempbuf), "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
+    _file = fopen(tempbuf, "w");
+  } else {
+    _file = fopen(_file_name, "w");
+  }
+  if (_file != NULL) {
+    _need_close = true;
+    dump_loggc_header();
+  } else {
+    warning("Cannot open file %s due to %s\n", _file_name, strerror(errno));
+    _need_close = false;
+  }
 }
 
-rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) {
-  _cur_file_num = 0;
-  _bytes_written = 0L;
-  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
-  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
-  _file = fopen(_file_name, opentype);
-  _need_close = true;
-}
-
-void rotatingFileStream::write(const char* s, size_t len) {
+void gcLogFileStream::write(const char* s, size_t len) {
   if (_file != NULL) {
     size_t count = fwrite(s, 1, len, _file);
     _bytes_written += count;
@@ -466,7 +659,12 @@
 // write to gc log file at safepoint. If in future, changes made for mutator threads or
 // concurrent GC threads to run parallel with VMThread at safepoint, write and rotate_log
 // must be synchronized.
-void rotatingFileStream::rotate_log() {
+void gcLogFileStream::rotate_log() {
+  char time_msg[FILENAMEBUFLEN];
+  char time_str[EXTRACHARLEN];
+  char current_file_name[FILENAMEBUFLEN];
+  char renamed_file_name[FILENAMEBUFLEN];
+
   if (_bytes_written < (jlong)GCLogFileSize) {
     return;
   }
@@ -481,27 +679,89 @@
     // rotate in same file
     rewind();
     _bytes_written = 0L;
+    jio_snprintf(time_msg, sizeof(time_msg), "File  %s rotated at %s\n",
+                 _file_name, os::local_time_string((char *)time_str, sizeof(time_str)));
+    write(time_msg, strlen(time_msg));
+    dump_loggc_header();
     return;
   }
 
-  // rotate file in names file.0, file.1, file.2, ..., file.<MaxGCLogFileNumbers-1>
-  // close current file, rotate to next file
+#if defined(_WINDOWS)
+#ifndef F_OK
+#define F_OK 0
+#endif
+#endif // _WINDOWS
+
+  // rotate file in names extended_filename.0, extended_filename.1, ...,
+  // extended_filename.<NumberOfGCLogFiles - 1>. Current rotation file name will
+  // have a form of extended_filename.<i>.current where i is the current rotation
+  // file number. After it reaches max file size, the file will be saved and renamed
+  // with .current removed from its tail.
+  size_t filename_len = strlen(_file_name);
   if (_file != NULL) {
-    _cur_file_num ++;
-    if (_cur_file_num >= NumberOfGCLogFiles) _cur_file_num = 0;
-    jio_snprintf(_file_name, strlen(Arguments::gc_log_filename()) + 10, "%s.%d",
-             Arguments::gc_log_filename(), _cur_file_num);
+    jio_snprintf(renamed_file_name, filename_len + EXTRACHARLEN, "%s.%d",
+                 _file_name, _cur_file_num);
+    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
+                 _file_name, _cur_file_num);
+    jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file has reached the"
+                           " maximum size. Saved as %s\n",
+                           os::local_time_string((char *)time_str, sizeof(time_str)),
+                           renamed_file_name);
+    write(time_msg, strlen(time_msg));
+
     fclose(_file);
     _file = NULL;
+
+    bool can_rename = true;
+    if (access(current_file_name, F_OK) != 0) {
+      // current file does not exist?
+      warning("No source file exists, cannot rename\n");
+      can_rename = false;
+    }
+    if (can_rename) {
+      if (access(renamed_file_name, F_OK) == 0) {
+        if (remove(renamed_file_name) != 0) {
+          warning("Could not delete existing file %s\n", renamed_file_name);
+          can_rename = false;
+        }
+      } else {
+        // file does not exist, ok to rename
+      }
+    }
+    if (can_rename && rename(current_file_name, renamed_file_name) != 0) {
+      warning("Could not rename %s to %s\n", _file_name, renamed_file_name);
+    }
   }
-  _file = fopen(_file_name, "w");
+
+  _cur_file_num++;
+  if (_cur_file_num > NumberOfGCLogFiles - 1) _cur_file_num = 0;
+  jio_snprintf(current_file_name,  filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
+               _file_name, _cur_file_num);
+  _file = fopen(current_file_name, "w");
+
   if (_file != NULL) {
     _bytes_written = 0L;
     _need_close = true;
+    // reuse current_file_name for time_msg
+    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN,
+                 "%s.%d", _file_name, _cur_file_num);
+    jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file created %s\n",
+                           os::local_time_string((char *)time_str, sizeof(time_str)),
+                           current_file_name);
+    write(time_msg, strlen(time_msg));
+    dump_loggc_header();
+    // remove the existing file
+    if (access(current_file_name, F_OK) == 0) {
+      if (remove(current_file_name) != 0) {
+        warning("Could not delete existing file %s\n", current_file_name);
+      }
+    }
   } else {
-    tty->print_cr("failed to open rotation log file %s due to %s\n",
+    warning("failed to open rotation log file %s due to %s\n"
+            "Turned off GC log file rotation\n",
                   _file_name, strerror(errno));
     _need_close = false;
+    FLAG_SET_DEFAULT(UseGCLogFileRotation, false);
   }
 }
 
@@ -530,69 +790,9 @@
   return _log_file != NULL;
 }
 
-static const char* make_log_name(const char* log_name, const char* force_directory) {
-  const char* basename = log_name;
-  char file_sep = os::file_separator()[0];
-  const char* cp;
-  for (cp = log_name; *cp != '\0'; cp++) {
-    if (*cp == '/' || *cp == file_sep) {
-      basename = cp+1;
-    }
-  }
-  const char* nametail = log_name;
-
-  // Compute buffer length
-  size_t buffer_length;
-  if (force_directory != NULL) {
-    buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
-                    strlen(basename) + 1;
-  } else {
-    buffer_length = strlen(log_name) + 1;
-  }
-
-  const char* star = strchr(basename, '*');
-  int star_pos = (star == NULL) ? -1 : (star - nametail);
-  int skip = 1;
-  if (star == NULL) {
-    // Try %p
-    star = strstr(basename, "%p");
-    if (star != NULL) {
-      skip = 2;
-    }
-  }
-  star_pos = (star == NULL) ? -1 : (star - nametail);
-
-  char pid[32];
-  if (star_pos >= 0) {
-    jio_snprintf(pid, sizeof(pid), "%u", os::current_process_id());
-    buffer_length += strlen(pid);
-  }
-
-  // Create big enough buffer.
-  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
-
-  strcpy(buf, "");
-  if (force_directory != NULL) {
-    strcat(buf, force_directory);
-    strcat(buf, os::file_separator());
-    nametail = basename;       // completely skip directory prefix
-  }
-
-  if (star_pos >= 0) {
-    // convert foo*bar.log or foo%pbar.log to foo123bar.log
-    int buf_pos = (int) strlen(buf);
-    strncpy(&buf[buf_pos], nametail, star_pos);
-    strcpy(&buf[buf_pos + star_pos], pid);
-    nametail += star_pos + skip;  // skip prefix and pid format
-  }
-
-  strcat(buf, nametail);      // append rest of name, or all of name
-  return buf;
-}
-
 void defaultStream::init_log() {
   // %%% Need a MutexLocker?
-  const char* log_name = LogFile != NULL ? LogFile : "hotspot.log";
+  const char* log_name = LogFile != NULL ? LogFile : "hotspot_pid%p.log";
   const char* try_name = make_log_name(log_name, NULL);
   fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
   if (!file->is_open()) {
@@ -603,14 +803,15 @@
     // Note:  This feature is for maintainer use only.  No need for L10N.
     jio_print(warnbuf);
     FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
-    try_name = make_log_name("hs_pid%p.log", os::get_temp_directory());
+    try_name = make_log_name(log_name, os::get_temp_directory());
     jio_snprintf(warnbuf, sizeof(warnbuf),
                  "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
     jio_print(warnbuf);
     delete file;
     file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
-    FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
   }
+  FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
+
   if (file->is_open()) {
     _log_file = file;
     xmlStream* xs = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file);
@@ -877,11 +1078,8 @@
 
   gclog_or_tty = tty; // default to tty
   if (Arguments::gc_log_filename() != NULL) {
-    fileStream * gclog  = UseGCLogFileRotation ?
-                          new(ResourceObj::C_HEAP, mtInternal)
-                             rotatingFileStream(Arguments::gc_log_filename()) :
-                          new(ResourceObj::C_HEAP, mtInternal)
-                             fileStream(Arguments::gc_log_filename());
+    fileStream * gclog  = new(ResourceObj::C_HEAP, mtInternal)
+                             gcLogFileStream(Arguments::gc_log_filename());
     if (gclog->is_open()) {
       // now we update the time stamp of the GC log to be synced up
       // with tty.
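
The make_log_name code added above expands %p to pid<process id> and %t to a YYYY-MM-DD_HH-MM-SS timestamp in -Xloggc:/-XX:LogFile= file names, handling either ordering of the two placeholders. A simplified Java sketch of that expansion (it substitutes every occurrence rather than just the first, and all names here are illustrative):

import java.text.SimpleDateFormat;
import java.util.Date;

// Sketch of the %p / %t expansion performed by make_log_name:
//   %p -> "pid" + process id, %t -> YYYY-MM-DD_HH-MM-SS
public class LogNameExpander {
    static String expand(String pattern, long pid) {
        String timestamp = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss").format(new Date());
        return pattern.replace("%p", "pid" + pid)
                      .replace("%t", timestamp);
    }

    public static void main(String[] args) {
        long pid = ProcessHandle.current().pid();
        System.out.println(expand("gc-%t-%p.log", pid));   // e.g. gc-2013-09-20_11-09-25-pid1234.log
    }
}
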
--- a/hotspot/src/share/vm/utilities/ostream.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/ostream.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -231,20 +231,24 @@
   void flush() {};
 };
 
-class rotatingFileStream : public fileStream {
+class gcLogFileStream : public fileStream {
  protected:
-  char*  _file_name;
+  const char*  _file_name;
   jlong  _bytes_written;
-  uintx  _cur_file_num;             // current logfile rotation number, from 0 to MaxGCLogFileNumbers-1
+  uintx  _cur_file_num;             // current logfile rotation number, from 0 to NumberOfGCLogFiles-1
  public:
-  rotatingFileStream(const char* file_name);
-  rotatingFileStream(const char* file_name, const char* opentype);
-  rotatingFileStream(FILE* file) : fileStream(file) {}
-  ~rotatingFileStream();
+  gcLogFileStream(const char* file_name);
+  ~gcLogFileStream();
   virtual void write(const char* c, size_t len);
   virtual void rotate_log();
+  void dump_loggc_header();
 };
 
+#ifndef PRODUCT
+// unit test for checking -Xloggc:<filename> parsing result
+void test_loggc_filename();
+#endif
+
 void ostream_init();
 void ostream_init_log();
 void ostream_exit();
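
gcLogFileStream, declared above, keeps the active rotation file as <base>.<N>.current; when it reaches GCLogFileSize the file is renamed to <base>.<N> and the next index, wrapping at NumberOfGCLogFiles, becomes the new .current file. A simplified Java sketch of one rotation step under those naming assumptions (error handling and the log-header dump are omitted, and the class name is illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

// One rotation step: retire <base>.<cur>.current as <base>.<cur>, then start
// writing to <base>.<next>.current, wrapping the index at numberOfFiles.
final class GcLogRotationSketch {
    static int rotate(String base, int cur, int numberOfFiles) throws IOException {
        Path active  = Paths.get(base + "." + cur + ".current");
        Path retired = Paths.get(base + "." + cur);
        if (Files.exists(active)) {
            Files.move(active, retired, StandardCopyOption.REPLACE_EXISTING);
        }
        int next = (cur + 1) % numberOfFiles;
        Files.deleteIfExists(Paths.get(base + "." + next));        // stale file left from a previous wrap-around
        Path newActive = Paths.get(base + "." + next + ".current");
        Files.newOutputStream(newActive).close();                  // create/truncate the new active segment
        return next;
    }
}
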
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Fri Sep 20 11:09:25 2013 -0700
@@ -574,6 +574,10 @@
   STEP(120, "(printing native stack)" )
 
      if (_verbose) {
+     if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
+       // We have printed the native stack in platform-specific code
+       // Windows/x64 needs special handling.
+     } else {
        frame fr = _context ? os::fetch_frame_from_context(_context)
                            : os::current_frame();
 
@@ -604,6 +608,7 @@
           st->cr();
        }
      }
+   }
 
   STEP(130, "(printing Java stack)" )
 
--- a/hotspot/src/share/vm/utilities/vmError.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/vmError.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -136,6 +136,10 @@
 
   // check to see if fatal error reporting is in progress
   static bool fatal_error_in_progress() { return first_error != NULL; }
+
+  static jlong get_first_error_tid() {
+    return first_error_tid;
+  }
 };
 
 #endif // SHARE_VM_UTILITIES_VMERROR_HPP
--- a/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp	Fri Sep 20 11:09:25 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,7 @@
 #define SHARE_VM_UTILITIES_YIELDINGWORKGROUP_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
 #include "utilities/workgroup.hpp"
-#endif // INCLUDE_ALL_GCS
-
 
 // Forward declarations
 class YieldingFlexibleWorkGang;
--- a/hotspot/test/TEST.groups	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/TEST.groups	Fri Sep 20 11:09:25 2013 -0700
@@ -62,7 +62,7 @@
 #
 needs_jdk = \
   gc/TestG1ZeroPGCTJcmdThreadPrint.java \
-  gc/metaspace/ClassMetaspaceSizeInJmapHeap.java \
+  gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
   gc/metaspace/TestMetaspacePerfCounters.java \
   runtime/6819213/TestBootNativeLibraryPath.java \
   runtime/6878713/Test6878713.sh \
@@ -84,6 +84,7 @@
   runtime/NMT/ThreadedVirtualAllocTestType.java \
   runtime/NMT/VirtualAllocTestType.java \
   runtime/RedefineObject/TestRedefineObject.java \
+  runtime/XCheckJniJsig/XCheckJSig.java \
   serviceability/attach/AttachWithStalePidFile.java
 
 # JRE adds further tests to compact3
@@ -159,7 +160,18 @@
   gc/g1/TestRegionAlignment.java \
   gc/g1/TestShrinkToOneRegion.java \
   gc/metaspace/G1AddMetaspaceDependency.java \
-  runtime/6929067/Test6929067.sh
+  gc/startup_warnings/TestCMS.java \
+  gc/startup_warnings/TestCMSIncrementalMode.java \
+  gc/startup_warnings/TestCMSNoIncrementalMode.java \
+  gc/startup_warnings/TestDefaultMaxRAMFraction.java \
+  gc/startup_warnings/TestDefNewCMS.java \
+  gc/startup_warnings/TestIncGC.java \
+  gc/startup_warnings/TestParallelGC.java \
+  gc/startup_warnings/TestParallelScavengeSerialOld.java \
+  gc/startup_warnings/TestParNewCMS.java \
+  gc/startup_warnings/TestParNewSerialOld.java \
+  runtime/6929067/Test6929067.sh \
+  runtime/SharedArchiveFile/SharedArchiveFile.java
 
 # Minimal VM on Compact 2 adds in some compact2 tests
 #
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/TestObjectAlignment.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestObjectAlignment
+ * @key gc
+ * @bug 8021823
+ * @summary G1: Concurrent marking crashes with -XX:ObjectAlignmentInBytes>=32 in 64bit VMs
+ * @library /testlibrary
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128 TestObjectAlignment
+ * @run main/othervm -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256 TestObjectAlignment
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestObjectAlignment {
+
+  public static byte[] garbage;
+
+  private static boolean runsOn32bit() {
+    return System.getProperty("sun.arch.data.model").equals("32");
+  }
+
+  public static void main(String[] args) throws Exception {
+    if (runsOn32bit()) {
+      // 32 bit VMs do not allow setting ObjectAlignmentInBytes, so there is nothing to verify; the test still runs and simply returns.
+      return;
+    }
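+    // Allocate some short-lived garbage and trigger a few GCs (concurrent ones when
+    // -XX:+ExplicitGCInvokesConcurrent is set) with the configured object alignment.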
+    for (int i = 0; i < 10; i++) {
+      garbage = new byte[1000];
+      System.gc();
+    }
+  }
+}
--- a/hotspot/test/gc/TestVerifyDuringStartup.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/gc/TestVerifyDuringStartup.java	Fri Sep 20 11:09:25 2013 -0700
@@ -48,7 +48,7 @@
                                              "-XX:+VerifyDuringStartup",
                                              "-version"});
 
-    System.out.print("Testing:\n" + JDKToolFinder.getCurrentJDKTool("java"));
+    System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
     for (int i = 0; i < vmOpts.size(); i += 1) {
       System.out.print(" " + vmOpts.get(i));
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestAlignmentToUseLargePages.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestAlignmentToUseLargePages
+ * @summary All parallel GC variants may use large pages without requiring the heap
+ * alignment to be large page aligned. Other collectors also need to start up with odd-sized heaps.
+ * @bug 8024396
+ * @key gc
+ * @key regression
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:-UseLargePages TestAlignmentToUseLargePages
+ */
+
+public class TestAlignmentToUseLargePages {
+  public static void main(String args[]) throws Exception {
+    // nothing to do
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestCompressedClassFlags.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test
+ * @bug 8015107
+ * @summary Tests that VM prints a warning when -XX:CompressedClassSpaceSize
+ *          is used together with -XX:-UseCompressedClassPointers
+ * @library /testlibrary
+ */
+public class TestCompressedClassFlags {
+    public static void main(String[] args) throws Exception {
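+        // Compressed class pointers are only supported on 64-bit VMs.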
+        if (Platform.is64bit()) {
+            OutputAnalyzer output = runJava("-XX:CompressedClassSpaceSize=1g",
+                                            "-XX:-UseCompressedClassPointers",
+                                            "-version");
+            output.shouldContain("warning");
+            output.shouldNotContain("error");
+            output.shouldHaveExitValue(0);
+        }
+    }
+
+    private static OutputAnalyzer runJava(String ... args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
+        return new OutputAnalyzer(pb.start());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestUseCompressedOopsErgo.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,50 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestUseCompressedOopsErgo
+ * @key gc
+ * @bug 8010722
+ * @summary Tests ergonomics for UseCompressedOops.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestUseCompressedOopsErgo TestUseCompressedOopsErgoTools
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseG1GC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC -XX:-UseParallelOldGC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseSerialGC
+ */
+
+public class TestUseCompressedOopsErgo {
+
+  public static void main(String args[]) throws Exception {
+    if (!TestUseCompressedOopsErgoTools.is64bitVM()) {
+      // this test is relevant for 64 bit VMs only
+      return;
+    }
+    final String[] gcFlags = args;
+    TestUseCompressedOopsErgoTools.checkCompressedOopsErgo(gcFlags);
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestUseCompressedOopsErgoTools.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,177 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+import sun.management.ManagementFactoryHelper;
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
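+// Helper main class, launched in a separate VM by runWhiteBoxTest below: it prints
+// the maximum heap size for which compressed oops can be used, as reported by the
+// WhiteBox API.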
+class DetermineMaxHeapForCompressedOops {
+  public static void main(String[] args) throws Exception {
+    WhiteBox wb = WhiteBox.getWhiteBox();
+    System.out.print(wb.getCompressedOopsMaxHeapSize());
+  }
+}
+
+class TestUseCompressedOopsErgoTools {
+
+  private static long getCompressedClassSpaceSize() {
+    HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
+
+    VMOption option = diagnostic.getVMOption("CompressedClassSpaceSize");
+    return Long.parseLong(option.getValue());
+  }
+
+
+  public static long getMaxHeapForCompressedOops(String[] vmargs) throws Exception {
+    OutputAnalyzer output = runWhiteBoxTest(vmargs, DetermineMaxHeapForCompressedOops.class.getName(), new String[] {}, false);
+    return Long.parseLong(output.getStdout());
+  }
+
+  public static boolean is64bitVM() {
+    String val = System.getProperty("sun.arch.data.model");
+    if (val == null) {
+      throw new RuntimeException("Could not read sun.arch.data.model");
+    }
+    if (val.equals("64")) {
+      return true;
+    } else if (val.equals("32")) {
+      return false;
+    }
+    throw new RuntimeException("Unexpected value " + val + " of sun.arch.data.model");
+  }
+
+  /**
+   * Executes a new VM process with the given class and parameters.
+   * @param vmargs Arguments to the VM to run
+   * @param classname Name of the class to run
+   * @param arguments Arguments to the class
+   * @param useTestDotJavaDotOpts Use test.java.opts as part of the VM argument string
+   * @return The OutputAnalyzer with the results for the invocation.
+   */
+  public static OutputAnalyzer runWhiteBoxTest(String[] vmargs, String classname, String[] arguments, boolean useTestDotJavaDotOpts) throws Exception {
+    ArrayList<String> finalargs = new ArrayList<String>();
+
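+    // Flags that make the WhiteBox API available in the launched VM; the WhiteBox
+    // class is expected on the boot classpath (installed by ClassFileInstaller).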
+    String[] whiteboxOpts = new String[] {
+      "-Xbootclasspath/a:.",
+      "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
+      "-cp", System.getProperty("java.class.path"),
+    };
+
+    if (useTestDotJavaDotOpts) {
+      // System.getProperty("test.java.opts") is '' if no options are set,
+      // so we need to skip such an empty result
+      String[] externalVMOpts = new String[0];
+      if (System.getProperty("test.java.opts") != null && System.getProperty("test.java.opts").length() != 0) {
+        externalVMOpts = System.getProperty("test.java.opts").split(" ");
+      }
+      finalargs.addAll(Arrays.asList(externalVMOpts));
+    }
+
+    finalargs.addAll(Arrays.asList(vmargs));
+    finalargs.addAll(Arrays.asList(whiteboxOpts));
+    finalargs.add(classname);
+    finalargs.addAll(Arrays.asList(arguments));
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldHaveExitValue(0);
+    return output;
+  }
+
+  private static String[] join(String[] part1, String part2) {
+    ArrayList<String> result = new ArrayList<String>();
+    result.addAll(Arrays.asList(part1));
+    result.add(part2);
+    return result.toArray(new String[0]);
+  }
+
+  public static void checkCompressedOopsErgo(String[] gcflags) throws Exception {
+    long maxHeapForCompressedOops = getMaxHeapForCompressedOops(gcflags);
+
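+    // UseCompressedOops is expected to be enabled at and just below the reported
+    // limit, and disabled just above it.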
+    checkUseCompressedOops(gcflags, maxHeapForCompressedOops, true);
+    checkUseCompressedOops(gcflags, maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(gcflags, maxHeapForCompressedOops + 1, false);
+
+    // the use of HeapBaseMinAddress should not change the outcome
+    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops, true);
+    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops + 1, false);
+
+    // use a different object alignment
+    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"));
+
+    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops, true);
+    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops + 1, false);
+
+    // use a different CompressedClassSpaceSize
+    String compressedClassSpaceSizeArg = "-XX:CompressedClassSpaceSize=" + 2 * getCompressedClassSpaceSize();
+    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, compressedClassSpaceSizeArg));
+
+    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops, true);
+    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops + 1, false);
+  }
+
+  private static void checkUseCompressedOops(String[] args, long heapsize, boolean expectUseCompressedOops) throws Exception {
+     ArrayList<String> finalargs = new ArrayList<String>();
+     finalargs.addAll(Arrays.asList(args));
+     finalargs.add("-Xmx" + heapsize);
+     finalargs.add("-XX:+PrintFlagsFinal");
+     finalargs.add("-version");
+
+     String output = expectValid(finalargs.toArray(new String[0]));
+
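+     // Parse the -XX:+PrintFlagsFinal output for the final value of UseCompressedOops.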
+     boolean actualUseCompressedOops = getFlagBoolValue(" UseCompressedOops", output);
+
+     Asserts.assertEQ(expectUseCompressedOops, actualUseCompressedOops);
+  }
+
+  private static boolean getFlagBoolValue(String flag, String where) {
+    Matcher m = Pattern.compile(flag + "\\s+:?= (true|false)").matcher(where);
+    if (!m.find()) {
+      throw new RuntimeException("Could not find value for flag " + flag + " in output string");
+    }
+    return m.group(1).equals("true");
+  }
+
+  private static String expect(String[] flags, boolean hasWarning, boolean hasError, int errorcode) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flags);
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldHaveExitValue(errorcode);
+    return output.getStdout();
+  }
+
+  private static String expectValid(String[] flags) throws Exception {
+    return expect(flags, false, false, 0);
+  }
+}
+
--- a/hotspot/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java	Thu Sep 19 09:36:51 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test ClassMetaspaceSizeInJmapHeap
- * @bug 8004924
- * @summary Checks that jmap -heap contains the flag ClassMetaspaceSize
- * @library /testlibrary
- * @run main/othervm -XX:ClassMetaspaceSize=50m ClassMetaspaceSizeInJmapHeap
- */
-
-import com.oracle.java.testlibrary.*;
-import java.nio.file.*;
-import java.io.File;
-import java.nio.charset.Charset;
-import java.util.List;
-
-public class ClassMetaspaceSizeInJmapHeap {
-    public static void main(String[] args) throws Exception {
-        String pid = Integer.toString(ProcessTools.getProcessId());
-
-        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
-                                              .addToolArg("-heap")
-                                              .addToolArg(pid);
-        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
-
-        File out = new File("ClassMetaspaceSizeInJmapHeap.stdout.txt");
-        pb.redirectOutput(out);
-
-        File err = new File("ClassMetaspaceSizeInJmapHeap.stderr.txt");
-        pb.redirectError(err);
-
-        run(pb);
-
-        OutputAnalyzer output = new OutputAnalyzer(read(out));
-        output.shouldContain("ClassMetaspaceSize = 52428800 (50.0MB)");
-        out.delete();
-    }
-
-    private static void run(ProcessBuilder pb) throws Exception {
-        Process p = pb.start();
-        p.waitFor();
-        int exitValue = p.exitValue();
-        if (exitValue != 0) {
-            throw new Exception("jmap -heap exited with error code: " + exitValue);
-        }
-    }
-
-    private static String read(File f) throws Exception {
-        Path p = f.toPath();
-        List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
-
-        StringBuilder sb = new StringBuilder();
-        for (String line : lines) {
-            sb.append(line).append('\n');
-        }
-        return sb.toString();
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test CompressedClassSpaceSizeInJmapHeap
+ * @bug 8004924
+ * @summary Checks that jmap -heap contains the flag CompressedClassSpaceSize
+ * @library /testlibrary
+ * @run main/othervm -XX:CompressedClassSpaceSize=50m CompressedClassSpaceSizeInJmapHeap
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.nio.file.*;
+import java.io.File;
+import java.nio.charset.Charset;
+import java.util.List;
+
+public class CompressedClassSpaceSizeInJmapHeap {
+    public static void main(String[] args) throws Exception {
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
+                                              .addToolArg("-heap")
+                                              .addToolArg(pid);
+        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
+
+        File out = new File("CompressedClassSpaceSizeInJmapHeap.stdout.txt");
+        pb.redirectOutput(out);
+
+        File err = new File("CompressedClassSpaceSizeInJmapHeap.stderr.txt");
+        pb.redirectError(err);
+
+        run(pb);
+
+        OutputAnalyzer output = new OutputAnalyzer(read(out));
+        output.shouldContain("CompressedClassSpaceSize = 52428800 (50.0MB)");
+        out.delete();
+    }
+
+    private static void run(ProcessBuilder pb) throws Exception {
+        Process p = pb.start();
+        p.waitFor();
+        int exitValue = p.exitValue();
+        if (exitValue != 0) {
+            throw new Exception("jmap -heap exited with error code: " + exitValue);
+        }
+    }
+
+    private static String read(File f) throws Exception {
+        Path p = f.toPath();
+        List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
+
+        StringBuilder sb = new StringBuilder();
+        for (String line : lines) {
+            sb.append(line).append('\n');
+        }
+        return sb.toString();
+    }
+}
--- a/hotspot/test/gc/metaspace/TestMetaspaceMemoryPool.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/gc/metaspace/TestMetaspaceMemoryPool.java	Fri Sep 20 11:09:25 2013 -0700
@@ -22,55 +22,35 @@
  */
 
 import java.util.List;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryManagerMXBean;
-import java.lang.management.MemoryPoolMXBean;
-import java.lang.management.MemoryUsage;
-
-import java.lang.management.RuntimeMXBean;
-import java.lang.management.ManagementFactory;
+import java.lang.management.*;
+import com.oracle.java.testlibrary.*;
+import static com.oracle.java.testlibrary.Asserts.*;
 
 /* @test TestMetaspaceMemoryPool
  * @bug 8000754
  * @summary Tests that a MemoryPoolMXBean is created for metaspace and that a
  *          MemoryManagerMXBean is created.
+ * @library /testlibrary
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops TestMetaspaceMemoryPool
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:MaxMetaspaceSize=60m TestMetaspaceMemoryPool
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers TestMetaspaceMemoryPool
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:ClassMetaspaceSize=60m TestMetaspaceMemoryPool
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers TestMetaspaceMemoryPool
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:CompressedClassSpaceSize=60m TestMetaspaceMemoryPool
  */
 public class TestMetaspaceMemoryPool {
     public static void main(String[] args) {
         verifyThatMetaspaceMemoryManagerExists();
-        verifyMemoryPool(getMemoryPool("Metaspace"), isFlagDefined("MaxMetaspaceSize"));
 
-        if (runsOn64bit()) {
-            if (usesCompressedOops()) {
+        boolean isMetaspaceMaxDefined = InputArguments.containsPrefix("-XX:MaxMetaspaceSize");
+        verifyMemoryPool(getMemoryPool("Metaspace"), isMetaspaceMaxDefined);
+
+        if (Platform.is64bit()) {
+            if (InputArguments.contains("-XX:+UseCompressedOops")) {
                 MemoryPoolMXBean cksPool = getMemoryPool("Compressed Class Space");
                 verifyMemoryPool(cksPool, true);
             }
         }
     }
 
-    private static boolean runsOn64bit() {
-        return !System.getProperty("sun.arch.data.model").equals("32");
-    }
-
-    private static boolean usesCompressedOops() {
-        return isFlagDefined("+UseCompressedOops");
-    }
-
-    private static boolean isFlagDefined(String name) {
-        RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
-        List<String> args = runtimeMxBean.getInputArguments();
-        for (String arg : args) {
-            if (arg.startsWith("-XX:" + name)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
     private static void verifyThatMetaspaceMemoryManagerExists() {
         List<MemoryManagerMXBean> managers = ManagementFactory.getMemoryManagerMXBeans();
         for (MemoryManagerMXBean manager : managers) {
@@ -95,32 +75,19 @@
 
     private static void verifyMemoryPool(MemoryPoolMXBean pool, boolean isMaxDefined) {
         MemoryUsage mu = pool.getUsage();
-        assertDefined(mu.getInit(), "init");
-        assertDefined(mu.getUsed(), "used");
-        assertDefined(mu.getCommitted(), "committed");
+        long init = mu.getInit();
+        long used = mu.getUsed();
+        long committed = mu.getCommitted();
+        long max = mu.getMax();
+
+        assertGTE(init, 0L);
+        assertGTE(used, init);
+        assertGTE(committed, used);
 
         if (isMaxDefined) {
-            assertDefined(mu.getMax(), "max");
+            assertGTE(max, committed);
         } else {
-            assertUndefined(mu.getMax(), "max");
-        }
-    }
-
-    private static void assertDefined(long value, String name) {
-        assertTrue(value != -1, "Expected " + name + " to be defined");
-    }
-
-    private static void assertUndefined(long value, String name) {
-        assertEquals(value, -1, "Expected " + name + " to be undefined");
-    }
-
-    private static void assertEquals(long actual, long expected, String msg) {
-        assertTrue(actual == expected, msg);
-    }
-
-    private static void assertTrue(boolean condition, String msg) {
-        if (!condition) {
-            throw new RuntimeException(msg);
+            assertEQ(max, -1L);
         }
     }
 }
--- a/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java	Fri Sep 20 11:09:25 2013 -0700
@@ -33,13 +33,13 @@
  * @summary Tests that performance counters for metaspace and compressed class
  *          space exist and work.
  *
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  *
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  */
 public class TestMetaspacePerfCounters {
     public static Class fooClass = null;
@@ -61,10 +61,15 @@
     }
 
     private static void checkPerfCounters(String ns) throws Exception {
-        for (PerfCounter counter : countersInNamespace(ns)) {
-            String msg = "Expected " + counter.getName() + " to be larger than 0";
-            assertGT(counter.longValue(), 0L, msg);
-        }
+        long minCapacity = getMinCapacity(ns);
+        long maxCapacity = getMaxCapacity(ns);
+        long capacity = getCapacity(ns);
+        long used = getUsed(ns);
+
+        assertGTE(minCapacity, 0L);
+        assertGTE(used, minCapacity);
+        assertGTE(capacity, used);
+        assertGTE(maxCapacity, capacity);
     }
 
     private static void checkEmptyPerfCounters(String ns) throws Exception {
@@ -75,12 +80,10 @@
     }
 
     private static void checkUsedIncreasesWhenLoadingClass(String ns) throws Exception {
-        PerfCounter used = PerfCounters.findByName(ns + ".used");
-
-        long before = used.longValue();
+        long before = getUsed(ns);
         fooClass = compileAndLoad("Foo", "public class Foo { }");
         System.gc();
-        long after = used.longValue();
+        long after = getUsed(ns);
 
         assertGT(after, before);
     }
@@ -99,6 +102,22 @@
     }
 
     private static boolean isUsingCompressedClassPointers() {
-        return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedKlassPointers");
+        return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedClassPointers");
+    }
+
+    private static long getMinCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".minCapacity").longValue();
+    }
+
+    private static long getCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".capacity").longValue();
+    }
+
+    private static long getMaxCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".maxCapacity").longValue();
+    }
+
+    private static long getUsed(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".used").longValue();
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/metaspace/TestMetaspaceSizeFlags.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+/*
+ * @test TestMetaspaceSizeFlags
+ * @key gc
+ * @bug 8024650
+ * @summary Test that metaspace size flags can be set correctly
+ * @library /testlibrary
+ */
+public class TestMetaspaceSizeFlags {
+  public static final long K = 1024L;
+  public static final long M = 1024L * K;
+
+  // HotSpot uses a number of different values to align memory size flags.
+  // This is currently the largest alignment (unless huge large pages are used).
+  public static final long MAX_ALIGNMENT = 32 * M;
+
+  public static void main(String [] args) throws Exception {
+    testMaxMetaspaceSizeEQMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT);
+    // 8024650: MaxMetaspaceSize was adjusted instead of MetaspaceSize.
+    testMaxMetaspaceSizeLTMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT * 2);
+    testMaxMetaspaceSizeGTMetaspaceSize(MAX_ALIGNMENT * 2, MAX_ALIGNMENT);
+    testTooSmallInitialMetaspace(0, 0);
+    testTooSmallInitialMetaspace(0, MAX_ALIGNMENT);
+    testTooSmallInitialMetaspace(MAX_ALIGNMENT, 0);
+  }
+
+  private static void testMaxMetaspaceSizeEQMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertEQ(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
+    Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
+  }
+
+  private static void testMaxMetaspaceSizeLTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
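+    // When MaxMetaspaceSize is smaller than MetaspaceSize, the VM should adjust
+    // MetaspaceSize down to MaxMetaspaceSize, not MaxMetaspaceSize up (see 8024650).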
+    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
+    Asserts.assertEQ(mf.metaspaceSize, maxMetaspaceSize);
+  }
+
+  private static void testMaxMetaspaceSizeGTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertGT(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertGT(mf.maxMetaspaceSize, mf.metaspaceSize);
+    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
+    Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
+  }
+
+  private static void testTooSmallInitialMetaspace(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
+    output.shouldContain("Too small initial Metaspace size");
+  }
+
+  private static MetaspaceFlags runAndGetValue(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
+    output.shouldNotMatch("Error occurred during initialization of VM\n.*");
+
+    String stringMaxMetaspaceSize = output.firstMatch(".* MaxMetaspaceSize .* := (\\d+).*", 1);
+    String stringMetaspaceSize = output.firstMatch(".* MetaspaceSize .* := (\\d+).*", 1);
+
+    return new MetaspaceFlags(Long.parseLong(stringMaxMetaspaceSize),
+                              Long.parseLong(stringMetaspaceSize));
+  }
+
+  private static OutputAnalyzer run(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+        "-XX:MaxMetaspaceSize=" + maxMetaspaceSize,
+        "-XX:MetaspaceSize=" + metaspaceSize,
+        "-XX:-UseLargePages", // Prevent us from using 2GB large pages on solaris + sparc.
+        "-XX:+PrintFlagsFinal",
+        "-version");
+    return new OutputAnalyzer(pb.start());
+  }
+
+  private static class MetaspaceFlags {
+    public long maxMetaspaceSize;
+    public long metaspaceSize;
+    public MetaspaceFlags(long maxMetaspaceSize, long metaspaceSize) {
+      this.maxMetaspaceSize = maxMetaspaceSize;
+      this.metaspaceSize = metaspaceSize;
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.List;
+import java.lang.management.*;
+
+import com.oracle.java.testlibrary.*;
+import static com.oracle.java.testlibrary.Asserts.*;
+
+/* @test TestPerfCountersAndMemoryPools
+ * @bug 8023476
+ * @summary Tests that the MemoryPoolMXBeans and PerfCounters for metaspace
+ *          report the same data.
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools
+ */
+public class TestPerfCountersAndMemoryPools {
+    public static void main(String[] args) throws Exception {
+        checkMemoryUsage("Metaspace", "sun.gc.metaspace");
+
+        if (InputArguments.contains("-XX:+UseCompressedClassPointers") && Platform.is64bit()) {
+            checkMemoryUsage("Compressed Class Space", "sun.gc.compressedclassspace");
+        }
+    }
+
+    private static MemoryUsage getMemoryUsage(String memoryPoolName) {
+        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
+        for (MemoryPoolMXBean pool : pools) {
+            if (pool.getName().equals(memoryPoolName)) {
+                return pool.getUsage();
+            }
+        }
+
+        throw new RuntimeException("Expected to find a memory pool with name " +
+                                   memoryPoolName);
+    }
+
+    private static void checkMemoryUsage(String memoryPoolName, String perfNS)
+        throws Exception {
+        // Need to do a gc before each comparison to update the perf counters
+
+        System.gc();
+        MemoryUsage mu = getMemoryUsage(memoryPoolName);
+        assertEQ(getMinCapacity(perfNS), mu.getInit());
+
+        System.gc();
+        mu = getMemoryUsage(memoryPoolName);
+        assertEQ(getUsed(perfNS), mu.getUsed());
+
+        System.gc();
+        mu = getMemoryUsage(memoryPoolName);
+        assertEQ(getCapacity(perfNS), mu.getCommitted());
+    }
+
+    private static long getMinCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".minCapacity").longValue();
+    }
+
+    private static long getCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".capacity").longValue();
+    }
+
+    private static long getUsed(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".used").longValue();
+    }
+}
--- a/hotspot/test/runtime/6878713/Test6878713.sh	Thu Sep 19 09:36:51 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,137 +0,0 @@
-#!/bin/sh
-
-# 
-#  Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
-#  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-# 
-#  This code is free software; you can redistribute it and/or modify it
-#  under the terms of the GNU General Public License version 2 only, as
-#  published by the Free Software Foundation.
-# 
-#  This code is distributed in the hope that it will be useful, but WITHOUT
-#  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-#  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-#  version 2 for more details (a copy is included in the LICENSE file that
-#  accompanied this code).
-# 
-#  You should have received a copy of the GNU General Public License version
-#  2 along with this work; if not, write to the Free Software Foundation,
-#  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-# 
-#  Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-#  or visit www.oracle.com if you need additional information or have any
-#  questions.
-# 
-
- 
-
-##
-## @test
-## @bug 6878713
-## @bug 7030610
-## @bug 7037122
-## @bug 7123945
-## @summary Verifier heap corruption, relating to backward jsrs
-## @run shell Test6878713.sh
-##
-## some tests require path to find test source dir
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-
-TARGET_CLASS=OOMCrashClass1960_2
-
-echo "INFO: extracting the target class."
-${COMPILEJAVA}${FS}bin${FS}jar xvf \
-    ${TESTSRC}${FS}testcase.jar ${TARGET_CLASS}.class
-
-# remove any hs_err_pid that might exist here
-rm -f hs_err_pid*.log
-
-echo "INFO: checking for 32-bit versus 64-bit VM."
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -version 2>&1 \
-    | grep "64-Bit [^ ][^ ]* VM" > /dev/null 2>&1
-status="$?"
-if [ "$status" = 0 ]; then
-    echo "INFO: testing a 64-bit VM."
-    is_64_bit=true
-else
-    echo "INFO: testing a 32-bit VM."
-fi
-
-if [ "$is_64_bit" = true ]; then
-    # limit is 768MB in 8-byte words (1024 * 1024 * 768 / 8) == 100663296
-    MALLOC_MAX=100663296
-else
-    # limit is 768MB in 4-byte words (1024 * 1024 * 768 / 4) == 201326592
-    MALLOC_MAX=201326592
-fi
-echo "INFO: MALLOC_MAX=$MALLOC_MAX"
-
-echo "INFO: executing the target class."
-# -XX:+PrintCommandLineFlags for debugging purposes
-# -XX:+IgnoreUnrecognizedVMOptions so test will run on a VM without
-#     the new -XX:MallocMaxTestWords option
-# -XX:+UnlockDiagnosticVMOptions so we can use -XX:MallocMaxTestWords
-# -XX:MallocMaxTestWords limits malloc to $MALLOC_MAX
-${TESTJAVA}${FS}bin${FS}java \
-    -XX:+PrintCommandLineFlags \
-    -XX:+IgnoreUnrecognizedVMOptions \
-    -XX:+UnlockDiagnosticVMOptions \
-    -XX:MallocMaxTestWords=$MALLOC_MAX \
-    ${TESTVMOPTS} ${TARGET_CLASS} > test.out 2>&1
-
-echo "INFO: begin contents of test.out:"
-cat test.out
-echo "INFO: end contents of test.out."
-
-echo "INFO: checking for memory allocation error message."
-# We are looking for this specific memory allocation failure mesg so
-# we know we exercised the right allocation path with the test class:
-MESG1="Native memory allocation (malloc) failed to allocate 25696531[0-9][0-9] bytes"
-grep "$MESG1" test.out
-status="$?"
-if [ "$status" = 0 ]; then
-    echo "INFO: found expected memory allocation error message."
-else
-    echo "INFO: did not find expected memory allocation error message."
-
-    # If we didn't find MESG1 above, then there are several scenarios:
-    # 1) -XX:MallocMaxTestWords is not supported by the current VM and we
-    #    didn't fail TARGET_CLASS's memory allocation attempt; instead
-    #    we failed to find TARGET_CLASS's main() method. The TARGET_CLASS
-    #    is designed to provoke a memory allocation failure during class
-    #    loading; we actually don't care about running the class which is
-    #    why it doesn't have a main() method.
-    # 2) we failed a memory allocation, but not the one we were looking
-    #    so it might be that TARGET_CLASS no longer tickles the same
-    #    memory allocation code path
-    # 3) TARGET_CLASS reproduces the failure mode (SIGSEGV) fixed by
-    #    6878713 because the test is running on a pre-fix VM.
-    echo "INFO: checking for no main() method message."
-    MESG2="Error: Main method not found in class"
-    grep "$MESG2" test.out
-    status="$?"
-    if [ "$status" = 0 ]; then
-        echo "INFO: found no main() method message."
-    else
-        echo "FAIL: did not find no main() method message."
-        # status is non-zero for exit below
-
-        if [ -s hs_err_pid*.log ]; then
-            echo "INFO: begin contents of hs_err_pid file:"
-            cat hs_err_pid*.log
-            echo "INFO: end contents of hs_err_pid file."
-        fi
-    fi
-fi
-
-if [ "$status" = 0 ]; then
-    echo "PASS: test found one of the expected messages."
-fi
-exit "$status"
Binary file hotspot/test/runtime/6878713/testcase.jar has changed
--- a/hotspot/test/runtime/7020373/Test7020373.sh	Thu Sep 19 09:36:51 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-##
-## @test
-## @bug 7020373 7055247 7053586 7185550
-## @key cte_test
-## @summary JSR rewriting can overflow memory address size variables
-## @ignore Ignore it as 7053586 test uses lots of memory. See bug report for detail.
-## @run shell Test7020373.sh
-##
-
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-
-${COMPILEJAVA}${FS}bin${FS}jar xvf ${TESTSRC}${FS}testcase.jar
-
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} OOMCrashClass4000_1 > test.out 2>&1
-
-cat test.out
-
-egrep "SIGSEGV|An unexpected error has been detected" test.out
-
-if [ $? = 0 ]
-then
-    echo "Test Failed"
-    exit 1
-else
-    egrep "java.lang.LinkageError|java.lang.NoSuchMethodError|Main method not found in class OOMCrashClass4000_1|insufficient memory" test.out
-    if [ $? = 0 ]
-    then
-        echo "Test Passed"
-        exit 0
-    else
-        echo "Test Failed"
-        exit 1
-    fi
-fi
Binary file hotspot/test/runtime/7020373/testcase.jar has changed
--- a/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Fri Sep 20 11:09:25 2013 -0700
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8003424
- * @summary Testing UseCompressedKlassPointers with CDS
+ * @summary Testing UseCompressedClassPointers with CDS
  * @library /testlibrary
  * @run main CDSCompressedKPtrs
  */
@@ -36,7 +36,7 @@
     ProcessBuilder pb;
     if (Platform.is64bit()) {
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
+        "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
         "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       OutputAnalyzer output = new OutputAnalyzer(pb.start());
       try {
@@ -44,7 +44,7 @@
         output.shouldHaveExitValue(0);
 
         pb = ProcessTools.createJavaProcessBuilder(
-          "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
+          "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
           "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
         output = new OutputAnalyzer(pb.start());
         output.shouldContain("sharing");
--- a/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java	Fri Sep 20 11:09:25 2013 -0700
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8003424
- * @summary Test that cannot use CDS if UseCompressedKlassPointers is turned off.
+ * @summary Test that cannot use CDS if UseCompressedClassPointers is turned off.
  * @library /testlibrary
  * @run main CDSCompressedKPtrsError
  */
@@ -36,7 +36,7 @@
     ProcessBuilder pb;
     if (Platform.is64bit()) {
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:+UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+        "-XX:+UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
         "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       OutputAnalyzer output = new OutputAnalyzer(pb.start());
       try {
@@ -44,21 +44,21 @@
         output.shouldHaveExitValue(0);
 
         pb = ProcessTools.createJavaProcessBuilder(
-          "-XX:-UseCompressedKlassPointers", "-XX:-UseCompressedOops",
+          "-XX:-UseCompressedClassPointers", "-XX:-UseCompressedOops",
           "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
         output = new OutputAnalyzer(pb.start());
         output.shouldContain("Unable to use shared archive");
         output.shouldHaveExitValue(0);
 
         pb = ProcessTools.createJavaProcessBuilder(
-          "-XX:-UseCompressedKlassPointers", "-XX:+UseCompressedOops",
+          "-XX:-UseCompressedClassPointers", "-XX:+UseCompressedOops",
           "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
         output = new OutputAnalyzer(pb.start());
         output.shouldContain("Unable to use shared archive");
         output.shouldHaveExitValue(0);
 
         pb = ProcessTools.createJavaProcessBuilder(
-          "-XX:+UseCompressedKlassPointers", "-XX:-UseCompressedOops",
+          "-XX:+UseCompressedClassPointers", "-XX:-UseCompressedOops",
           "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
         output = new OutputAnalyzer(pb.start());
         output.shouldContain("Unable to use shared archive");
@@ -71,19 +71,19 @@
 
       // Test bad options with -Xshare:dump.
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:-UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+        "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
         "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       output = new OutputAnalyzer(pb.start());
       output.shouldContain("Cannot dump shared archive");
 
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:+UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+        "-XX:+UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
         "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       output = new OutputAnalyzer(pb.start());
       output.shouldContain("Cannot dump shared archive");
 
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:-UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+        "-XX:-UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
         "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       output = new OutputAnalyzer(pb.start());
       output.shouldContain("Cannot dump shared archive");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassFile/JsrRewriting.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+
+/*
+ * @test JsrRewriting
+ * @summary JSR (jump local subroutine)
+ *      rewriting can overflow memory address size variables
+ * @bug 7020373
+ * @bug 7055247
+ * @bug 7053586
+ * @bug 7185550
+ * @bug 7149464
+ * @key cte_test
+ * @library /testlibrary
+ * @run main JsrRewriting
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class JsrRewriting {
+
+    public static void main(String[] args) throws Exception {
+
+        // ======= Configure the test
+        String jarFile = System.getProperty("test.src") +
+            File.separator + "JsrRewritingTestCase.jar";
+        String className = "OOMCrashClass4000_1";
+
+        // limit is 768MB in native words
+        int mallocMaxTestWords = (1024 * 1024 * 768 / 4);
+        if (Platform.is64bit())
+            mallocMaxTestWords = (mallocMaxTestWords / 2);
+
+        // ======= extract the test class
+        ProcessBuilder pb = new ProcessBuilder(new String[] {
+            JDKToolFinder.getJDKTool("jar"),
+            "xvf", jarFile } );
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        // ======= execute the test
+        pb = ProcessTools.createJavaProcessBuilder(
+            "-cp", ".",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:MallocMaxTestWords=" + mallocMaxTestWords,
+            className);
+
+        output = new OutputAnalyzer(pb.start());
+        String[] expectedMsgs = {
+            "java.lang.LinkageError",
+            "java.lang.NoSuchMethodError",
+            "Main method not found in class " + className,
+            "insufficient memory"
+        };
+
+        MultipleOrMatch(output, expectedMsgs);
+    }
+
+    private static void
+        MultipleOrMatch(OutputAnalyzer analyzer, String[] whatToMatch) {
+            String output = analyzer.getOutput();
+
+            for (String expected : whatToMatch)
+                if (output.contains(expected))
+                    return;
+
+            String err =
+                " stdout: [" + analyzer.getOutput() + "];\n" +
+                " exitValue = " + analyzer.getExitValue() + "\n";
+            System.err.println(err);
+
+            StringBuilder msg = new StringBuilder("Output did not contain " +
+                "any of the following expected messages: \n");
+            for (String expected : whatToMatch)
+                msg.append(expected).append(System.lineSeparator());
+            throw new RuntimeException(msg.toString());
+    }
+}
+
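For context, the MallocMaxTestWords value computed above caps the VM's native test allocations at roughly 768MB expressed in machine words. A minimal standalone sketch of the same arithmetic (illustrative only, not part of the changeset; the sun.arch.data.model check stands in for the library's Platform.is64bit()):

    // Sketch: -XX:MallocMaxTestWords takes a word count, not a byte count.
    // A word is 4 bytes on a 32-bit VM and 8 bytes on a 64-bit VM, so the
    // count computed for 32-bit is halved on 64-bit to keep the cap at ~768MB.
    public class MallocLimitSketch {
        public static void main(String[] args) {
            boolean is64bit = "64".equals(System.getProperty("sun.arch.data.model"));
            int words = (1024 * 1024 * 768) / 4;   // 768MB expressed in 4-byte words
            if (is64bit) {
                words /= 2;                        // 8-byte words on a 64-bit VM
            }
            System.out.println("-XX:MallocMaxTestWords=" + words);
        }
    }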
Binary file hotspot/test/runtime/ClassFile/JsrRewritingTestCase.jar has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassFile/OomWhileParsingRepeatedJsr.java	Fri Sep 20 11:09:25 2013 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+
+/*
+ * @test OomWhileParsingRepeatedJsr
+ * @summary Testing class file parser; specifically parsing
+ *          a file with repeated JSR (jump local subroutine)
+ *          bytecode command.
+ * @bug 6878713
+ * @bug 7030610
+ * @bug 7037122
+ * @bug 7123945
+ * @bug 8016029
+ * @library /testlibrary
+ * @run main OomWhileParsingRepeatedJsr
+ */
+
+import com.oracle.java.testlibrary.*;
+
+
+public class OomWhileParsingRepeatedJsr {
+
+    public static void main(String[] args) throws Exception {
+
+        // ======= Configure the test
+        String jarFile = System.getProperty("test.src") + "/testcase.jar";
+        String className = "OOMCrashClass1960_2";
+
+        // limit is 768MB in native words
+        int mallocMaxTestWords = (1024 * 1024 * 768 / 4);
+        if (Platform.is64bit())
+            mallocMaxTestWords = (mallocMaxTestWords / 2);
+
+        // ======= extract the test class
+        ProcessBuilder pb = new ProcessBuilder(new String[] {
+            JDKToolFinder.getJDKTool("jar"),
+            "xvf", jarFile } );
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        // ======= execute the test
+        pb = ProcessTools.createJavaProcessBuilder(
+            "-cp", ".",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:MallocMaxTestWords=" + mallocMaxTestWords,
+            className );
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Cannot reserve enough memory");
+    }
+}
+
Binary file hotspot/test/runtime/ClassFile/testcase.jar has changed
--- a/hotspot/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java	Fri Sep 20 11:09:25 2013 -0700
@@ -25,7 +25,7 @@
  * @test
  * @bug 8000968
  * @key regression
- * @summary NPG: UseCompressedKlassPointers asserts with ObjectAlignmentInBytes=32
+ * @summary NPG: UseCompressedClassPointers asserts with ObjectAlignmentInBytes=32
  * @library /testlibrary
  */
 
@@ -52,7 +52,7 @@
         OutputAnalyzer output;
 
         pb = ProcessTools.createJavaProcessBuilder(
-            "-XX:+UseCompressedKlassPointers",
+            "-XX:+UseCompressedClassPointers",
             "-XX:+UseCompressedOops",
             "-XX:ObjectAlignmentInBytes=" + alignment,
             "-version");
--- a/hotspot/test/runtime/InitialThreadOverflow/testme.sh	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/runtime/InitialThreadOverflow/testme.sh	Fri Sep 20 11:09:25 2013 -0700
@@ -43,9 +43,9 @@
   exit 0
 fi
 
-gcc_cmd=`which gcc`
-if [ "x$gcc_cmd" == "x" ]; then
-    echo "WARNING: gcc not found. Cannot execute test." 2>&1
+gcc_cmd=`which g++`
+if [ "x$gcc_cmd" = "x" ]; then
+    echo "WARNING: g++ not found. Cannot execute test." 1>&2
     exit 0;
 fi
 
--- a/hotspot/test/testlibrary/OutputAnalyzerTest.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/testlibrary/OutputAnalyzerTest.java	Fri Sep 20 11:09:25 2013 -0700
@@ -172,5 +172,22 @@
     } catch (RuntimeException e) {
         // expected
     }
+
+    {
+      String aaaa = "aaaa";
+      String result = output.firstMatch(aaaa);
+      if (!aaaa.equals(result)) {
+        throw new Exception("firstMatch(String) failed to match. Expected: " + aaaa + " got: " + result);
+      }
+    }
+
+    {
+      String aa = "aa";
+      String aa_grouped_aa = aa + "(" + aa + ")";
+      String result = output.firstMatch(aa_grouped_aa, 1);
+      if (!aa.equals(result)) {
+        throw new Exception("firstMatch(String, int) failed to match. Expected: " + aa + " got: " + result);
+      }
+    }
   }
 }
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java	Fri Sep 20 11:09:25 2013 -0700
@@ -41,6 +41,9 @@
     /**
      * Returns true if {@code arg} is an input argument to the VM.
      *
+     * This is useful for checking boolean flags such as -XX:+UseSerialGC or
+     * -XX:-UsePerfData.
+     *
      * @param arg The name of the argument.
      * @return {@code true} if the given argument is an input argument,
      *         otherwise {@code false}.
@@ -48,4 +51,26 @@
     public static boolean contains(String arg) {
         return args.contains(arg);
     }
+
+    /**
+     * Returns true if {@code prefix} is the start of an input argument to the
+     * VM.
+     *
+     * This is useful for checking if a flag describing a quantity, such as
+     * -XX:MaxMetaspaceSize=100m, is set without having to know the quantity.
+     * To check if the flag -XX:MaxMetaspaceSize is set, use
+     * {@code InputArguments.containsPrefix("-XX:MaxMetaspaceSize")}.
+     *
+     * @param prefix The start of the argument.
+     * @return {@code true} if the given argument is the start of an input
+     *         argument, otherwise {@code false}.
+     */
+    public static boolean containsPrefix(String prefix) {
+        for (String arg : args) {
+            if (arg.startsWith(prefix)) {
+                return true;
+            }
+        }
+        return false;
+    }
 }
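A short usage sketch for the containsPrefix helper added above (the class name and the flags probed here are illustrative assumptions, not taken from the changeset):

    import com.oracle.java.testlibrary.InputArguments;

    public class ContainsPrefixExample {
        public static void main(String[] args) {
            // Boolean flags can be matched exactly...
            boolean serialGC = InputArguments.contains("-XX:+UseSerialGC");
            // ...while quantity flags are matched by prefix, since the value is not known in advance.
            boolean maxMetaspaceSet = InputArguments.containsPrefix("-XX:MaxMetaspaceSize");
            System.out.println("UseSerialGC: " + serialGC + ", MaxMetaspaceSize set: " + maxMetaspaceSet);
        }
    }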
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/JDKToolFinder.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/JDKToolFinder.java	Fri Sep 20 11:09:25 2013 -0700
@@ -23,7 +23,9 @@
 
 package com.oracle.java.testlibrary;
 
-import java.io.File;
+import java.io.FileNotFoundException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 
 public final class JDKToolFinder {
 
@@ -32,38 +34,73 @@
 
     /**
      * Returns the full path to an executable in jdk/bin based on System
-     * property {@code compile.jdk} (set by jtreg test suite)
+     * property {@code test.jdk} or {@code compile.jdk} (both are set by the jtreg test suite)
      *
      * @return Full path to an executable in jdk/bin
      */
     public static String getJDKTool(String tool) {
-        String binPath = System.getProperty("compile.jdk");
-        if (binPath == null) {
-            throw new RuntimeException("System property 'compile.jdk' not set. "
-                    + "This property is normally set by jtreg. "
-                    + "When running test separately, set this property using "
-                    + "'-Dcompile.jdk=/path/to/jdk'.");
+
+        // First try to find the executable in test.jdk
+        try {
+            return getTool(tool, "test.jdk");
+        } catch (FileNotFoundException e) {
+            // Ignored: fall through and try compile.jdk below.
         }
-        binPath += File.separatorChar + "bin" + File.separatorChar + tool;
 
-        return binPath;
+        // Now see if it's available in compile.jdk
+        try {
+            return getTool(tool, "compile.jdk");
+        } catch (FileNotFoundException e) {
+            throw new RuntimeException("Failed to find " + tool +
+                    ", looked in test.jdk (" + System.getProperty("test.jdk") +
+                    ") and compile.jdk (" + System.getProperty("compile.jdk") + ")");
+        }
     }
+
     /**
-     * Returns the full path to an executable in &lt;current jdk&gt;/bin based
-     * on System property {@code test.jdk} (set by jtreg test suite)
+     * Returns the full path to an executable in jdk/bin based on System
+     * property {@code compile.jdk}
      *
      * @return Full path to an executable in jdk/bin
      */
-    public static String getCurrentJDKTool(String tool) {
-        String binPath = System.getProperty("test.jdk");
-        if (binPath == null) {
-            throw new RuntimeException("System property 'test.jdk' not set. "
-                + "This property is normally set by jtreg. "
-                + "When running test separately, set this property using "
-                + "'-Dtest.jdk=/path/to/jdk'.");
+    public static String getCompileJDKTool(String tool) {
+        try {
+            return getTool(tool, "compile.jdk");
+        } catch (FileNotFoundException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Returns the full path to an executable in jdk/bin based on System
+     * property {@code test.jdk}
+     *
+     * @return Full path to an executable in jdk/bin
+     */
+    public static String getTestJDKTool(String tool) {
+        try {
+            return getTool(tool, "test.jdk");
+        } catch (FileNotFoundException e) {
+            throw new RuntimeException(e);
         }
-        binPath += File.separatorChar + "bin" + File.separatorChar + tool;
+    }
+
+    private static String getTool(String tool, String property) throws FileNotFoundException {
+        String jdkPath = System.getProperty(property);
 
-        return binPath;
+        if (jdkPath == null) {
+            throw new RuntimeException(
+                    "System property '" + property + "' not set. This property is normally set by jtreg. "
+                    + "When running test separately, set this property using '-D" + property + "=/path/to/jdk'.");
+        }
+
+        Path toolName = Paths.get("bin", tool + (Platform.isWindows() ? ".exe" : ""));
+
+        Path jdkTool = Paths.get(jdkPath, toolName.toString());
+        if (!jdkTool.toFile().exists()) {
+            throw new FileNotFoundException("Could not find file " + jdkTool.toAbsolutePath());
+        }
+
+        return jdkTool.toAbsolutePath().toString();
     }
 }
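From a test's point of view, the reworked lookup can be exercised as in the sketch below (the class name is hypothetical; jtreg normally supplies test.jdk and compile.jdk):

    import com.oracle.java.testlibrary.JDKToolFinder;

    public class ToolFinderExample {
        public static void main(String[] args) {
            // Tries test.jdk first, then falls back to compile.jdk; on Windows the
            // ".exe" suffix is appended and the file's existence is verified.
            String jar = JDKToolFinder.getJDKTool("jar");
            // The explicit variants resolve against a single property only.
            String javac = JDKToolFinder.getTestJDKTool("javac");
            System.out.println(jar + "\n" + javac);
        }
    }

When run outside jtreg, the properties have to be supplied by hand, e.g. -Dtest.jdk=/path/to/jdk.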
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Fri Sep 20 11:09:25 2013 -0700
@@ -211,13 +211,13 @@
       if (matcher.find()) {
           reportDiagnosticSummary();
           throw new RuntimeException("'" + pattern
-                  + "' found in stdout \n");
+                  + "' found in stdout: '" + matcher.group() + "' \n");
       }
       matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
       if (matcher.find()) {
           reportDiagnosticSummary();
           throw new RuntimeException("'" + pattern
-                  + "' found in stderr \n");
+                  + "' found in stderr: '" + matcher.group() + "' \n");
       }
   }
 
@@ -254,6 +254,37 @@
   }
 
   /**
+   * Get the captured group of the first string matching the pattern.
+   * stderr is searched before stdout.
+   *
+   * @param pattern The multi-line pattern to match
+   * @param group The group to capture
+   * @return The matched string or null if no match was found
+   */
+  public String firstMatch(String pattern, int group) {
+    Matcher stderrMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
+    Matcher stdoutMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
+    if (stderrMatcher.find()) {
+      return stderrMatcher.group(group);
+    }
+    if (stdoutMatcher.find()) {
+      return stdoutMatcher.group(group);
+    }
+    return null;
+  }
+
+  /**
+   * Get the first string matching the pattern.
+   * stderr is searched before stdout.
+   *
+   * @param pattern The multi-line pattern to match
+   * @return The matched string or null if no match was found
+   */
+  public String firstMatch(String pattern) {
+    return firstMatch(pattern, 0);
+  }
+
+  /**
    * Verify the exit value of the process
    *
    * @param expectedExitValue Expected exit value from process
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Sep 19 09:36:51 2013 -0700
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Sep 20 11:09:25 2013 -0700
@@ -61,6 +61,8 @@
     registerNatives();
   }
 
+  // Get the maximum heap size supporting COOPs
+  public native long getCompressedOopsMaxHeapSize();
   // Arguments
   public native void printHeapSizes();
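A minimal sketch of calling the new WhiteBox entry point; it assumes the usual WhiteBox test setup (the whitebox classes on -Xbootclasspath/a, plus -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI) and the standard WhiteBox.getWhiteBox() accessor:

    import sun.hotspot.WhiteBox;

    public class CoopsMaxHeapExample {
        public static void main(String[] args) {
            WhiteBox wb = WhiteBox.getWhiteBox();
            // Maximum heap size that still supports compressed oops.
            long max = wb.getCompressedOopsMaxHeapSize();
            System.out.println("Max heap size with compressed oops: " + max);
        }
    }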