Merge
author lana
Thu, 16 Oct 2014 14:15:37 -0700
changeset 27026 1f97f4f1b7d9
parent 27002 297d6a75d68a (current diff)
parent 27025 f4805f778f16 (diff)
child 27027 9bc18d9d3cd7
Merge
--- a/hotspot/make/aix/makefiles/fastdebug.make	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/aix/makefiles/fastdebug.make	Thu Oct 16 14:15:37 2014 -0700
@@ -67,7 +67,6 @@
 #    not justified.
 LFLAGS_QIPA=
 
-G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG
 PICFLAGS = DEFAULT
--- a/hotspot/make/aix/makefiles/mapfile-vers-debug	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/aix/makefiles/mapfile-vers-debug	Thu Oct 16 14:15:37 2014 -0700
@@ -84,6 +84,7 @@
                 JVM_EnableCompiler;
                 JVM_Exit;
                 JVM_FillInStackTrace;
+                JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
                 JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
@@ -117,7 +118,6 @@
                 JVM_GetClassDeclaredMethods;
                 JVM_GetClassFieldsCount;
                 JVM_GetClassInterfaces;
-                JVM_GetClassLoader;
                 JVM_GetClassMethodsCount;
                 JVM_GetClassModifiers;
                 JVM_GetClassName;
--- a/hotspot/make/aix/makefiles/mapfile-vers-product	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/aix/makefiles/mapfile-vers-product	Thu Oct 16 14:15:37 2014 -0700
@@ -84,6 +84,7 @@
                 JVM_EnableCompiler;
                 JVM_Exit;
                 JVM_FillInStackTrace;
+                JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
                 JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
@@ -117,7 +118,6 @@
                 JVM_GetClassDeclaredMethods;
                 JVM_GetClassFieldsCount;
                 JVM_GetClassInterfaces;
-                JVM_GetClassLoader;
                 JVM_GetClassMethodsCount;
                 JVM_GetClassModifiers;
                 JVM_GetClassName;
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug	Thu Oct 16 14:15:37 2014 -0700
@@ -82,6 +82,7 @@
                 _JVM_EnableCompiler
                 _JVM_Exit
                 _JVM_FillInStackTrace
+                _JVM_FindClassFromCaller
                 _JVM_FindClassFromClass
                 _JVM_FindClassFromClassLoader
                 _JVM_FindClassFromBootLoader
@@ -115,7 +116,6 @@
                 _JVM_GetClassDeclaredMethods
                 _JVM_GetClassFieldsCount
                 _JVM_GetClassInterfaces
-                _JVM_GetClassLoader
                 _JVM_GetClassMethodsCount
                 _JVM_GetClassModifiers
                 _JVM_GetClassName
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product	Thu Oct 16 14:15:37 2014 -0700
@@ -82,6 +82,7 @@
                 _JVM_EnableCompiler
                 _JVM_Exit
                 _JVM_FillInStackTrace
+                _JVM_FindClassFromCaller
                 _JVM_FindClassFromClass
                 _JVM_FindClassFromClassLoader
                 _JVM_FindClassFromBootLoader
@@ -115,7 +116,6 @@
                 _JVM_GetClassDeclaredMethods
                 _JVM_GetClassFieldsCount
                 _JVM_GetClassInterfaces
-                _JVM_GetClassLoader
                 _JVM_GetClassMethodsCount
                 _JVM_GetClassModifiers
                 _JVM_GetClassName
--- a/hotspot/make/bsd/makefiles/mapfile-vers-debug	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-debug	Thu Oct 16 14:15:37 2014 -0700
@@ -84,6 +84,7 @@
                 JVM_EnableCompiler;
                 JVM_Exit;
                 JVM_FillInStackTrace;
+                JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
                 JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
@@ -117,7 +118,6 @@
                 JVM_GetClassDeclaredMethods;
                 JVM_GetClassFieldsCount;
                 JVM_GetClassInterfaces;
-                JVM_GetClassLoader;
                 JVM_GetClassMethodsCount;
                 JVM_GetClassModifiers;
                 JVM_GetClassName;
--- a/hotspot/make/bsd/makefiles/mapfile-vers-product	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-product	Thu Oct 16 14:15:37 2014 -0700
@@ -84,6 +84,7 @@
                 JVM_EnableCompiler;
                 JVM_Exit;
                 JVM_FillInStackTrace;
+                JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
                 JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
@@ -117,7 +118,6 @@
                 JVM_GetClassDeclaredMethods;
                 JVM_GetClassFieldsCount;
                 JVM_GetClassInterfaces;
-                JVM_GetClassLoader;
                 JVM_GetClassMethodsCount;
                 JVM_GetClassModifiers;
                 JVM_GetClassName;
--- a/hotspot/make/linux/makefiles/mapfile-vers-debug	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/linux/makefiles/mapfile-vers-debug	Thu Oct 16 14:15:37 2014 -0700
@@ -84,6 +84,7 @@
                 JVM_EnableCompiler;
                 JVM_Exit;
                 JVM_FillInStackTrace;
+                JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
                 JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
@@ -117,7 +118,6 @@
                 JVM_GetClassDeclaredMethods;
                 JVM_GetClassFieldsCount;
                 JVM_GetClassInterfaces;
-                JVM_GetClassLoader;
                 JVM_GetClassMethodsCount;
                 JVM_GetClassModifiers;
                 JVM_GetClassName;
--- a/hotspot/make/linux/makefiles/mapfile-vers-product	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/linux/makefiles/mapfile-vers-product	Thu Oct 16 14:15:37 2014 -0700
@@ -84,6 +84,7 @@
                 JVM_EnableCompiler;
                 JVM_Exit;
                 JVM_FillInStackTrace;
+                JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
                 JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
@@ -117,7 +118,6 @@
                 JVM_GetClassDeclaredMethods;
                 JVM_GetClassFieldsCount;
                 JVM_GetClassInterfaces;
-                JVM_GetClassLoader;
                 JVM_GetClassMethodsCount;
                 JVM_GetClassModifiers;
                 JVM_GetClassName;
--- a/hotspot/make/solaris/makefiles/mapfile-vers	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/make/solaris/makefiles/mapfile-vers	Thu Oct 16 14:15:37 2014 -0700
@@ -84,6 +84,7 @@
                 JVM_EnableCompiler;
                 JVM_Exit;
                 JVM_FillInStackTrace;
+                JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
                 JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
@@ -117,7 +118,6 @@
                 JVM_GetClassDeclaredMethods;
                 JVM_GetClassFieldsCount;
                 JVM_GetClassInterfaces;
-                JVM_GetClassLoader;
                 JVM_GetClassMethodsCount;
                 JVM_GetClassModifiers;
                 JVM_GetClassName;
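The ten hunks above make the same two-line change in every per-platform export list: JVM_FindClassFromCaller is added to the symbols libjvm exports, and the no-longer-used JVM_GetClassLoader is dropped. These mapfiles are linker version scripts (plain exported-symbol lists on Darwin, where names carry a leading underscore), so a symbol left out of them stays local to the library and cannot be resolved at runtime. A minimal sketch that checks the new export, assuming a POSIX host; the library path here is purely illustrative:

```cpp
#include <dlfcn.h>
#include <cstdio>

int main() {
  // Path is an assumption for illustration; point it at the libjvm under test.
  void* jvm = dlopen("libjvm.so", RTLD_LAZY);
  if (jvm == nullptr) { std::fprintf(stderr, "dlopen: %s\n", dlerror()); return 1; }
  // Resolves only if the mapfiles above export the symbol.
  void* sym = dlsym(jvm, "JVM_FindClassFromCaller");
  std::printf("JVM_FindClassFromCaller: %s\n", sym ? "exported" : "missing");
  dlclose(jvm);
  return sym ? 0 : 1;
}
```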
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -268,8 +268,35 @@
 
     ISEL_OPCODE   = (31u << OPCODE_SHIFT |  15u << 1),
 
-    MTLR_OPCODE   = (31u << OPCODE_SHIFT | 467u << 1 | 8 << SPR_0_4_SHIFT),
-    MFLR_OPCODE   = (31u << OPCODE_SHIFT | 339u << 1 | 8 << SPR_0_4_SHIFT),
+    // Special purpose registers
+    MTSPR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1),
+    MFSPR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1),
+
+    MTXER_OPCODE  = (MTSPR_OPCODE | 1 << SPR_0_4_SHIFT),
+    MFXER_OPCODE  = (MFSPR_OPCODE | 1 << SPR_0_4_SHIFT),
+
+    MTDSCR_OPCODE = (MTSPR_OPCODE | 3 << SPR_0_4_SHIFT),
+    MFDSCR_OPCODE = (MFSPR_OPCODE | 3 << SPR_0_4_SHIFT),
+
+    MTLR_OPCODE   = (MTSPR_OPCODE | 8 << SPR_0_4_SHIFT),
+    MFLR_OPCODE   = (MFSPR_OPCODE | 8 << SPR_0_4_SHIFT),
+
+    MTCTR_OPCODE  = (MTSPR_OPCODE | 9 << SPR_0_4_SHIFT),
+    MFCTR_OPCODE  = (MFSPR_OPCODE | 9 << SPR_0_4_SHIFT),
+
+    MTTFHAR_OPCODE   = (MTSPR_OPCODE | 128 << SPR_0_4_SHIFT),
+    MFTFHAR_OPCODE   = (MFSPR_OPCODE | 128 << SPR_0_4_SHIFT),
+    MTTFIAR_OPCODE   = (MTSPR_OPCODE | 129 << SPR_0_4_SHIFT),
+    MFTFIAR_OPCODE   = (MFSPR_OPCODE | 129 << SPR_0_4_SHIFT),
+    MTTEXASR_OPCODE  = (MTSPR_OPCODE | 130 << SPR_0_4_SHIFT),
+    MFTEXASR_OPCODE  = (MFSPR_OPCODE | 130 << SPR_0_4_SHIFT),
+    MTTEXASRU_OPCODE = (MTSPR_OPCODE | 131 << SPR_0_4_SHIFT),
+    MFTEXASRU_OPCODE = (MFSPR_OPCODE | 131 << SPR_0_4_SHIFT),
+
+    MTVRSAVE_OPCODE  = (MTSPR_OPCODE | 256 << SPR_0_4_SHIFT),
+    MFVRSAVE_OPCODE  = (MFSPR_OPCODE | 256 << SPR_0_4_SHIFT),
+
+    MFTB_OPCODE   = (MFSPR_OPCODE | 268 << SPR_0_4_SHIFT),
 
     MTCRF_OPCODE  = (31u << OPCODE_SHIFT | 144u << 1),
     MFCR_OPCODE   = (31u << OPCODE_SHIFT | 19u << 1),
@@ -291,9 +318,6 @@
 
     // CTR-related opcodes
     BCCTR_OPCODE  = (19u << OPCODE_SHIFT | 528u << 1),
-    MTCTR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1 | 9 << SPR_0_4_SHIFT),
-    MFCTR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1 | 9 << SPR_0_4_SHIFT),
-
 
     LWZ_OPCODE   = (32u << OPCODE_SHIFT),
     LWZX_OPCODE  = (31u << OPCODE_SHIFT |  23u << 1),
@@ -585,6 +609,37 @@
     MTVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1604u     ),
     MFVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1540u     ),
 
+    // AES (introduced with Power 8)
+    VCIPHER_OPCODE      = (4u  << OPCODE_SHIFT | 1288u),
+    VCIPHERLAST_OPCODE  = (4u  << OPCODE_SHIFT | 1289u),
+    VNCIPHER_OPCODE     = (4u  << OPCODE_SHIFT | 1352u),
+    VNCIPHERLAST_OPCODE = (4u  << OPCODE_SHIFT | 1353u),
+    VSBOX_OPCODE        = (4u  << OPCODE_SHIFT | 1480u),
+
+    // SHA (introduced with Power 8)
+    VSHASIGMAD_OPCODE   = (4u  << OPCODE_SHIFT | 1730u),
+    VSHASIGMAW_OPCODE   = (4u  << OPCODE_SHIFT | 1666u),
+
+    // Vector Binary Polynomial Multiplication (introduced with Power 8)
+    VPMSUMB_OPCODE      = (4u  << OPCODE_SHIFT | 1032u),
+    VPMSUMD_OPCODE      = (4u  << OPCODE_SHIFT | 1224u),
+    VPMSUMH_OPCODE      = (4u  << OPCODE_SHIFT | 1096u),
+    VPMSUMW_OPCODE      = (4u  << OPCODE_SHIFT | 1160u),
+
+    // Vector Permute and Xor (introduced with Power 8)
+    VPERMXOR_OPCODE     = (4u  << OPCODE_SHIFT |   45u),
+
+    // Transactional Memory instructions (introduced with Power 8)
+    TBEGIN_OPCODE    = (31u << OPCODE_SHIFT |  654u << 1),
+    TEND_OPCODE      = (31u << OPCODE_SHIFT |  686u << 1),
+    TABORT_OPCODE    = (31u << OPCODE_SHIFT |  910u << 1),
+    TABORTWC_OPCODE  = (31u << OPCODE_SHIFT |  782u << 1),
+    TABORTWCI_OPCODE = (31u << OPCODE_SHIFT |  846u << 1),
+    TABORTDC_OPCODE  = (31u << OPCODE_SHIFT |  814u << 1),
+    TABORTDCI_OPCODE = (31u << OPCODE_SHIFT |  878u << 1),
+    TSR_OPCODE       = (31u << OPCODE_SHIFT |  750u << 1),
+    TCHECK_OPCODE    = (31u << OPCODE_SHIFT |  718u << 1),
+
     // Icache and dcache related instructions
     DCBA_OPCODE    = (31u << OPCODE_SHIFT |  758u << 1),
     DCBZ_OPCODE    = (31u << OPCODE_SHIFT | 1014u << 1),
@@ -1420,6 +1475,25 @@
   inline void mcrf( ConditionRegister crd, ConditionRegister cra);
   inline void mtcr( Register s);
 
+  // Special purpose registers
+  // Exception Register
+  inline void mtxer(Register s1);
+  inline void mfxer(Register d);
+  // Vector Register Save Register
+  inline void mtvrsave(Register s1);
+  inline void mfvrsave(Register d);
+  // Timebase
+  inline void mftb(Register d);
+  // Introduced with Power 8:
+  // Data Stream Control Register
+  inline void mtdscr(Register s1);
+  inline void mfdscr(Register d );
+  // Transactional Memory Registers
+  inline void mftfhar(Register d);
+  inline void mftfiar(Register d);
+  inline void mftexasr(Register d);
+  inline void mftexasru(Register d);
+
   // PPC 1, section 2.4.1 Branch Instructions
   inline void b(  address a, relocInfo::relocType rt = relocInfo::none);
   inline void b(  Label& L);
@@ -1860,6 +1934,39 @@
   inline void mtvscr(   VectorRegister b);
   inline void mfvscr(   VectorRegister d);
 
+  // AES (introduced with Power 8)
+  inline void vcipher(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vncipher(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsbox(       VectorRegister d, VectorRegister a);
+
+  // SHA (introduced with Power 8)
+  // Not yet implemented.
+
+  // Vector Binary Polynomial Multiplication (introduced with Power 8)
+  inline void vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumd(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumw(  VectorRegister d, VectorRegister a, VectorRegister b);
+
+  // Vector Permute and Xor (introduced with Power 8)
+  inline void vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+
+  // Transactional Memory instructions (introduced with Power 8)
+  inline void tbegin_();    // R=0
+  inline void tbeginrot_(); // R=1 Rollback-Only Transaction
+  inline void tend_();    // A=0
+  inline void tendall_(); // A=1
+  inline void tabort_(Register a);
+  inline void tabortwc_(int t, Register a, Register b);
+  inline void tabortwci_(int t, Register a, int si);
+  inline void tabortdc_(int t, Register a, Register b);
+  inline void tabortdci_(int t, Register a, int si);
+  inline void tsuspend_(); // tsr with L=0
+  inline void tresume_();  // tsr with L=1
+  inline void tcheck(int f);
+
   // The following encoders use r0 as second operand. These instructions
   // read r0 as '0'.
   inline void lwzx( Register d, Register s2);
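Each *_OPCODE constant above is plain bit composition: the primary opcode sits in the top six bits (hotspot's OPCODE_SHIFT is 26), an X-form extended opcode sits in bits 1-10 (hence the `<< 1`, which leaves bit 0 free for the record bit Rc), and VX-form vector opcodes such as VSBOX carry their extended opcode unshifted in the low bits. The SPR constants simply layer an SPR number on top of the shared MTSPR/MFSPR patterns. A self-checking sketch of the arithmetic, assuming only OPCODE_SHIFT = 26:

```cpp
#include <cstdint>
#include <cstdio>

// Mirrors the enum arithmetic from the hunk above (OPCODE_SHIFT = 26 assumed).
constexpr unsigned OPCODE_SHIFT = 26;
constexpr uint32_t TBEGIN_OPCODE = 31u << OPCODE_SHIFT | 654u << 1;  // X-form
constexpr uint32_t TEND_OPCODE   = 31u << OPCODE_SHIFT | 686u << 1;  // X-form
constexpr uint32_t VSBOX_OPCODE  = 4u  << OPCODE_SHIFT | 1480u;      // VX-form

// The architected base encodings of tbegin/tend (before Rc is OR'ed in).
static_assert(TBEGIN_OPCODE == 0x7C00051Cu, "tbegin base encoding");
static_assert(TEND_OPCODE   == 0x7C00055Cu, "tend base encoding");

int main() {
  std::printf("tbegin=0x%08X tend=0x%08X vsbox=0x%08X\n",
              TBEGIN_OPCODE, TEND_OPCODE, VSBOX_OPCODE);
}
```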
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -312,6 +312,25 @@
                                                       { emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); }
 inline void Assembler::mtcr( Register s)          { Assembler::mtcrf(0xff, s); }
 
+// Special purpose registers
+// Exception Register
+inline void Assembler::mtxer(Register s1)         { emit_int32(MTXER_OPCODE | rs(s1)); }
+inline void Assembler::mfxer(Register d )         { emit_int32(MFXER_OPCODE | rt(d)); }
+// Vector Register Save Register
+inline void Assembler::mtvrsave(Register s1)      { emit_int32(MTVRSAVE_OPCODE | rs(s1)); }
+inline void Assembler::mfvrsave(Register d )      { emit_int32(MFVRSAVE_OPCODE | rt(d)); }
+// Timebase
+inline void Assembler::mftb(Register d )          { emit_int32(MFTB_OPCODE  | rt(d)); }
+// Introduced with Power 8:
+// Data Stream Control Register
+inline void Assembler::mtdscr(Register s1)        { emit_int32(MTDSCR_OPCODE | rs(s1)); }
+inline void Assembler::mfdscr(Register d )        { emit_int32(MFDSCR_OPCODE | rt(d)); }
+// Transactional Memory Registers
+inline void Assembler::mftfhar(Register d )       { emit_int32(MFTFHAR_OPCODE   | rt(d)); }
+inline void Assembler::mftfiar(Register d )       { emit_int32(MFTFIAR_OPCODE   | rt(d)); }
+inline void Assembler::mftexasr(Register d )      { emit_int32(MFTEXASR_OPCODE  | rt(d)); }
+inline void Assembler::mftexasru(Register d )     { emit_int32(MFTEXASRU_OPCODE | rt(d)); }
+
 // SAP JVM 2006-02-13 PPC branch instruction.
 // PPC 1, section 2.4.1 Branch Instructions
 inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(0), rt); }
@@ -735,6 +754,39 @@
 inline void Assembler::mtvscr(  VectorRegister b)                                     { emit_int32( MTVSCR_OPCODE   | vrb(b)); }
 inline void Assembler::mfvscr(  VectorRegister d)                                     { emit_int32( MFVSCR_OPCODE   | vrt(d)); }
 
+// AES (introduced with Power 8)
+inline void Assembler::vcipher(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHER_OPCODE      | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHERLAST_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vncipher(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHER_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHERLAST_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsbox(       VectorRegister d, VectorRegister a)                   { emit_int32( VSBOX_OPCODE        | vrt(d) | vra(a)         ); }
+
+// SHA (introduced with Power 8)
+// Not yet implemented.
+
+// Vector Binary Polynomial Multiplication (introduced with Power 8)
+inline void Assembler::vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumd(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMD_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+
+// Vector Permute and Xor (introduced with Power 8)
+inline void Assembler::vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VPERMXOR_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
+
+// Transactional Memory instructions (introduced with Power 8)
+inline void Assembler::tbegin_()                                { emit_int32( TBEGIN_OPCODE | rc(1)); }
+inline void Assembler::tbeginrot_()                             { emit_int32( TBEGIN_OPCODE | /*R=1*/ 1u << (31-10) | rc(1)); }
+inline void Assembler::tend_()                                  { emit_int32( TEND_OPCODE | rc(1)); }
+inline void Assembler::tendall_()                               { emit_int32( TEND_OPCODE | /*A=1*/ 1u << (31-6) | rc(1)); }
+inline void Assembler::tabort_(Register a)                      { emit_int32( TABORT_OPCODE | ra(a) | rc(1)); }
+inline void Assembler::tabortwc_(int t, Register a, Register b) { emit_int32( TABORTWC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::tabortwci_(int t, Register a, int si)    { emit_int32( TABORTWCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
+inline void Assembler::tabortdc_(int t, Register a, Register b) { emit_int32( TABORTDC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::tabortdci_(int t, Register a, int si)    { emit_int32( TABORTDCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
+inline void Assembler::tsuspend_()                              { emit_int32( TSR_OPCODE | rc(1)); }
+inline void Assembler::tresume_()                               { emit_int32( TSR_OPCODE | /*L=1*/ 1u << (31-10) | rc(1)); }
+inline void Assembler::tcheck(int f)                            { emit_int32( TCHECK_OPCODE | bf(f)); }
+
 // ra0 version
 inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lwz(  Register d, int si16   ) { emit_int32( LWZ_OPCODE  | rt(d) | d1(si16));}
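tsuspend_ and tresume_ above emit the same tsr. instruction and differ only in the L bit; hotspot writes it as `1u << (31-10)` because the bit numbers in the comments follow PPC's big-endian convention, so "bit 10" counted from the MSB is bit 21 counted from the LSB. A sketch of that composition, under the same OPCODE_SHIFT = 26 assumption as above:

```cpp
#include <cstdint>
#include <cstdio>

constexpr unsigned OPCODE_SHIFT = 26;
constexpr uint32_t TSR_OPCODE = 31u << OPCODE_SHIFT | 750u << 1;
constexpr uint32_t RC1   = 1u;                // record bit, rc(1) in the hunk
constexpr uint32_t L_BIT = 1u << (31 - 10);   // PPC bit 10 == LSB bit 21

constexpr uint32_t TSUSPEND = TSR_OPCODE | RC1;          // tsr. with L=0
constexpr uint32_t TRESUME  = TSR_OPCODE | L_BIT | RC1;  // tsr. with L=1

int main() {
  std::printf("tsuspend.=0x%08X tresume.=0x%08X\n", TSUSPEND, TRESUME);
}
```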
--- a/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -37,6 +37,8 @@
 // signatures accordingly.
 const bool CCallingConventionRequiresIntsAsLongs = true;
 
+#define SUPPORTS_NATIVE_CX8
+
 // The PPC CPUs are NOT multiple-copy-atomic.
 #define CPU_NOT_MULTIPLE_COPY_ATOMIC
 
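Defining SUPPORTS_NATIVE_CX8 tells the shared runtime that PPC64 has a native 8-byte compare-and-swap (an ldarx/stdcx. loop), so 64-bit atomics need no lock-based fallback. The guarantee the macro expresses, illustrated with portable C++ atomics rather than the VM's own Atomic API:

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

int main() {
  std::atomic<int64_t> v{42};
  // On a platform with native cx8, 64-bit CAS is lock-free
  // (compiled to an ldarx/stdcx. retry loop on PPC64).
  std::printf("lock-free 64-bit atomics: %d\n", (int)v.is_lock_free());
  int64_t expected = 42;
  bool ok = v.compare_exchange_strong(expected, 99);
  std::printf("cas ok=%d value=%lld\n", (int)ok, (long long)v.load());
}
```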
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -25,7 +25,6 @@
 
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interp_masm_ppc_64.hpp"
 #include "interpreter/interpreterRuntime.hpp"
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -2366,7 +2366,7 @@
 #endif // INCLUDE_ALL_GCS
 
 // Values for last_Java_pc, and last_Java_sp must comply to the rules
-// in frame_ppc64.hpp.
+// in frame_ppc.hpp.
 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
   // Always set last_Java_pc and flags first because once last_Java_sp
   // is visible has_last_Java_frame is true and users will look at the
@@ -2493,6 +2493,7 @@
 }
 
 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
+  assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
   if (src == noreg) src = dst;
   Register shifted_src = src;
   if (Universe::narrow_klass_shift() != 0 ||
@@ -2527,14 +2528,11 @@
 
 void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
   if (Universe::heap() != NULL) {
-    if (Universe::narrow_oop_base() == NULL) {
-      Assembler::xorr(R30, R30, R30);
-    } else {
-      load_const(R30, Universe::narrow_ptrs_base(), tmp);
-    }
+    load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
   } else {
-    load_const(R30, Universe::narrow_ptrs_base_addr(), tmp);
-    ld(R30, 0, R30);
+    // Heap not yet allocated. Load indirectly.
+    int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
+    ld(R30, simm16_offset, R30);
   }
 }
 
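The rewritten reinit_heapbase leans on load_const_optimized: with the trailing `true` the helper may stop before materializing the low 16 bits of the address and instead return them as a signed displacement, which the following ld folds into its offset. The split has to account for that displacement being sign-extended. A sketch of the idiom with a hypothetical helper (not hotspot's API):

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical helper: split an address so that hi * 0x10000 + lo == addr,
// where lo is the sign-extended 16-bit displacement a PPC load consumes.
static void split_hi_lo(int32_t addr, int32_t* hi, int16_t* lo) {
  *lo = (int16_t)(addr & 0xFFFF);  // sign-extends, so it may come out negative
  *hi = (addr - *lo) >> 16;        // compensate for the sign extension
}

int main() {
  int32_t hi; int16_t lo;
  split_hi_lo(0x1234ABCD, &hi, &lo);
  std::printf("hi=0x%X lo=%d check=0x%X\n",
              (unsigned)hi, (int)lo, (unsigned)((hi << 16) + lo));
}
```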
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Oct 16 14:15:37 2014 -0700
@@ -1249,6 +1249,7 @@
 
     // Emit the trampoline stub which will be related to the branch-and-link below.
     CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
+    if (Compile::current()->env()->failing()) { return offsets; } // Code cache may be full.
     __ relocate(rtype);
   }
 
@@ -1410,7 +1411,7 @@
     while (bang_offset <= bang_end) {
       // Need at least one stack bang at end of shadow zone.
 
-      // Again I had to copy code, this time from assembler_ppc64.cpp,
+      // Again I had to copy code, this time from assembler_ppc.cpp,
       // bang_stack_with_offset - see there for comments.
 
       // Stack grows down, caller passes positive offset.
@@ -2000,7 +2001,7 @@
 
   // Inline_cache contains a klass.
   Register ic_klass       = as_Register(Matcher::inline_cache_reg_encode());
-  Register receiver_klass = R0;  // tmp
+  Register receiver_klass = R12_scratch2;  // tmp
 
   assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
   assert(R11_scratch1 == R11, "need prologue scratch register");
@@ -3484,6 +3485,7 @@
 
         // Emit the trampoline stub which will be related to the branch-and-link below.
         CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+        if (Compile::current()->env()->failing()) { return; } // Code cache may be full.
         __ relocate(_optimized_virtual ?
                     relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
       }
@@ -3527,6 +3529,7 @@
 
       // Emit the trampoline stub which will be related to the branch-and-link below.
       CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+      if (ra_->C->env()->failing()) { return; } // Code cache may be full.
       assert(_optimized_virtual, "methodHandle call should be a virtual call");
       __ relocate(relocInfo::opt_virtual_call_type);
     }
@@ -3577,9 +3580,7 @@
       const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
       const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
       CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
-
-      if (ra_->C->env()->failing())
-        return;
+      if (ra_->C->env()->failing()) { return; } // Code cache may be full.
 
       // Build relocation at call site with ic position as data.
       assert((_load_ic_hi_node != NULL && _load_ic_node == NULL) ||
@@ -5638,19 +5639,6 @@
   ins_pipe(pipe_class_memory);
 %}
 
-//// Load compressed klass and decode it if narrow_klass_shift == 0.
-//// TODO: will narrow_klass_shift ever be 0?
-//instruct decodeNKlass2Klass(iRegPdst dst, memory mem) %{
-//  match(Set dst (DecodeNKlass (LoadNKlass mem)));
-//  predicate(false /* TODO: PPC port Universe::narrow_klass_shift() == 0*);
-//  ins_cost(MEMORY_REF_COST);
-//
-//  format %{ "LWZ     $dst, $mem \t// DecodeNKlass (unscaled)" %}
-//  size(4);
-//  ins_encode( enc_lwz(dst, mem) );
-//  ins_pipe(pipe_class_memory);
-//%}
-
 // Load Klass Pointer
 instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{
   match(Set dst (LoadKlass mem));
@@ -6070,11 +6058,15 @@
   %}
 %}
 
-instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{
+// We have seen a safepoint between the hi and lo parts, and this node was handled
+// as an oop. Therefore this needs a match rule so that build_oop_map knows this is
+// not a narrow oop.
+instruct loadConNKlass_hi(iRegNdst dst, immNKlass_NM src) %{
+  match(Set dst src);
   effect(DEF dst, USE src);
   ins_cost(DEFAULT_COST);
 
-  format %{ "LIS     $dst, $src \t// narrow oop hi" %}
+  format %{ "LIS     $dst, $src \t// narrow klass hi" %}
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_addis);
@@ -6084,6 +6076,21 @@
   ins_pipe(pipe_class_default);
 %}
 
+// As with loadConNKlass_hi, this must be recognized as a narrow klass, not an oop!
+instruct loadConNKlass_mask(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
+  match(Set dst src1);
+  effect(TEMP src2);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MASK    $dst, $src2, 0xFFFFFFFF" %} // mask
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src2$$Register, 0x20);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // This needs a match rule so that build_oop_map knows this is
 // not a narrow oop.
 instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
@@ -6091,10 +6098,10 @@
   effect(TEMP src2);
   ins_cost(DEFAULT_COST);
 
-  format %{ "ADDI    $dst, $src1, $src2 \t// narrow oop lo" %}
-  size(4);
-  ins_encode %{
-    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+  format %{ "ORI    $dst, $src1, $src2 \t// narrow klass lo" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ori);
     intptr_t Csrc = Klass::encode_klass((Klass *)$src1$$constant);
     assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
     int klass_index = __ oop_recorder()->find_index((Klass *)$src1$$constant);
@@ -6125,10 +6132,11 @@
     MachNode *m2 = m1;
     if (!Assembler::is_uimm((jlong)Klass::encode_klass((Klass *)op_src->constant()), 31)) {
       // Value might be 1-extended. Mask out these bits.
-      m2 = new clearMs32bNode();
+      m2 = new loadConNKlass_maskNode();
       m2->add_req(NULL, m1);
       m2->_opnds[0] = op_dst;
-      m2->_opnds[1] = op_dst;
+      m2->_opnds[1] = op_src;
+      m2->_opnds[2] = op_dst;
       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       nodes->push(m2);
     }
@@ -6973,7 +6981,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
-    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_oop_shift(), 32);
+    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_klass_shift(), 32);
   %}
   ins_pipe(pipe_class_default);
 %}
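loadConNKlass_mask exists because, as the "Value might be 1-extended" comment in the expand code above notes, a narrow-klass constant whose low half is set with a sign-extending instruction can spill ones into the upper word; `clrldi(dst, src, 0x20)` (i.e. rldicl dst, src, 0, 32) clears those upper 32 bits. The effect, modeled in plain C++ as a sketch:

```cpp
#include <cstdint>
#include <cstdio>

// clrldi rd, rs, 32 == rldicl rd, rs, 0, 32: keep only the low 32 bits.
static inline uint64_t clrldi32(uint64_t x) { return x & 0xFFFFFFFFull; }

int main() {
  // A 32-bit value whose low half was materialized via a sign-extending
  // instruction ends up 1-extended into the upper word.
  uint64_t one_extended = 0xFFFFFFFF80001234ull;
  std::printf("masked=0x%016llX\n",               // 0x0000000080001234
              (unsigned long long)clrldi32(one_extended));
}
```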
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_ppc.hpp"
@@ -39,9 +38,6 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/top.hpp"
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
 #include "runtime/thread.inline.hpp"
 
 #define __ _masm->
@@ -216,7 +212,7 @@
     {
       BLOCK_COMMENT("Call frame manager or native entry.");
       // Call frame manager or native entry.
-      Register r_new_arg_entry = R14; // PPC_state;
+      Register r_new_arg_entry = R14;
       assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                  r_arg_method, r_arg_thread);
 
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -353,7 +353,6 @@
   __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
   __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
   __ bne(CCR0, notInt);
-  __ isync(); // Order load of constant wrt. tags.
   __ lwax(R17_tos, Rcpool, Rscratch1);
   __ push(itos);
   __ b(exit);
@@ -365,7 +364,6 @@
   __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
   __ asm_assert_eq("unexpected type", 0x8765);
 #endif
-  __ isync(); // Order load of constant wrt. tags.
   __ lfsx(F15_ftos, Rcpool, Rscratch1);
   __ push(ftos);
 
@@ -424,13 +422,11 @@
   // Check out Conversions.java for an example.
   // Also ConstantPool::header_size() is 20, which makes it very difficult
   // to double-align double on the constant pool. SG, 11/7/97
-  __ isync(); // Order load of constant wrt. tags.
   __ lfdx(F15_ftos, Rcpool, Rindex);
   __ push(dtos);
   __ b(Lexit);
 
   __ bind(Llong);
-  __ isync(); // Order load of constant wrt. tags.
   __ ldx(R17_tos, Rcpool, Rindex);
   __ push(ltos);
 
--- a/hotspot/src/os/aix/vm/perfMemory_aix.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/os/aix/vm/perfMemory_aix.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1020,7 +1020,3 @@
 
   unmap_shared(addr, bytes);
 }
-
-char* PerfMemory::backing_store_filename() {
-  return backing_store_file_name;
-}
--- a/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1043,7 +1043,3 @@
 
   unmap_shared(addr, bytes);
 }
-
-char* PerfMemory::backing_store_filename() {
-  return backing_store_file_name;
-}
--- a/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1049,7 +1049,3 @@
 
   unmap_shared(addr, bytes);
 }
-
-char* PerfMemory::backing_store_filename() {
-  return backing_store_file_name;
-}
--- a/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1068,7 +1068,3 @@
 
   unmap_shared(addr, bytes);
 }
-
-char* PerfMemory::backing_store_filename() {
-  return backing_store_file_name;
-}
--- a/hotspot/src/os/windows/vm/perfMemory_windows.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/os/windows/vm/perfMemory_windows.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1846,7 +1846,3 @@
     remove_file_mapping(addr);
   }
 }
-
-char* PerfMemory::backing_store_filename() {
-  return sharedmem_fileName;
-}
--- a/hotspot/src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -47,4 +47,4 @@
     );
 }
 
-#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_OJDKPPC_HPP
+#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -2069,14 +2069,14 @@
   LIR_Opr base_op = base.result();
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       LIR_Opr long_index_op = index_op;
-      if (x->index()->type()->is_constant()) {
+      if (index_op->is_constant()) {
         long_index_op = new_register(T_LONG);
         __ move(index_op, long_index_op);
       }
@@ -2091,14 +2091,14 @@
   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
-      if (!x->index()->type()->is_constant()) {
+    if (index_op->type() == T_INT) {
+      if (!index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ convert(Bytecodes::_i2l, idx.result(), index_op);
       }
     } else {
-      assert(x->index()->type()->tag() == longTag, "must be");
-      if (x->index()->type()->is_constant()) {
+      assert(index_op->type() == T_LONG, "must be");
+      if (index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ move(idx.result(), index_op);
       }
@@ -2179,12 +2179,12 @@
   LIR_Opr index_op = idx.result();
 
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       index_op = new_register(T_INT);
       __ convert(Bytecodes::_l2i, idx.result(), index_op);
     }
@@ -2194,7 +2194,7 @@
   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
+    if (index_op->type() == T_INT) {
       index_op = new_register(T_LONG);
       __ convert(Bytecodes::_i2l, idx.result(), index_op);
     }
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -2859,6 +2859,11 @@
       "bootstrap_method_index %u has bad constant type in class file %s",
       bootstrap_method_index,
       CHECK);
+
+    guarantee_property((operand_fill_index + 1 + argument_count) < operands->length(),
+      "Invalid BootstrapMethods num_bootstrap_methods or num_bootstrap_arguments value in class file %s",
+      CHECK);
+
     operands->at_put(operand_fill_index++, bootstrap_method_index);
     operands->at_put(operand_fill_index++, argument_count);
 
@@ -2875,8 +2880,6 @@
     }
   }
 
-  assert(ConstantPool::operand_array_length(operands) == attribute_array_length, "correct decode");
-
   u1* current_end = cfs->current();
   guarantee_property(current_end == current_start + attribute_byte_length,
                      "Bad length on BootstrapMethods in class file %s",
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -75,6 +75,7 @@
 typedef jboolean (JNICALL *ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
 typedef jboolean (JNICALL *ReadMappedEntry_t)(jzfile *zip, jzentry *entry, unsigned char **buf, char *namebuf);
 typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n);
+typedef jint     (JNICALL *Crc32_t)(jint crc, const jbyte *buf, jint len);
 
 static ZipOpen_t         ZipOpen            = NULL;
 static ZipClose_t        ZipClose           = NULL;
@@ -83,6 +84,7 @@
 static ReadMappedEntry_t ReadMappedEntry    = NULL;
 static GetNextEntry_t    GetNextEntry       = NULL;
 static canonicalize_fn_t CanonicalizeEntry  = NULL;
+static Crc32_t           Crc32              = NULL;
 
 // Globals
 
@@ -799,9 +801,11 @@
   ReadEntry    = CAST_TO_FN_PTR(ReadEntry_t, os::dll_lookup(handle, "ZIP_ReadEntry"));
   ReadMappedEntry = CAST_TO_FN_PTR(ReadMappedEntry_t, os::dll_lookup(handle, "ZIP_ReadMappedEntry"));
   GetNextEntry = CAST_TO_FN_PTR(GetNextEntry_t, os::dll_lookup(handle, "ZIP_GetNextEntry"));
+  Crc32        = CAST_TO_FN_PTR(Crc32_t, os::dll_lookup(handle, "ZIP_CRC32"));
 
   // ZIP_Close is not exported on Windows in JDK5.0 so don't abort if ZIP_Close is NULL
-  if (ZipOpen == NULL || FindEntry == NULL || ReadEntry == NULL || GetNextEntry == NULL) {
+  if (ZipOpen == NULL || FindEntry == NULL || ReadEntry == NULL ||
+      GetNextEntry == NULL || Crc32 == NULL) {
     vm_exit_during_initialization("Corrupted ZIP library", path);
   }
 
@@ -811,6 +815,11 @@
   // This lookup only works on 1.3. Do not check for non-null here
 }
 
+int ClassLoader::crc32(int crc, const char* buf, int len) {
+  assert(Crc32 != NULL, "ZIP_CRC32 is not found");
+  return (*Crc32)(crc, (const jbyte*)buf, len);
+}
+
 // PackageInfo data exists in order to support the java.lang.Package
 // class.  A Package object provides information about a java package
 // (version, vendor, etc.) which originates in the manifest of the jar
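ClassLoader::crc32 simply forwards to the zip library's ZIP_CRC32, which computes the standard CRC-32 used for zip entries (reflected polynomial 0xEDB88320, as in zlib). For reference, a bitwise sketch of the same checksum:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Reference bitwise CRC-32 (reflected polynomial 0xEDB88320); table-driven
// implementations such as ZIP_CRC32 are a faster equivalent.
static uint32_t crc32_ref(uint32_t crc, const unsigned char* buf, size_t len) {
  crc = ~crc;
  for (size_t i = 0; i < len; i++) {
    crc ^= buf[i];
    for (int k = 0; k < 8; k++) {
      crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
    }
  }
  return ~crc;
}

int main() {
  const char* s = "123456789";
  // The standard CRC-32 check value for "123456789" is 0xCBF43926.
  std::printf("0x%08X\n", crc32_ref(0, (const unsigned char*)s, std::strlen(s)));
}
```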
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -226,6 +226,7 @@
   // to avoid confusing the zip library
   static bool get_canonical_path(const char* orig, char* out, int len);
  public:
+  static int crc32(int crc, const char* buf, int len);
   static bool update_class_path_entry_list(const char *path,
                                            bool check_for_duplicates,
                                            bool throw_exception=true);
--- a/hotspot/src/share/vm/classfile/stackMapFrame.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stackMapFrame.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -148,7 +148,7 @@
     VerificationType* from, VerificationType* to, int32_t len, TRAPS) const {
   int32_t i = 0;
   for (i = 0; i < len; i++) {
-    if (!to[i].is_assignable_from(from[i], verifier(), THREAD)) {
+    if (!to[i].is_assignable_from(from[i], verifier(), false, THREAD)) {
       break;
     }
   }
@@ -245,7 +245,7 @@
   }
   VerificationType top = _stack[--_stack_size];
   bool subtype = type.is_assignable_from(
-    top, verifier(), CHECK_(VerificationType::bogus_type()));
+    top, verifier(), false, CHECK_(VerificationType::bogus_type()));
   if (!subtype) {
     verifier()->verify_error(
         ErrorContext::bad_type(_offset, stack_top_ctx(),
@@ -265,7 +265,7 @@
     return VerificationType::bogus_type();
   }
   bool subtype = type.is_assignable_from(_locals[index],
-    verifier(), CHECK_(VerificationType::bogus_type()));
+    verifier(), false, CHECK_(VerificationType::bogus_type()));
   if (!subtype) {
     verifier()->verify_error(
         ErrorContext::bad_type(_offset,
@@ -288,14 +288,14 @@
         "get long/double overflows locals");
     return;
   }
-  bool subtype = type1.is_assignable_from(_locals[index], verifier(), CHECK);
+  bool subtype = type1.is_assignable_from(_locals[index], verifier(), false, CHECK);
   if (!subtype) {
     verifier()->verify_error(
         ErrorContext::bad_type(_offset,
             TypeOrigin::local(index, this), TypeOrigin::implicit(type1)),
         "Bad local variable type");
   } else {
-    subtype = type2.is_assignable_from(_locals[index + 1], verifier(), CHECK);
+    subtype = type2.is_assignable_from(_locals[index + 1], verifier(), false, CHECK);
     if (!subtype) {
       /* Unreachable? All local store routines convert a split long or double
        * into a TOP during the store.  So we should never end up seeing an
--- a/hotspot/src/share/vm/classfile/stackMapFrame.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/classfile/stackMapFrame.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -234,7 +234,7 @@
     if (_stack_size != 0) {
       VerificationType top = _stack[_stack_size - 1];
       bool subtype = type.is_assignable_from(
-        top, verifier(), CHECK_(VerificationType::bogus_type()));
+        top, verifier(), false, CHECK_(VerificationType::bogus_type()));
       if (subtype) {
         --_stack_size;
         return top;
@@ -249,9 +249,9 @@
     assert(type2.is_long() || type2.is_double(), "must be long/double_2");
     if (_stack_size >= 2) {
       VerificationType top1 = _stack[_stack_size - 1];
-      bool subtype1 = type1.is_assignable_from(top1, verifier(), CHECK);
+      bool subtype1 = type1.is_assignable_from(top1, verifier(), false, CHECK);
       VerificationType top2 = _stack[_stack_size - 2];
-      bool subtype2 = type2.is_assignable_from(top2, verifier(), CHECK);
+      bool subtype2 = type2.is_assignable_from(top2, verifier(), false, CHECK);
       if (subtype1 && subtype2) {
         _stack_size -= 2;
         return;
--- a/hotspot/src/share/vm/classfile/verificationType.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/classfile/verificationType.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,8 @@
 }
 
 bool VerificationType::is_reference_assignable_from(
-    const VerificationType& from, ClassVerifier* context, TRAPS) const {
+    const VerificationType& from, ClassVerifier* context,
+    bool from_field_is_protected, TRAPS) const {
   instanceKlassHandle klass = context->current_class();
   if (from.is_null()) {
     // null is assignable to any reference
@@ -62,9 +63,11 @@
         Handle(THREAD, klass->protection_domain()), true, CHECK_false);
     KlassHandle this_class(THREAD, obj);
 
-    if (this_class->is_interface()) {
-      // We treat interfaces as java.lang.Object, including
-      // java.lang.Cloneable and java.io.Serializable
+    if (this_class->is_interface() && (!from_field_is_protected ||
+        from.name() != vmSymbols::java_lang_Object())) {
+      // If we are not trying to access a protected field or method in
+      // java.lang.Object then we treat interfaces as java.lang.Object,
+      // including java.lang.Cloneable and java.io.Serializable.
       return true;
     } else if (from.is_object()) {
       Klass* from_class = SystemDictionary::resolve_or_fail(
@@ -76,7 +79,8 @@
     VerificationType comp_this = get_component(context, CHECK_false);
     VerificationType comp_from = from.get_component(context, CHECK_false);
     if (!comp_this.is_bogus() && !comp_from.is_bogus()) {
-      return comp_this.is_assignable_from(comp_from, context, CHECK_false);
+      return comp_this.is_assignable_from(comp_from, context,
+                                          from_field_is_protected, CHECK_false);
     }
   }
   return false;
--- a/hotspot/src/share/vm/classfile/verificationType.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/classfile/verificationType.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -265,7 +265,8 @@
   // is assignable to another.  Returns true if one can assign 'from' to
   // this.
   bool is_assignable_from(
-      const VerificationType& from, ClassVerifier* context, TRAPS) const {
+      const VerificationType& from, ClassVerifier* context,
+      bool from_field_is_protected, TRAPS) const {
     if (equals(from) || is_bogus()) {
       return true;
     } else {
@@ -286,7 +287,9 @@
           return from.is_integer();
         default:
           if (is_reference() && from.is_reference()) {
-            return is_reference_assignable_from(from, context, CHECK_false);
+            return is_reference_assignable_from(from, context,
+                                                from_field_is_protected,
+                                                CHECK_false);
           } else {
             return false;
           }
@@ -308,7 +311,8 @@
  private:
 
   bool is_reference_assignable_from(
-    const VerificationType&, ClassVerifier*, TRAPS) const;
+    const VerificationType&, ClassVerifier*, bool from_field_is_protected,
+    TRAPS) const;
 };
 
 #endif // SHARE_VM_CLASSFILE_VERIFICATIONTYPE_HPP
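The new from_field_is_protected flag threads from the verifier's call sites down to is_reference_assignable_from so that the "treat interfaces as java.lang.Object" shortcut is suppressed exactly when a protected member of java.lang.Object is being accessed. The decision, condensed into a standalone predicate (a sketch of the logic, not the verifier's API):

```cpp
#include <cstdio>
#include <string>

// Condensed from the verificationType.cpp hunk: an interface target is
// treated as java.lang.Object (always assignable) unless the access is to
// a protected member and the source type really is java.lang.Object.
static bool interface_shortcut_applies(bool this_is_interface,
                                       bool from_field_is_protected,
                                       const std::string& from_name) {
  return this_is_interface &&
         (!from_field_is_protected || from_name != "java/lang/Object");
}

int main() {
  // Protected access with java.lang.Object as the source: shortcut off,
  // so the real subtype check must run.
  std::printf("%d\n", (int)interface_shortcut_applies(true, true,  "java/lang/Object"));  // 0
  std::printf("%d\n", (int)interface_shortcut_applies(true, false, "java/lang/Object"));  // 1
}
```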
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -98,6 +98,14 @@
   HandleMark hm;
   ResourceMark rm(THREAD);
 
+  if (!is_eligible_for_verification(klass, should_verify_class)) {
+    return true;
+  }
+
+  // If the class should be verified, first see if we can use the split
+  // verifier.  If not, or if verification fails and FailOverToOldVerifier
+  // is set, then call the inference verifier.
+
   Symbol* exception_name = NULL;
   const size_t message_buffer_len = klass->name()->utf8_length() + 1024;
   char* message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len);
@@ -105,47 +113,42 @@
 
   const char* klassName = klass->external_name();
   bool can_failover = FailOverToOldVerifier &&
-      klass->major_version() < NOFAILOVER_MAJOR_VERSION;
+     klass->major_version() < NOFAILOVER_MAJOR_VERSION;
 
-  // If the class should be verified, first see if we can use the split
-  // verifier.  If not, or if verification fails and FailOverToOldVerifier
-  // is set, then call the inference verifier.
-  if (is_eligible_for_verification(klass, should_verify_class)) {
-    if (TraceClassInitialization) {
-      tty->print_cr("Start class verification for: %s", klassName);
+  if (TraceClassInitialization) {
+    tty->print_cr("Start class verification for: %s", klassName);
+  }
+  if (klass->major_version() >= STACKMAP_ATTRIBUTE_MAJOR_VERSION) {
+    ClassVerifier split_verifier(klass, THREAD);
+    split_verifier.verify_class(THREAD);
+    exception_name = split_verifier.result();
+    if (can_failover && !HAS_PENDING_EXCEPTION &&
+        (exception_name == vmSymbols::java_lang_VerifyError() ||
+         exception_name == vmSymbols::java_lang_ClassFormatError())) {
+      if (TraceClassInitialization || VerboseVerification) {
+        tty->print_cr(
+          "Fail over class verification to old verifier for: %s", klassName);
+      }
+      exception_name = inference_verify(
+        klass, message_buffer, message_buffer_len, THREAD);
     }
-    if (klass->major_version() >= STACKMAP_ATTRIBUTE_MAJOR_VERSION) {
-      ClassVerifier split_verifier(klass, THREAD);
-      split_verifier.verify_class(THREAD);
-      exception_name = split_verifier.result();
-      if (can_failover && !HAS_PENDING_EXCEPTION &&
-          (exception_name == vmSymbols::java_lang_VerifyError() ||
-           exception_name == vmSymbols::java_lang_ClassFormatError())) {
-        if (TraceClassInitialization || VerboseVerification) {
-          tty->print_cr(
-            "Fail over class verification to old verifier for: %s", klassName);
-        }
-        exception_name = inference_verify(
-          klass, message_buffer, message_buffer_len, THREAD);
-      }
-      if (exception_name != NULL) {
-        exception_message = split_verifier.exception_message();
-      }
-    } else {
-      exception_name = inference_verify(
-          klass, message_buffer, message_buffer_len, THREAD);
+    if (exception_name != NULL) {
+      exception_message = split_verifier.exception_message();
     }
+  } else {
+    exception_name = inference_verify(
+        klass, message_buffer, message_buffer_len, THREAD);
+  }
 
-    if (TraceClassInitialization || VerboseVerification) {
-      if (HAS_PENDING_EXCEPTION) {
-        tty->print("Verification for %s has", klassName);
-        tty->print_cr(" exception pending %s ",
-          InstanceKlass::cast(PENDING_EXCEPTION->klass())->external_name());
-      } else if (exception_name != NULL) {
-        tty->print_cr("Verification for %s failed", klassName);
-      }
-      tty->print_cr("End class verification for: %s", klassName);
+  if (TraceClassInitialization || VerboseVerification) {
+    if (HAS_PENDING_EXCEPTION) {
+      tty->print("Verification for %s has", klassName);
+      tty->print_cr(" exception pending %s ",
+        InstanceKlass::cast(PENDING_EXCEPTION->klass())->external_name());
+    } else if (exception_name != NULL) {
+      tty->print_cr("Verification for %s failed", klassName);
     }
+    tty->print_cr("End class verification for: %s", klassName);
   }
 
   if (HAS_PENDING_EXCEPTION) {
@@ -1718,7 +1721,7 @@
       VerificationType throwable =
         VerificationType::reference_type(vmSymbols::java_lang_Throwable());
       bool is_subclass = throwable.is_assignable_from(
-        catch_type, this, CHECK_VERIFY(this));
+        catch_type, this, false, CHECK_VERIFY(this));
       if (!is_subclass) {
         // 4286534: should throw VerifyError according to recent spec change
         verify_error(ErrorContext::bad_type(handler_pc,
@@ -2171,7 +2174,7 @@
         stack_object_type = current_type();
       }
       is_assignable = target_class_type.is_assignable_from(
-        stack_object_type, this, CHECK_VERIFY(this));
+        stack_object_type, this, false, CHECK_VERIFY(this));
       if (!is_assignable) {
         verify_error(ErrorContext::bad_type(bci,
             current_frame->stack_top_ctx(),
@@ -2198,7 +2201,7 @@
         // It's protected access, check if stack object is assignable to
         // current class.
         is_assignable = current_type().is_assignable_from(
-          stack_object_type, this, CHECK_VERIFY(this));
+          stack_object_type, this, true, CHECK_VERIFY(this));
         if (!is_assignable) {
           verify_error(ErrorContext::bad_type(bci,
               current_frame->stack_top_ctx(),
@@ -2472,7 +2475,7 @@
         instanceKlassHandle mh(THREAD, m->method_holder());
         if (m->is_protected() && !mh->is_same_class_package(_klass())) {
           bool assignable = current_type().is_assignable_from(
-            objectref_type, this, CHECK_VERIFY(this));
+            objectref_type, this, true, CHECK_VERIFY(this));
           if (!assignable) {
             verify_error(ErrorContext::bad_type(bci,
                 TypeOrigin::cp(new_class_index, objectref_type),
@@ -2643,11 +2646,11 @@
     bool have_imr_indirect = cp->tag_at(index).value() == JVM_CONSTANT_InterfaceMethodref;
     if (!current_class()->is_anonymous()) {
       subtype = ref_class_type.is_assignable_from(
-                 current_type(), this, CHECK_VERIFY(this));
+                 current_type(), this, false, CHECK_VERIFY(this));
     } else {
       VerificationType host_klass_type =
                         VerificationType::reference_type(current_class()->host_klass()->name());
-      subtype = ref_class_type.is_assignable_from(host_klass_type, this, CHECK_VERIFY(this));
+      subtype = ref_class_type.is_assignable_from(host_klass_type, this, false, CHECK_VERIFY(this));
 
       // If invokespecial of IMR, need to recheck for same or
       // direct interface relative to the host class
@@ -2691,7 +2694,7 @@
           VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
           VerificationType hosttype =
             VerificationType::reference_type(current_class()->host_klass()->name());
-          bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
+          bool subtype = hosttype.is_assignable_from(top, this, false, CHECK_VERIFY(this));
           if (!subtype) {
             verify_error( ErrorContext::bad_type(current_frame->offset(),
               current_frame->stack_top_ctx(),
@@ -2716,7 +2719,7 @@
               // It's protected access, check if stack object is
               // assignable to current class.
               bool is_assignable = current_type().is_assignable_from(
-                stack_object_type, this, CHECK_VERIFY(this));
+                stack_object_type, this, true, CHECK_VERIFY(this));
               if (!is_assignable) {
                 if (ref_class_type.name() == vmSymbols::java_lang_Object()
                     && stack_object_type.is_array()
@@ -2899,7 +2902,7 @@
         "Method expects a return value");
     return;
   }
-  bool match = return_type.is_assignable_from(type, this, CHECK_VERIFY(this));
+  bool match = return_type.is_assignable_from(type, this, false, CHECK_VERIFY(this));
   if (!match) {
     verify_error(ErrorContext::bad_type(bci,
         current_frame->stack_top_ctx(), TypeOrigin::signature(return_type)),
--- a/hotspot/src/share/vm/code/codeCache.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -254,8 +254,7 @@
   if (!SegmentedCodeCache) {
     // No segmentation: use a single code heap
     return (code_blob_type == CodeBlobType::All);
-  } else if ((Arguments::mode() == Arguments::_int) ||
-             (TieredStopAtLevel == CompLevel_none)) {
+  } else if (Arguments::mode() == Arguments::_int) {
     // Interpreter only: we don't need any method code heaps
     return (code_blob_type == CodeBlobType::NonNMethod);
   } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1683,6 +1683,8 @@
   int  _failures;
   bool _verbose;
 
+  HeapRegionClaimer _hrclaimer;
+
 public:
   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                             BitMap* region_bm, BitMap* card_bm,
@@ -1692,19 +1694,8 @@
       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
       _failures(0), _verbose(false),
-      _n_workers(0) {
+      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
     assert(VerifyDuringGC, "don't call this otherwise");
-
-    // Use the value already set as the number of active threads
-    // in the call to run_task().
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
-      assert( _g1h->workers()->active_workers() > 0,
-        "Should have been previously set");
-      _n_workers = _g1h->workers()->active_workers();
-    } else {
-      _n_workers = 1;
-    }
-
     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
 
@@ -1721,10 +1712,7 @@
                                             _verbose);
 
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&verify_cl,
-                                            worker_id,
-                                            _n_workers,
-                                            HeapRegion::VerifyCountClaimValue);
+      _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);
     } else {
       _g1h->heap_region_iterate(&verify_cl);
     }
@@ -1813,22 +1801,14 @@
   BitMap* _actual_card_bm;
 
   uint    _n_workers;
+  HeapRegionClaimer _hrclaimer;
 
 public:
   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
     : AbstractGangTask("G1 final counting"),
       _g1h(g1h), _cm(_g1h->concurrent_mark()),
       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
-      _n_workers(0) {
-    // Use the value already set as the number of active threads
-    // in the call to run_task().
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
-      assert( _g1h->workers()->active_workers() > 0,
-        "Should have been previously set");
-      _n_workers = _g1h->workers()->active_workers();
-    } else {
-      _n_workers = 1;
-    }
+      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
   }
 
   void work(uint worker_id) {
@@ -1839,10 +1819,7 @@
                                                 _actual_card_bm);
 
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&final_update_cl,
-                                            worker_id,
-                                            _n_workers,
-                                            HeapRegion::FinalCountClaimValue);
+      _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
     } else {
       _g1h->heap_region_iterate(&final_update_cl);
     }
@@ -1929,12 +1906,12 @@
   size_t _max_live_bytes;
   size_t _freed_bytes;
   FreeRegionList* _cleanup_list;
+  HeapRegionClaimer _hrclaimer;
 
 public:
-  G1ParNoteEndTask(G1CollectedHeap* g1h,
-                   FreeRegionList* cleanup_list) :
-    AbstractGangTask("G1 note end"), _g1h(g1h),
-    _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
+  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
+      AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
+  }
 
   void work(uint worker_id) {
     double start = os::elapsedTime();
@@ -1943,9 +1920,7 @@
     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                            &hrrs_cleanup_task);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
-                                            _g1h->workers()->active_workers(),
-                                            HeapRegion::NoteEndClaimValue);
+      _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
     } else {
       _g1h->heap_region_iterate(&g1_note_end);
     }
@@ -1991,16 +1966,16 @@
   G1RemSet* _g1rs;
   BitMap* _region_bm;
   BitMap* _card_bm;
+  HeapRegionClaimer _hrclaimer;
+
 public:
-  G1ParScrubRemSetTask(G1CollectedHeap* g1h,
-                       BitMap* region_bm, BitMap* card_bm) :
-    AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
-    _region_bm(region_bm), _card_bm(card_bm) { }
+  G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
+      AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
+  }
 
   void work(uint worker_id) {
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
-                       HeapRegion::ScrubRemSetClaimValue);
+      _g1rs->scrub_par(_region_bm, _card_bm, worker_id, &_hrclaimer);
     } else {
       _g1rs->scrub(_region_bm, _card_bm);
     }
@@ -2043,9 +2018,6 @@
   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-   assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
-           "sanity check");
-
     g1h->set_par_threads();
     n_workers = g1h->n_par_threads();
     assert(g1h->n_par_threads() == n_workers,
@@ -2053,9 +2025,6 @@
     g1h->workers()->run_task(&g1_par_count_task);
     // Done with the parallel phase so reset to 0.
     g1h->set_par_threads(0);
-
-    assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
-           "sanity check");
   } else {
     n_workers = 1;
     g1_par_count_task.work(0);
@@ -2080,9 +2049,6 @@
       g1h->workers()->run_task(&g1_par_verify_task);
       // Done with the parallel phase so reset to 0.
       g1h->set_par_threads(0);
-
-      assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
-             "sanity check");
     } else {
       g1_par_verify_task.work(0);
     }
@@ -2108,14 +2074,11 @@
   g1h->reset_gc_time_stamp();
 
   // Note end of marking in all heap regions.
-  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
+  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     g1h->set_par_threads((int)n_workers);
     g1h->workers()->run_task(&g1_par_note_end_task);
     g1h->set_par_threads(0);
-
-    assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
-           "sanity check");
   } else {
     g1_par_note_end_task.work(0);
   }
@@ -2132,15 +2095,11 @@
   // regions.
   if (G1ScrubRemSets) {
     double rs_scrub_start = os::elapsedTime();
-    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
+    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       g1h->set_par_threads((int)n_workers);
       g1h->workers()->run_task(&g1_par_scrub_rs_task);
       g1h->set_par_threads(0);
-
-      assert(g1h->check_heap_region_claim_values(
-                                            HeapRegion::ScrubRemSetClaimValue),
-             "sanity check");
     } else {
       g1_par_scrub_rs_task.work(0);
     }
@@ -3288,6 +3247,7 @@
   BitMap* _cm_card_bm;
   uint _max_worker_id;
   int _active_workers;
+  HeapRegionClaimer _hrclaimer;
 
 public:
   G1AggregateCountDataTask(G1CollectedHeap* g1h,
@@ -3295,18 +3255,18 @@
                            BitMap* cm_card_bm,
                            uint max_worker_id,
                            int n_workers) :
-    AbstractGangTask("Count Aggregation"),
-    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
-    _max_worker_id(max_worker_id),
-    _active_workers(n_workers) { }
+      AbstractGangTask("Count Aggregation"),
+      _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
+      _max_worker_id(max_worker_id),
+      _active_workers(n_workers),
+      _hrclaimer(_active_workers) {
+  }
 
   void work(uint worker_id) {
     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
 
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
-                                            _active_workers,
-                                            HeapRegion::AggregateCountClaimValue);
+      _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
     } else {
       _g1h->heap_region_iterate(&cl);
     }
@@ -3323,15 +3283,9 @@
                                            _max_worker_id, n_workers);
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
-           "sanity check");
     _g1h->set_par_threads(n_workers);
     _g1h->workers()->run_task(&g1_par_agg_task);
     _g1h->set_par_threads(0);
-
-    assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
-           "sanity check");
-    _g1h->reset_heap_region_claim_values();
   } else {
     g1_par_agg_task.work(0);
   }
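The set_par_threads()/run_task() choreography above recurs throughout this change. A minimal sketch of the pattern, using names from the surrounding code (g1h, cm, cm_card_bm and max_worker_id stand for values held by the caller; illustrative only):

    uint n_workers = g1h->workers()->active_workers();
    G1AggregateCountDataTask task(g1h, cm, cm_card_bm, max_worker_id, n_workers);
    g1h->set_par_threads(n_workers);   // advertise the gang size to the task
    g1h->workers()->run_task(&task);   // each worker executes task.work(worker_id)
    g1h->set_par_threads(0);           // back to serial afterwards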
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -90,8 +90,8 @@
 
 // Notes on implementation of parallelism in different tasks.
 //
-// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
-// The number of GC workers is passed to heap_region_par_iterate_chunked().
+// G1ParVerifyTask uses heap_region_par_iterate() for parallelism.
+// The number of GC workers is passed in via the shared HeapRegionClaimer.
 // It does use run_task() which sets _n_workers in the task.
 // G1ParTask executes g1_process_roots() ->
 // SharedHeap::process_roots() which calls eventually to
@@ -1215,17 +1215,15 @@
 
 class ParRebuildRSTask: public AbstractGangTask {
   G1CollectedHeap* _g1;
+  HeapRegionClaimer _hrclaimer;
+
 public:
-  ParRebuildRSTask(G1CollectedHeap* g1)
-    : AbstractGangTask("ParRebuildRSTask"),
-      _g1(g1)
-  { }
+  ParRebuildRSTask(G1CollectedHeap* g1) :
+      AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
 
   void work(uint worker_id) {
     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
-    _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
-                                          _g1->workers()->active_workers(),
-                                         HeapRegion::RebuildRSClaimValue);
+    _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
   }
 };
 
@@ -1455,8 +1453,6 @@
         set_par_threads(n_workers);
 
         ParRebuildRSTask rebuild_rs_task(this);
-        assert(check_heap_region_claim_values(
-               HeapRegion::InitialClaimValue), "sanity check");
         assert(UseDynamicNumberOfGCThreads ||
                workers()->active_workers() == workers()->total_workers(),
                "Unless dynamic should use total workers");
@@ -1466,9 +1462,6 @@
         set_par_threads(workers()->active_workers());
         workers()->run_task(&rebuild_rs_task);
         set_par_threads(0);
-        assert(check_heap_region_claim_values(
-               HeapRegion::RebuildRSClaimValue), "sanity check");
-        reset_heap_region_claim_values();
       } else {
         RebuildRSOutOfRegionClosure rebuild_rs(this);
         heap_region_iterate(&rebuild_rs);
@@ -2343,6 +2336,7 @@
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
+    case GCCause::_update_allocation_context_stats_inc: return true;
     default:                                return false;
   }
 }
@@ -2633,110 +2627,11 @@
 }
 
 void
-G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
-                                                 uint worker_id,
-                                                 uint num_workers,
-                                                 jint claim_value) const {
-  _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
-}
-
-class ResetClaimValuesClosure: public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    r->set_claim_value(HeapRegion::InitialClaimValue);
-    return false;
-  }
-};
-
-void G1CollectedHeap::reset_heap_region_claim_values() {
-  ResetClaimValuesClosure blk;
-  heap_region_iterate(&blk);
-}
-
-void G1CollectedHeap::reset_cset_heap_region_claim_values() {
-  ResetClaimValuesClosure blk;
-  collection_set_iterate(&blk);
-}
-
-#ifdef ASSERT
-// This checks whether all regions in the heap have the correct claim
-// value. I also piggy-backed on this a check to ensure that the
-// humongous_start_region() information on "continues humongous"
-// regions is correct.
-
-class CheckClaimValuesClosure : public HeapRegionClosure {
-private:
-  jint _claim_value;
-  uint _failures;
-  HeapRegion* _sh_region;
-
-public:
-  CheckClaimValuesClosure(jint claim_value) :
-    _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->claim_value() != _claim_value) {
-      gclog_or_tty->print_cr("Region " HR_FORMAT ", "
-                             "claim value = %d, should be %d",
-                             HR_FORMAT_PARAMS(r),
-                             r->claim_value(), _claim_value);
-      ++_failures;
-    }
-    if (!r->is_humongous()) {
-      _sh_region = NULL;
-    } else if (r->is_starts_humongous()) {
-      _sh_region = r;
-    } else if (r->is_continues_humongous()) {
-      if (r->humongous_start_region() != _sh_region) {
-        gclog_or_tty->print_cr("Region " HR_FORMAT ", "
-                               "HS = "PTR_FORMAT", should be "PTR_FORMAT,
-                               HR_FORMAT_PARAMS(r),
-                               r->humongous_start_region(),
-                               _sh_region);
-        ++_failures;
-      }
-    }
-    return false;
-  }
-  uint failures() { return _failures; }
-};
-
-bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
-  CheckClaimValuesClosure cl(claim_value);
-  heap_region_iterate(&cl);
-  return cl.failures() == 0;
-}
-
-class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
-private:
-  jint _claim_value;
-  uint _failures;
-
-public:
-  CheckClaimValuesInCSetHRClosure(jint claim_value) :
-    _claim_value(claim_value), _failures(0) { }
-
-  uint failures() { return _failures; }
-
-  bool doHeapRegion(HeapRegion* hr) {
-    assert(hr->in_collection_set(), "how?");
-    assert(!hr->is_humongous(), "H-region in CSet");
-    if (hr->claim_value() != _claim_value) {
-      gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
-                             "claim value = %d, should be %d",
-                             HR_FORMAT_PARAMS(hr),
-                             hr->claim_value(), _claim_value);
-      _failures += 1;
-    }
-    return false;
-  }
-};
-
-bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
-  CheckClaimValuesInCSetHRClosure cl(claim_value);
-  collection_set_iterate(&cl);
-  return cl.failures() == 0;
-}
-#endif // ASSERT
+G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
+                                         uint worker_id,
+                                         HeapRegionClaimer *hrclaimer) const {
+  _hrm.par_iterate(cl, worker_id, hrclaimer);
+}
 
 // Clear the cached CSet starting regions and (more importantly)
 // the time stamps. Called when we reset the GC time stamp.
@@ -3251,19 +3146,21 @@
 
 class G1ParVerifyTask: public AbstractGangTask {
 private:
-  G1CollectedHeap* _g1h;
-  VerifyOption     _vo;
-  bool             _failures;
+  G1CollectedHeap*  _g1h;
+  VerifyOption      _vo;
+  bool              _failures;
+  HeapRegionClaimer _hrclaimer;
 
 public:
   // _vo == UsePrevMarking -> use "prev" marking information,
   // _vo == UseNextMarking -> use "next" marking information,
   // _vo == UseMarkWord    -> use mark word from object header.
   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
-    AbstractGangTask("Parallel verify task"),
-    _g1h(g1h),
-    _vo(vo),
-    _failures(false) { }
+      AbstractGangTask("Parallel verify task"),
+      _g1h(g1h),
+      _vo(vo),
+      _failures(false),
+      _hrclaimer(g1h->workers()->active_workers()) {}
 
   bool failures() {
     return _failures;
@@ -3272,9 +3169,7 @@
   void work(uint worker_id) {
     HandleMark hm;
     VerifyRegionClosure blk(true, _vo);
-    _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
-                                          _g1h->workers()->active_workers(),
-                                          HeapRegion::ParVerifyClaimValue);
+    _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
     if (blk.failures()) {
       _failures = true;
     }
@@ -3316,8 +3211,6 @@
 
     if (!silent) { gclog_or_tty->print("HeapRegions "); }
     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
-      assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
-             "sanity check");
 
       G1ParVerifyTask task(this, vo);
       assert(UseDynamicNumberOfGCThreads ||
@@ -3331,15 +3224,6 @@
         failures = true;
       }
 
-      // Checks that the expected amount of parallel work was done.
-      // The implication is that n_workers is > 0.
-      assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
-             "sanity check");
-
-      reset_heap_region_claim_values();
-
-      assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
-             "sanity check");
     } else {
       VerifyRegionClosure blk(false, vo);
       heap_region_iterate(&blk);
@@ -3926,8 +3810,6 @@
     }
 
     assert(check_young_list_well_formed(), "young list should be well formed");
-    assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
-           "sanity check");
 
     // Don't dynamically change the number of GC threads this early.  A value of
     // 0 is used to indicate serial work.  When parallel work is done,
@@ -4288,26 +4170,12 @@
 }
 
 void G1CollectedHeap::remove_self_forwarding_pointers() {
-  assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
-
   double remove_self_forwards_start = os::elapsedTime();
 
+  set_par_threads();
   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
-
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    set_par_threads();
-    workers()->run_task(&rsfp_task);
-    set_par_threads(0);
-  } else {
-    rsfp_task.work(0);
-  }
-
-  assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
-
-  // Reset the claim values in the regions in the collection set.
-  reset_cset_heap_region_claim_values();
-
-  assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
+  workers()->run_task(&rsfp_task);
+  set_par_threads(0);
 
   // Now restore saved marks, if any.
   assert(_objs_with_preserved_marks.size() ==
@@ -5948,11 +5816,6 @@
 
   purge_code_root_memory();
 
-  if (g1_policy()->during_initial_mark_pause()) {
-    // Reset the claim values set during marking the strong code roots
-    reset_heap_region_claim_values();
-  }
-
   finalize_for_evac_failure();
 
   if (evacuation_failed()) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -211,6 +211,7 @@
   friend class G1FreeHumongousRegionClosure;
   // Other related classes.
   friend class G1MarkSweep;
+  friend class HeapRegionClaimer;
 
 private:
   // The one and only G1CollectedHeap, so static functions can find it.
@@ -1377,38 +1378,15 @@
 
   inline HeapWord* bottom_addr_for_region(uint index) const;
 
-  // Divide the heap region sequence into "chunks" of some size (the number
-  // of regions divided by the number of parallel threads times some
-  // overpartition factor, currently 4).  Assumes that this will be called
-  // in parallel by ParallelGCThreads worker threads with distinct worker
-  // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
-  // calls will use the same "claim_value", and that that claim value is
-  // different from the claim_value of any heap region before the start of
-  // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
-  // attempting to claim the first region in each chunk, and, if
-  // successful, applying the closure to each region in the chunk (and
-  // setting the claim value of the second and subsequent regions of the
-  // chunk.)  For now requires that "doHeapRegion" always returns "false",
-  // i.e., that a closure never attempt to abort a traversal.
-  void heap_region_par_iterate_chunked(HeapRegionClosure* cl,
-                                       uint worker_id,
-                                       uint num_workers,
-                                       jint claim_value) const;
-
-  // It resets all the region claim values to the default.
-  void reset_heap_region_claim_values();
-
-  // Resets the claim values of regions in the current
-  // collection set to the default.
-  void reset_cset_heap_region_claim_values();
-
-#ifdef ASSERT
-  bool check_heap_region_claim_values(jint claim_value);
-
-  // Same as the routine above but only checks regions in the
-  // current collection set.
-  bool check_cset_heap_region_claim_values(jint claim_value);
-#endif // ASSERT
+  // Iterate over the heap regions in parallel. Assumes that this will be called
+  // in parallel by ParallelGCThreads worker threads with distinct worker ids
+  // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "cl->doHeapRegion"
+  // to each of the regions, by attempting to claim the region using the
+  // HeapRegionClaimer and, if successful, applying the closure to the claimed
+  // region.
+  void heap_region_par_iterate(HeapRegionClosure* cl,
+                               uint worker_id,
+                               HeapRegionClaimer* hrclaimer) const;
 
   // Clear the cached cset start regions and (more importantly)
   // the time stamps. Called when we reset the GC time stamp.
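A minimal sketch of how a task is expected to use this API, following the constructors added elsewhere in this changeset; ExampleRegionTask and SomeRegionClosure are hypothetical names, the rest comes from the declarations above:

    class ExampleRegionTask : public AbstractGangTask {
      G1CollectedHeap*  _g1h;
      HeapRegionClaimer _hrclaimer;    // shared by all workers, sized once
    public:
      ExampleRegionTask(G1CollectedHeap* g1h) :
          AbstractGangTask("Example region task"), _g1h(g1h),
          _hrclaimer(g1h->workers()->active_workers()) {}

      void work(uint worker_id) {
        SomeRegionClosure cl;          // any HeapRegionClosure (hypothetical)
        // Every worker scans all regions but only processes those it claims.
        _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
      }
    };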
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1598,19 +1598,17 @@
   CollectionSetChooser* _hrSorted;
   uint _chunk_size;
   G1CollectedHeap* _g1;
+  HeapRegionClaimer _hrclaimer;
+
 public:
-  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
-    AbstractGangTask("ParKnownGarbageTask"),
-    _hrSorted(hrSorted), _chunk_size(chunk_size),
-    _g1(G1CollectedHeap::heap()) { }
+  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
+      AbstractGangTask("ParKnownGarbageTask"),
+      _hrSorted(hrSorted), _chunk_size(chunk_size),
+      _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
 
   void work(uint worker_id) {
     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
-
-    // Back to zero for the claim value.
-    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
-                                         _g1->workers()->active_workers(),
-                                         HeapRegion::InitialClaimValue);
+    _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
   }
 };
 
@@ -1641,12 +1639,8 @@
     }
     _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
                                                            WorkUnit);
-    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
-                                            (int) WorkUnit);
+    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit, (uint) no_of_gc_threads);
     _g1->workers()->run_task(&parKnownGarbageTask);
-
-    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
-           "sanity check");
   } else {
     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
     _g1->heap_region_iterate(&knownGarbagecl);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -177,16 +177,18 @@
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
   uint _worker_id;
+  HeapRegionClaimer* _hrclaimer;
 
   DirtyCardQueue _dcq;
   UpdateRSetDeferred _update_rset_cl;
 
 public:
   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
-                                uint worker_id) :
-    _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
-    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) {
-    }
+                                uint worker_id,
+                                HeapRegionClaimer* hrclaimer) :
+      _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
+      _worker_id(worker_id), _cm(_g1h->concurrent_mark()), _hrclaimer(hrclaimer) {
+  }
 
   bool doHeapRegion(HeapRegion *hr) {
     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
@@ -195,7 +197,7 @@
     assert(!hr->is_humongous(), "sanity");
     assert(hr->in_collection_set(), "bad CS");
 
-    if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
+    if (_hrclaimer->claim_region(hr->hrm_index())) {
       if (hr->evacuation_failed()) {
         RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                             during_initial_mark,
@@ -233,14 +235,15 @@
 class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
+  HeapRegionClaimer _hrclaimer;
 
 public:
   G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
-    AbstractGangTask("G1 Remove Self-forwarding Pointers"),
-    _g1h(g1h) { }
+      AbstractGangTask("G1 Remove Self-forwarding Pointers"), _g1h(g1h),
+      _hrclaimer(g1h->workers()->active_workers()) {}
 
   void work(uint worker_id) {
-    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);
+    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id, &_hrclaimer);
 
     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -425,13 +425,9 @@
   _g1->heap_region_iterate(&scrub_cl);
 }
 
-void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
-                                uint worker_num, int claim_val) {
+void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm, uint worker_num, HeapRegionClaimer *hrclaimer) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
-  _g1->heap_region_par_iterate_chunked(&scrub_cl,
-                                       worker_num,
-                                       n_workers(),
-                                       claim_val);
+  _g1->heap_region_par_iterate(&scrub_cl, worker_num, hrclaimer);
 }
 
 G1TriggerClosure::G1TriggerClosure() :
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -128,10 +128,10 @@
   void scrub(BitMap* region_bm, BitMap* card_bm);
 
   // Like the above, but assumes is called in parallel: "worker_num" is the
-  // parallel thread id of the current thread, and "claim_val" is the
-  // value that should be used to claim heap regions.
+  // parallel thread id of the current thread, and "hrclaimer" is the shared
+  // HeapRegionClaimer that should be used to claim heap regions.
   void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                 uint worker_num, int claim_val);
+                 uint worker_num, HeapRegionClaimer* hrclaimer);
 
   // Refine the card corresponding to "card_ptr".
   // If check_for_refs_into_cset is true, a true result is returned
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -217,7 +217,6 @@
     } else {
       hrrs->clear();
     }
-    _claimed = InitialClaimValue;
   }
   zero_marked_bytes();
 
@@ -294,17 +293,6 @@
   _humongous_start_region = NULL;
 }
 
-bool HeapRegion::claimHeapRegion(jint claimValue) {
-  jint current = _claimed;
-  if (current != claimValue) {
-    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
-    if (res == current) {
-      return true;
-    }
-  }
-  return false;
-}
-
 HeapRegion::HeapRegion(uint hrm_index,
                        G1BlockOffsetSharedArray* sharedOffsetArray,
                        MemRegion mr) :
@@ -314,7 +302,7 @@
     _humongous_start_region(NULL),
     _in_collection_set(false),
     _next_in_special_set(NULL),
-    _claimed(InitialClaimValue), _evacuation_failed(false),
+    _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _next_young_region(NULL),
     _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -254,9 +254,6 @@
   HeapRegionSetBase* _containing_set;
 #endif // ASSERT
 
-  // For parallel heapRegion traversal.
-  jint _claimed;
-
   // We use concurrent marking to determine the amount of live data
   // in each heap region.
   size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
@@ -336,19 +333,6 @@
   // up once during initialization time.
   static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
 
-  enum ClaimValues {
-    InitialClaimValue          = 0,
-    FinalCountClaimValue       = 1,
-    NoteEndClaimValue          = 2,
-    ScrubRemSetClaimValue      = 3,
-    ParVerifyClaimValue        = 4,
-    RebuildRSClaimValue        = 5,
-    ParEvacFailureClaimValue   = 6,
-    AggregateCountClaimValue   = 7,
-    VerifyCountClaimValue      = 8,
-    ParMarkRootClaimValue      = 9
-  };
-
   // All allocated blocks are occupied by objects in a HeapRegion
   bool block_is_obj(const HeapWord* p) const;
 
@@ -691,12 +675,6 @@
     return (HeapWord *) obj >= next_top_at_mark_start();
   }
 
-  // For parallel heapRegion traversal.
-  bool claimHeapRegion(int claimValue);
-  jint claim_value() { return _claimed; }
-  // Use this carefully: only when you're sure no one is claiming...
-  void set_claim_value(int claimValue) { _claimed = claimValue; }
-
   // Returns the "evacuation_failed" property of the region.
   bool evacuation_failed() { return _evacuation_failed; }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -260,20 +260,17 @@
   return num_regions;
 }
 
-uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
-  return num_regions * worker_i / num_workers;
-}
-
-void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
-  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
+void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
+  const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 
   // Every worker will actually look at all regions, skipping over regions that
   // are currently not committed.
   // This also (potentially) iterates over regions newly allocated during GC. This
   // is no problem except for some extra work.
-  for (uint count = 0; count < _allocated_heapregions_length; count++) {
-    const uint index = (start_index + count) % _allocated_heapregions_length;
-    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
+  const uint n_regions = hrclaimer->n_regions();
+  for (uint count = 0; count < n_regions; count++) {
+    const uint index = (start_index + count) % n_regions;
+    assert(0 <= index && index < n_regions, "sanity");
     // Skip over unavailable regions
     if (!is_available(index)) {
       continue;
@@ -282,11 +279,11 @@
     // We'll ignore "continues humongous" regions (we'll process them
     // when we come across their corresponding "start humongous"
     // region) and regions already claimed.
-    if (r->claim_value() == claim_value || r->is_continues_humongous()) {
+    if (hrclaimer->is_region_claimed(index) || r->is_continues_humongous()) {
       continue;
     }
     // OK, try to claim it
-    if (!r->claimHeapRegion(claim_value)) {
+    if (!hrclaimer->claim_region(index)) {
       continue;
     }
     // Success!
@@ -306,13 +303,11 @@
         assert(chr->humongous_start_region() == r,
                err_msg("Must work on humongous continuation of the original start region "
                        PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
-        assert(chr->claim_value() != claim_value,
+        assert(!hrclaimer->is_region_claimed(ch_index),
                "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
 
-        bool claim_result = chr->claimHeapRegion(claim_value);
-        // We should always be able to claim it; no one else should
-        // be trying to claim this region.
-        guarantee(claim_result, "We should always be able to claim the is_continues_humongous part of the humongous object");
+        // There's no need to actually claim the continues humongous region, but we can do it in an assert as an extra precaution.
+        assert(hrclaimer->claim_region(ch_index), "We should always be able to claim the continuesHumongous part of the humongous object");
 
         bool res2 = blk->doHeapRegion(chr);
         if (res2) {
@@ -445,3 +440,31 @@
 }
 #endif // PRODUCT
 
+HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
+    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
+  assert(n_workers > 0, "Need at least one worker.");
+  _claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
+  memset(_claims, Unclaimed, sizeof(*_claims) * _n_regions);
+}
+
+HeapRegionClaimer::~HeapRegionClaimer() {
+  if (_claims != NULL) {
+    FREE_C_HEAP_ARRAY(uint, _claims, mtGC);
+  }
+}
+
+uint HeapRegionClaimer::start_region_for_worker(uint worker_id) const {
+  assert(worker_id < _n_workers, "Invalid worker_id.");
+  return _n_regions * worker_id / _n_workers;
+}
+
+bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
+  assert(region_index < _n_regions, "Invalid index.");
+  return _claims[region_index] == Claimed;
+}
+
+bool HeapRegionClaimer::claim_region(uint region_index) {
+  assert(region_index < _n_regions, "Invalid index.");
+  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
+  return old_val == Unclaimed;
+}
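For readers unfamiliar with HotSpot's Atomic API, the claim protocol above can be re-expressed with standard C++ atomics. A self-contained sketch, not the HotSpot code (which uses Atomic::cmpxchg on a plain uint array allocated with NEW_C_HEAP_ARRAY):

    #include <atomic>
    #include <cstdint>
    #include <memory>

    class SimpleClaimer {
      std::unique_ptr<std::atomic<uint32_t>[]> _claims;  // 0 = unclaimed, 1 = claimed
      uint32_t _n_regions;
    public:
      explicit SimpleClaimer(uint32_t n_regions)
          : _claims(new std::atomic<uint32_t>[n_regions]), _n_regions(n_regions) {
        for (uint32_t i = 0; i < n_regions; i++) {
          _claims[i].store(0, std::memory_order_relaxed);
        }
      }
      // Exactly one thread can flip a slot from 0 to 1; every other thread's
      // compare-exchange fails, which is what makes a claim exclusive.
      bool claim_region(uint32_t i) {
        uint32_t expected = 0;
        return _claims[i].compare_exchange_strong(expected, 1);
      }
    };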
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -31,6 +31,7 @@
 
 class HeapRegion;
 class HeapRegionClosure;
+class HeapRegionClaimer;
 class FreeRegionList;
 
 class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
@@ -66,6 +67,7 @@
 
 class HeapRegionManager: public CHeapObj<mtGC> {
   friend class VMStructs;
+  friend class HeapRegionClaimer;
 
   G1HeapRegionTable _regions;
 
@@ -99,9 +101,6 @@
 
   // Notify other data structures about change in the heap layout.
   void update_committed_space(HeapWord* old_end, HeapWord* new_end);
-  // Calculate the starting region for each worker during parallel iteration so
-  // that they do not all start from the same region.
-  uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
 
   // Find a contiguous set of empty or uncommitted regions of length num and return
   // the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.
@@ -223,7 +222,7 @@
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;
 
-  void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const;
+  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const;
 
   // Uncommit up to num_regions_to_remove regions that are completely free.
   // Return the actual number of uncommitted regions.
@@ -235,5 +234,33 @@
   void verify_optional() PRODUCT_RETURN;
 };
 
+// The HeapRegionClaimer is used during parallel iteration over heap regions,
+// allowing workers to claim heap regions and gain exclusive rights to them.
+class HeapRegionClaimer : public StackObj {
+  uint  _n_workers;
+  uint  _n_regions;
+  uint* _claims;
+
+  static const uint Unclaimed = 0;
+  static const uint Claimed   = 1;
+
+ public:
+  HeapRegionClaimer(uint n_workers);
+  ~HeapRegionClaimer();
+
+  inline uint n_regions() const {
+    return _n_regions;
+  }
+
+  // Calculate the starting region for the given worker so that
+  // workers do not all start from the same region.
+  uint start_region_for_worker(uint worker_id) const;
+
+  // Check whether the given region has been claimed with this claimer.
+  bool is_region_claimed(uint region_index) const;
+
+  // Claim the given region; returns true if it was successfully claimed.
+  bool claim_region(uint region_index);
+};
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
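The start_region_for_worker() formula spreads workers across the region array so that their claims rarely collide at the start of an iteration: with n_regions = 10 and n_workers = 4, the start indices are 0, 2, 5 and 7. A simplified sketch of the scan each worker then performs in HeapRegionManager::par_iterate (humongous-region handling omitted; n_regions, worker_id, n_workers and claimer are supplied by the caller):

    uint start = n_regions * worker_id / n_workers;     // e.g. 10 * 2 / 4 == 5
    for (uint count = 0; count < n_regions; count++) {
      uint index = (start + count) % n_regions;         // wrap around the array
      if (claimer->is_region_claimed(index)) continue;  // another worker got it
      if (!claimer->claim_region(index))     continue;  // lost the race just now
      // ... apply the closure to region 'index' ...
    }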
 
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -95,8 +95,9 @@
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
    (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-    _gc_cause == GCCause::_g1_humongous_allocation),
-         "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
+    _gc_cause == GCCause::_g1_humongous_allocation ||
+    _gc_cause == GCCause::_update_allocation_context_stats_inc),
+      "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -54,7 +54,8 @@
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";
 
-    case _update_allocation_context_stats:
+    case _update_allocation_context_stats_inc:
+    case _update_allocation_context_stats_full:
       return "Update Allocation Context Stats";
 
     case _no_gc:
--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -47,7 +47,8 @@
     _heap_inspection,
     _heap_dump,
     _wb_young_gc,
-    _update_allocation_context_stats,
+    _update_allocation_context_stats_inc,
+    _update_allocation_context_stats_full,
 
     /* implementation independent, but reserved for GC use */
     _no_gc,
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -246,6 +246,12 @@
   // Ignore overpasses so statics can be found during resolution
   Method* result_oop = klass->uncached_lookup_method(name, signature, Klass::skip_overpass);
 
+  if (klass->oop_is_array()) {
+    // Only consider klass and super klass for arrays
+    result = methodHandle(THREAD, result_oop);
+    return;
+  }
+
   // JDK 8, JVMS 5.4.3.4: Interface method resolution should
   // ignore static and non-public methods of java.lang.Object,
   // like clone, finalize, registerNatives.
@@ -290,6 +296,11 @@
     result = methodHandle(THREAD, super_klass->uncached_lookup_method(name, signature, Klass::normal));
   }
 
+  if (klass->oop_is_array()) {
+    // Only consider klass and super klass for arrays
+    return;
+  }
+
   if (result.is_null()) {
     Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
     if (default_methods != NULL) {
@@ -545,7 +556,7 @@
   // 2. lookup method in resolved klass and its super klasses
   lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, true, false, CHECK);
 
-  if (resolved_method.is_null()) { // not found in the class hierarchy
+  if (resolved_method.is_null() && !resolved_klass->oop_is_array()) { // not found in the class hierarchy
     // 3. lookup method in all the interfaces implemented by the resolved klass
     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
@@ -558,16 +569,16 @@
         CLEAR_PENDING_EXCEPTION;
       }
     }
+  }
 
-    if (resolved_method.is_null()) {
-      // 4. method lookup failed
-      ResourceMark rm(THREAD);
-      THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
-                      Method::name_and_sig_as_C_string(resolved_klass(),
-                                                              method_name,
-                                                              method_signature),
-                      nested_exception);
-    }
+  if (resolved_method.is_null()) {
+    // 4. method lookup failed
+    ResourceMark rm(THREAD);
+    THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
+                    Method::name_and_sig_as_C_string(resolved_klass(),
+                                                            method_name,
+                                                            method_signature),
+                    nested_exception);
   }
 
   // 5. access checks, access checking may be turned off when calling from within the VM.
@@ -633,17 +644,18 @@
   // JDK8: also look for static methods
   lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, false, true, CHECK);
 
-  if (resolved_method.is_null()) {
+  if (resolved_method.is_null() && !resolved_klass->oop_is_array()) {
     // lookup method in all the super-interfaces
     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
-    if (resolved_method.is_null()) {
-      // no method found
-      ResourceMark rm(THREAD);
-      THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(),
-                Method::name_and_sig_as_C_string(resolved_klass(),
-                                                        method_name,
-                                                        method_signature));
-    }
+  }
+
+  if (resolved_method.is_null()) {
+    // no method found
+    ResourceMark rm(THREAD);
+    THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(),
+              Method::name_and_sig_as_C_string(resolved_klass(),
+                                                      method_name,
+                                                      method_signature));
   }
 
   if (check_access) {
@@ -775,7 +787,7 @@
   }
 
   // Resolve instance field
-  KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
+  KlassHandle sel_klass(THREAD, resolved_klass->find_field(field, sig, &fd));
   // check if field exists; i.e., if a klass containing the field def has been selected
   if (sel_klass.is_null()) {
     ResourceMark rm(THREAD);
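The common thread in these linkResolver changes: for an array class, JVMS 5.4.3 resolution consults only the array class and java.lang.Object, never superinterfaces, yet a failed lookup must still throw NoSuchMethodError. A condensed sketch of the reshaped method-resolution flow (illustrative, not the exact code):

    // 1-2. Search klass and its superclass chain (for arrays: just Object).
    lookup_method_in_klasses(resolved_method, klass, name, sig, /*...*/ CHECK);

    // 3. Interface search applies to non-array classes only.
    if (resolved_method.is_null() && !klass->oop_is_array()) {
      lookup_method_in_interfaces(resolved_method, klass, name, sig, CHECK);
    }

    // 4. Arrays and non-arrays alike fail here if nothing was found.
    if (resolved_method.is_null()) {
      ResourceMark rm(THREAD);
      THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(),
                Method::name_and_sig_as_C_string(klass(), name, sig));
    }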
--- a/hotspot/src/share/vm/memory/filemap.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -331,6 +331,14 @@
     return false;
   }
 
+  size_t len = lseek(fd, 0, SEEK_END);
+  struct FileMapInfo::FileMapHeader::space_info* si =
+    &_header->_space[MetaspaceShared::mc];
+  if (si->_file_offset >= len || len - si->_file_offset < si->_used) {
+    fail_continue("The shared archive file has been truncated.");
+    return false;
+  }
+
   _file_offset += (long)n;
   return true;
 }
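The guard above rejects an archive whose physical length is shorter than a recorded region requires. A minimal sketch of the same check, assuming POSIX I/O (region_fits is a hypothetical helper, not FileMapInfo code):

    #include <sys/types.h>
    #include <unistd.h>

    // True if a region recorded at [offset, offset + used) fits in the file.
    bool region_fits(int fd, size_t offset, size_t used) {
      off_t end = lseek(fd, 0, SEEK_END);  // physical length of the archive
      if (end < 0) return false;           // lseek failed
      size_t len = (size_t)end;
      return offset < len && len - offset >= used;
    }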
@@ -431,6 +439,7 @@
   si->_capacity = capacity;
   si->_read_only = read_only;
   si->_allow_exec = allow_exec;
+  si->_crc = ClassLoader::crc32(0, base, (jint)size);
   write_bytes_aligned(base, (int)size);
 }
 
@@ -455,14 +464,15 @@
 // Align file position to an allocation unit boundary.
 
 void FileMapInfo::align_file_position() {
-  long new_file_offset = align_size_up(_file_offset, os::vm_allocation_granularity());
+  size_t new_file_offset = align_size_up(_file_offset,
+                                         os::vm_allocation_granularity());
   if (new_file_offset != _file_offset) {
     _file_offset = new_file_offset;
     if (_file_open) {
       // Seek one byte back from the target and write a byte to insure
       // that the written file is the correct length.
       _file_offset -= 1;
-      if (lseek(_fd, _file_offset, SEEK_SET) < 0) {
+      if (lseek(_fd, (long)_file_offset, SEEK_SET) < 0) {
         fail_stop("Unable to seek.");
       }
       char zero = 0;
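The seek-back-and-write trick here relies on the fact that lseek alone never changes a file's size, while writing a single byte at new_offset - 1 forces the file to grow to exactly new_offset bytes, so later reads or mappings of the aligned region cannot run past end-of-file. A sketch under the same POSIX assumptions, error handling elided (extend_file_to is a hypothetical helper):

    #include <sys/types.h>
    #include <unistd.h>

    void extend_file_to(int fd, size_t new_offset) {
      char zero = 0;
      lseek(fd, (off_t)(new_offset - 1), SEEK_SET);  // one byte before target
      write(fd, &zero, 1);                           // length is now new_offset
    }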
@@ -569,6 +579,19 @@
   return base;
 }
 
+bool FileMapInfo::verify_region_checksum(int i) {
+  if (!VerifySharedSpaces) {
+    return true;
+  }
+  const char* buf = _header->_space[i]._base;
+  size_t sz = _header->_space[i]._used;
+  int crc = ClassLoader::crc32(0, buf, (jint)sz);
+  if (crc != _header->_space[i]._crc) {
+    fail_continue("Checksum verification failed.");
+    return false;
+  }
+  return true;
+}
 
 // Unmap a memory region in the address space.
 
@@ -629,7 +652,21 @@
   return true;
 }
 
+int FileMapInfo::FileMapHeader::compute_crc() {
+  char* header = data();
+  // start computing from the field after _crc
+  char* buf = (char*)&_crc + sizeof(int);
+  size_t sz = data_size() - (buf - header);
+  int crc = ClassLoader::crc32(0, buf, (jint)sz);
+  return crc;
+}
+
 bool FileMapInfo::FileMapHeader::validate() {
+  if (VerifySharedSpaces && compute_crc() != _crc) {
+    fail_continue("Header checksum verification failed.");
+    return false;
+  }
+
   if (_version != current_version()) {
     FileMapInfo::fail_continue("The shared archive file is the wrong version.");
     return false;
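compute_crc() follows a common self-checksumming layout: the checksum field itself is skipped and every header byte after it is hashed, so the stored value can later be compared against a fresh computation in validate(). A structural sketch with a deliberately abbreviated header; crc32 stands in for ClassLoader::crc32:

    struct Header {
      int _magic;
      int _crc;      // covers every byte after this field
      int _version;
      // ... remaining header fields ...
    };

    int compute_header_crc(const Header* h) {
      const char* buf = (const char*)&h->_crc + sizeof(int);  // first byte after _crc
      size_t sz = sizeof(Header) - (size_t)(buf - (const char*)h);
      return crc32(0, buf, (int)sz);  // crc32: stand-in for ClassLoader::crc32
    }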
--- a/hotspot/src/share/vm/memory/filemap.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/memory/filemap.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -61,7 +61,7 @@
 
   bool  _file_open;
   int   _fd;
-  long  _file_offset;
+  size_t  _file_offset;
 
 private:
   static SharedClassPathEntry* _classpath_entry_table;
@@ -87,12 +87,14 @@
     }
 
     int    _magic;                    // identify file type.
+    int    _crc;                      // header crc checksum.
     int    _version;                  // (from enum, above.)
     size_t _alignment;                // how shared archive should be aligned
     int    _obj_alignment;            // value of ObjectAlignmentInBytes
 
     struct space_info {
-      int    _file_offset;   // sizeof(this) rounded to vm page size
+      int    _crc;           // crc checksum of the current space
+      size_t _file_offset;   // sizeof(this) rounded to vm page size
       char*  _base;          // copy-on-write base address
       size_t _capacity;      // for validity checking
       size_t _used;          // for setting space top on read
@@ -135,6 +137,7 @@
 
     virtual bool validate();
     virtual void populate(FileMapInfo* info, size_t alignment);
+    int compute_crc();
   };
 
   FileMapHeader * _header;
@@ -153,6 +156,8 @@
   ~FileMapInfo();
 
   static int current_version()        { return _current_version; }
+  int    compute_header_crc()         { return _header->compute_crc(); }
+  void   set_header_crc(int crc)      { _header->_crc = crc; }
   void   populate_header(size_t alignment);
   bool   validate_header();
   void   invalidate();
@@ -181,6 +186,7 @@
   void  write_bytes_aligned(const void* buffer, int count);
   char* map_region(int i);
   void  unmap_region(int i);
+  bool  verify_region_checksum(int i);
   void  close();
   bool  is_open() { return _file_open; }
   ReservedSpace reserve_shared_memory();
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -608,6 +608,7 @@
 
   // Pass 2 - write data.
   mapinfo->open_for_write();
+  mapinfo->set_header_crc(mapinfo->compute_header_crc());
   mapinfo->write_header();
   mapinfo->write_space(MetaspaceShared::ro, _loader_data->ro_metaspace(), true);
   mapinfo->write_space(MetaspaceShared::rw, _loader_data->rw_metaspace(), false);
@@ -937,9 +938,13 @@
 
   // Map each shared region
   if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
+       mapinfo->verify_region_checksum(ro) &&
       (_rw_base = mapinfo->map_region(rw)) != NULL &&
+       mapinfo->verify_region_checksum(rw) &&
       (_md_base = mapinfo->map_region(md)) != NULL &&
+       mapinfo->verify_region_checksum(md) &&
       (_mc_base = mapinfo->map_region(mc)) != NULL &&
+       mapinfo->verify_region_checksum(mc) &&
       (image_alignment == (size_t)max_alignment()) &&
       mapinfo->validate_classpath_entry_table()) {
     // Success (no need to do anything)
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -64,6 +64,13 @@
   return NULL;
 }
 
+// Find a field according to JVM spec 5.4.3.2; returns the klass in which the field is defined.
+Klass* ArrayKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
+  // There are no fields in an array klass, so look to the super class (Object).
+  assert(super(), "super klass must be present");
+  return super()->find_field(name, sig, fd);
+}
+
 Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
   // There are no methods in an array klass but the super class (Object) has some
   assert(super(), "super klass must be present");
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -28,6 +28,7 @@
 #include "memory/universe.hpp"
 #include "oops/klass.hpp"
 
+class fieldDescriptor;
 class klassVtable;
 
 // ArrayKlass is the abstract baseclass for all array classes
@@ -77,6 +78,9 @@
   virtual oop multi_allocate(int rank, jint* sizes, TRAPS);
   objArrayOop allocate_arrayArray(int n, int length, TRAPS);
 
+  // Find a field according to JVM spec 5.4.3.2; returns the klass in which the field is defined.
+  Klass* find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
+
   // Lookup operations
   Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
 
--- a/hotspot/src/share/vm/oops/klass.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/oops/klass.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -130,6 +130,15 @@
   return is_subclass_of(k);
 }
 
+Klass* Klass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
+#ifdef ASSERT
+  tty->print_cr("Error: find_field called on a klass oop."
+                " Likely error: reflection method does not correctly"
+                " wrap return value in a mirror object.");
+#endif
+  ShouldNotReachHere();
+  return NULL;
+}
 
 Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
 #ifdef ASSERT
--- a/hotspot/src/share/vm/oops/klass.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -62,6 +62,7 @@
 class klassVtable;
 class ParCompactionManager;
 class KlassSizeStats;
+class fieldDescriptor;
 
 class Klass : public Metadata {
   friend class VMStructs;
@@ -411,6 +412,7 @@
   virtual void initialize(TRAPS);
   // lookup operation for MethodLookupCache
   friend class MethodLookupCache;
+  virtual Klass* find_field(Symbol* name, Symbol* signature, fieldDescriptor* fd) const;
   virtual Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
  public:
   Method* lookup_method(Symbol* name, Symbol* signature) const {
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1153,12 +1153,18 @@
   assert(s == start(), "");
 }
 
+/**
+ * Return the 'StartNode'. We must not have a pending failure, since the ideal
+ * graph can then be in an inconsistent state, and traversing it can cause
+ * segmentation faults.
+ */
 StartNode* Compile::start() const {
-  assert(!failing(), "");
+  assert(!failing(), err_msg_res("Must not have pending failure. Reason is: %s", failure_reason()));
   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
     Node* start = root()->fast_out(i);
-    if( start->is_Start() )
+    if (start->is_Start()) {
       return start->as_Start();
+    }
   }
   fatal("Did not find Start node!");
   return NULL;
--- a/hotspot/src/share/vm/opto/compile.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -707,12 +707,15 @@
   void sort_expensive_nodes();
 
   // Compilation environment.
-  Arena*            comp_arena()                { return &_comp_arena; }
-  ciEnv*            env() const                 { return _env; }
-  CompileLog*       log() const                 { return _log; }
-  bool              failing() const             { return _env->failing() || _failure_reason != NULL; }
-  const char*       failure_reason() { return _failure_reason; }
-  bool              failure_reason_is(const char* r) { return (r==_failure_reason) || (r!=NULL && _failure_reason!=NULL && strcmp(r, _failure_reason)==0); }
+  Arena*      comp_arena()           { return &_comp_arena; }
+  ciEnv*      env() const            { return _env; }
+  CompileLog* log() const            { return _log; }
+  bool        failing() const        { return _env->failing() || _failure_reason != NULL; }
+  const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }
+
+  bool failure_reason_is(const char* r) const {
+    return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0);
+  }
 
   void record_failure(const char* reason);
   void record_method_not_compilable(const char* reason, bool all_tiers = false) {
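The strengthened assert in Compile::start() (see compile.cpp above) encodes a contract callers should already follow. A sketch of the expected guard at a call site; 'C' is the current Compile object, as elsewhere in the opto code:

    if (C->failing()) {
      // The ideal graph may be inconsistent; report and bail out rather
      // than traverse it.
      tty->print_cr("bailing out: %s", C->failure_reason());
      return;
    }
    StartNode* entry = C->start();  // only safe once !C->failing()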
--- a/hotspot/src/share/vm/opto/doCall.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -802,10 +802,16 @@
     // each arm of the Phi.  If I know something clever about the exceptions
     // I'm loading the class from, I can replace the LoadKlass with the
     // klass constant for the exception oop.
-    if( ex_node->is_Phi() ) {
-      ex_klass_node = new PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
-      for( uint i = 1; i < ex_node->req(); i++ ) {
-        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
+    if (ex_node->is_Phi()) {
+      ex_klass_node = new PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
+      for (uint i = 1; i < ex_node->req(); i++) {
+        Node* ex_in = ex_node->in(i);
+        if (ex_in == top() || ex_in == NULL) {
+          // This path was not taken.
+          ex_klass_node->init_req(i, top());
+          continue;
+        }
+        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
         Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
         ex_klass_node->init_req( i, k );
       }
--- a/hotspot/src/share/vm/prims/jvm.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -805,6 +805,7 @@
   return (jclass) JNIHandles::make_local(env, k->java_mirror());
 JVM_END
 
+// Not used; JVM_FindClassFromCaller replaces this.
 JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name,
                                                jboolean init, jobject loader,
                                                jboolean throwError))
@@ -831,6 +832,42 @@
   return result;
 JVM_END
 
+// Find a class with this name in this loader, using the caller's protection domain.
+JVM_ENTRY(jclass, JVM_FindClassFromCaller(JNIEnv* env, const char* name,
+                                          jboolean init, jobject loader,
+                                          jclass caller))
+  JVMWrapper2("JVM_FindClassFromCaller %s throws ClassNotFoundException", name);
+  // Java libraries should ensure that name is never null...
+  if (name == NULL || (int)strlen(name) > Symbol::max_length()) {
+    // It's impossible to create this class;  the name cannot fit
+    // into the constant pool.
+    THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), name);
+  }
+
+  TempNewSymbol h_name = SymbolTable::new_symbol(name, CHECK_NULL);
+
+  oop loader_oop = JNIHandles::resolve(loader);
+  oop from_class = JNIHandles::resolve(caller);
+  oop protection_domain = NULL;
+  // If loader is null, we must not call ClassLoader.checkPackageAccess; doing
+  // so would throw an NPE. Put another way, the bootstrap class loader has all
+  // permissions, so there is no checkPackageAccess equivalent for it in the VM.
+  // The caller is also passed as NULL by the Java code if there is no security
+  // manager, to avoid the performance cost of getting the calling class.
+  if (from_class != NULL && loader_oop != NULL) {
+    protection_domain = java_lang_Class::as_Klass(from_class)->protection_domain();
+  }
+
+  Handle h_loader(THREAD, loader_oop);
+  Handle h_prot(THREAD, protection_domain);
+  jclass result = find_class_from_class_loader(env, h_name, init, h_loader,
+                                               h_prot, false, THREAD);
+
+  if (TraceClassResolution && result != NULL) {
+    trace_class_resolution(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(result)));
+  }
+  return result;
+JVM_END
 
 JVM_ENTRY(jclass, JVM_FindClassFromClass(JNIEnv *env, const char *name,
                                          jboolean init, jclass from))
@@ -1073,17 +1110,6 @@
 JVM_END
 
 
-JVM_ENTRY(jobject, JVM_GetClassLoader(JNIEnv *env, jclass cls))
-  JVMWrapper("JVM_GetClassLoader");
-  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {
-    return NULL;
-  }
-  Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
-  oop loader = k->class_loader();
-  return JNIHandles::make_local(env, loader);
-JVM_END
-
-
 JVM_QUICK_ENTRY(jboolean, JVM_IsInterface(JNIEnv *env, jclass cls))
   JVMWrapper("JVM_IsInterface");
   oop mirror = JNIHandles::resolve_non_null(cls);
@@ -3932,10 +3958,15 @@
 
 // Shared JNI/JVM entry points //////////////////////////////////////////////////////////////
 
-jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init, Handle loader, Handle protection_domain, jboolean throwError, TRAPS) {
+jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init,
+                                    Handle loader, Handle protection_domain,
+                                    jboolean throwError, TRAPS) {
   // Security Note:
   //   The Java level wrapper will perform the necessary security check allowing
-  //   us to pass the NULL as the initiating class loader.
+  //   us to pass NULL as the initiating class loader.  The VM is responsible for
+  //   the checkPackageAccess relative to the initiating class loader via the
+  //   protection_domain.  The protection_domain is passed as NULL by the Java code
+  //   if there is no security manager in the 3-arg Class.forName().
   Klass* klass = SystemDictionary::resolve_or_fail(name, loader, protection_domain, throwError != 0, CHECK_NULL);
 
   KlassHandle klass_handle(THREAD, klass);
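For context, a minimal sketch of the Java-level path that ends up in JVM_FindClassFromCaller via the 3-arg Class.forName (illustrative only; the looked-up class name is arbitrary):

    public class ForNameDemo {
        public static void main(String[] args) throws Exception {
            ClassLoader loader = ForNameDemo.class.getClassLoader();
            // The JDK passes the caller class (here ForNameDemo) down to the VM so
            // that the package-access check can use the caller's protection domain.
            // With no security manager installed, the caller is passed as null to
            // avoid the cost of computing it; with a null (bootstrap) loader the
            // check is skipped entirely, as the comments above explain.
            Class<?> c = Class.forName("java.util.ArrayList", /* init = */ true, loader);
            System.out.println(c.getName());
        }
    }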
--- a/hotspot/src/share/vm/prims/jvm.h	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvm.h	Thu Oct 16 14:15:37 2014 -0700
@@ -420,6 +420,19 @@
 JVM_FindClassFromBootLoader(JNIEnv *env, const char *name);
 
 /*
+ * Find a class from a given class loader.  Throws ClassNotFoundException.
+ *  name:   name of class
+ *  init:   whether initialization is done
+ *  loader: class loader to look up the class. This may not be the same as the caller's
+ *          class loader.
+ *  caller: initiating class. The initiating class may be null when a security
+ *          manager is not installed.
+ */
+JNIEXPORT jclass JNICALL
+JVM_FindClassFromCaller(JNIEnv *env, const char *name, jboolean init,
+                        jobject loader, jclass caller);
+
+/*
  * Find a class from a given class.
  */
 JNIEXPORT jclass JNICALL
@@ -462,9 +475,6 @@
 JNIEXPORT jobjectArray JNICALL
 JVM_GetClassInterfaces(JNIEnv *env, jclass cls);
 
-JNIEXPORT jobject JNICALL
-JVM_GetClassLoader(JNIEnv *env, jclass cls);
-
 JNIEXPORT jboolean JNICALL
 JVM_IsInterface(JNIEnv *env, jclass cls);
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -3836,6 +3836,11 @@
     return JNI_ENOMEM;
   }
 
+  // Set up VerifySharedSpaces
+  if (FLAG_IS_DEFAULT(VerifySharedSpaces) && SharedArchiveFile != NULL) {
+    VerifySharedSpaces = true;
+  }
+
   // Delay warning until here so that we've had a chance to process
   // the -XX:-PrintWarnings flag
   if (needs_hotspotrc_warning) {
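A sketch of how this new default behaves, in the style of the jtreg tests elsewhere in this changeset (assumes the com.oracle.java.testlibrary helpers used below; the archive file name and the -XX:+PrintFlagsFinal output pattern are assumptions for illustration):

    import com.oracle.java.testlibrary.*;

    public class VerifySharedSpacesDefaultDemo {
        public static void main(String[] args) throws Exception {
            // 1) Dump a CDS archive to a user-specified file.
            ProcessBuilder dump = ProcessTools.createJavaProcessBuilder(
                "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
            new OutputAnalyzer(dump.start()).shouldHaveExitValue(0);

            // 2) Run against that archive; with the change above,
            //    VerifySharedSpaces now defaults to true.
            ProcessBuilder run = ProcessTools.createJavaProcessBuilder(
                "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on",
                "-XX:+PrintFlagsFinal", "-version");
            new OutputAnalyzer(run.start()).shouldMatch("VerifySharedSpaces.*true");
        }
    }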
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -1177,11 +1177,11 @@
           "When true prevents OS-level spurious, or premature, wakeups "    \
           "from Object.wait (Ignored for Windows)")                         \
                                                                             \
-  product(intx, NativeMonitorTimeout, -1, "(Unstable)")                     \
-                                                                            \
-  product(intx, NativeMonitorFlags, 0, "(Unstable)")                        \
-                                                                            \
-  product(intx, NativeMonitorSpinLimit, 20, "(Unstable)")                   \
+  experimental(intx, NativeMonitorTimeout, -1, "(Unstable)")                \
+                                                                            \
+  experimental(intx, NativeMonitorFlags, 0, "(Unstable)")                   \
+                                                                            \
+  experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)")              \
                                                                             \
   develop(bool, UsePthreads, false,                                         \
           "Use pthread-based instead of libthread-based synchronization "   \
@@ -3790,6 +3790,10 @@
   product(bool, UseSharedSpaces, true,                                      \
           "Use shared spaces for metadata")                                 \
                                                                             \
+  product(bool, VerifySharedSpaces, false,                                  \
+          "Verify shared spaces (false for default archive, true for "      \
+          "archive specified by -XX:SharedArchiveFile)")                    \
+                                                                            \
   product(bool, RequireSharedSpaces, false,                                 \
           "Require shared spaces for metadata")                             \
                                                                             \
--- a/hotspot/src/share/vm/runtime/perfMemory.hpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/runtime/perfMemory.hpp	Thu Oct 16 14:15:37 2014 -0700
@@ -155,9 +155,6 @@
       }
     }
 
-    // filename of backing store or NULL if none.
-    static char* backing_store_filename();
-
     // returns the complete file path of hsperfdata.
     // the caller is expected to free the allocated memory.
     static char* get_perfdata_file_path();
--- a/hotspot/src/share/vm/runtime/reflection.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/runtime/reflection.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -507,7 +507,8 @@
   if (access.is_protected()) {
     if (!protected_restriction) {
       // See if current_class (or outermost host class) is a subclass of field_class
-      if (host_class->is_subclass_of(field_class)) {
+      // An interface may not access protected members of j.l.Object
+      if (!host_class->is_interface() && host_class->is_subclass_of(field_class)) {
         if (access.is_static() || // static fields are ok, see 6622385
             current_class == resolved_class ||
             field_class == resolved_class ||
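Why the extra is_interface() test is needed: a class may use Object's protected members because every class is a subclass of java.lang.Object, but an interface may not, even though the VM also records Object as an interface's super type. A small self-contained illustration (the rejected case is shown in a comment because javac refuses it outright):

    public class ProtectedCloneDemo implements Cloneable {
        // OK: a class is a subclass of java.lang.Object, so it may invoke the
        // protected Object.clone() on itself.
        public Object copy() throws CloneNotSupportedException {
            return clone();
        }

        // Not OK, and what the VM check above now also rejects for bytecode
        // that bypasses javac:
        //   interface Copier { default Object dup(Copier c) { return c.clone(); } }
        //   error: clone() has protected access in Object

        public static void main(String[] args) throws Exception {
            System.out.println(new ProtectedCloneDemo().copy().getClass());
        }
    }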
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Thu Oct 16 14:15:37 2014 -0700
@@ -540,17 +540,25 @@
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
-      if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
-      }
       // Clear ICStubs to prevent back patching stubs of zombie or unloaded
       // nmethods during the next safepoint (see ICStub::finalize).
-      MutexLocker cl(CompiledIC_lock);
-      nm->clear_ic_stubs();
-      // Code cache state change is tracked in make_zombie()
-      nm->make_zombie();
-      _zombified_count++;
-      SWEEP(nm);
+      {
+        MutexLocker cl(CompiledIC_lock);
+        nm->clear_ic_stubs();
+      }
+      // Acquiring the CompiledIC_lock may block for a safepoint, during which the
+      // nmethod may already have been made a zombie (see 'CodeCache::make_marked_nmethods_zombies').
+      // Re-check that the nmethod is still not entrant before converting it.
+      if (nm->is_not_entrant()) {
+        if (PrintMethodFlushing && Verbose) {
+          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
+        }
+        // Code cache state change is tracked in make_zombie()
+        nm->make_zombie();
+        _zombified_count++;
+        SWEEP(nm);
+      }
+      assert(nm->is_zombie(), "nmethod must be zombie");
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
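The restructured code above follows a general pattern: perform cleanup under a lock, and, because taking that lock can block across a state change (here, a safepoint), re-check the state before completing the transition. A self-contained sketch of the pattern with placeholder names, not the VM code itself:

    import java.util.concurrent.atomic.AtomicReference;

    public class RecheckAfterLockDemo {
        enum State { NOT_ENTRANT, ZOMBIE }
        private final Object icLock = new Object();  // stands in for CompiledIC_lock
        private final AtomicReference<State> state =
            new AtomicReference<>(State.NOT_ENTRANT);

        void sweep() {
            synchronized (icLock) {
                // Cleanup analogous to nm->clear_ic_stubs(); while we waited for
                // this lock, another actor may already have made the transition.
            }
            // Re-check before transitioning: only move NOT_ENTRANT -> ZOMBIE if
            // nobody else did it while we were blocked.
            state.compareAndSet(State.NOT_ENTRANT, State.ZOMBIE);
            assert state.get() == State.ZOMBIE;  // mirrors the new assert above
        }

        public static void main(String[] args) {
            RecheckAfterLockDemo d = new RecheckAfterLockDemo();
            d.sweep();
            System.out.println(d.state.get());
        }
    }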
--- a/hotspot/test/TEST.groups	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/test/TEST.groups	Thu Oct 16 14:15:37 2014 -0700
@@ -447,7 +447,7 @@
   compiler/codegen/ \
   compiler/cpuflags/RestoreMXCSR.java \
   compiler/EscapeAnalysis/ \
-  compiler/exceptions/TestRecursiveReplacedException.java \
+  compiler/exceptions/ \
   compiler/floatingpoint/ModNaN.java \
   compiler/gcbarriers/G1CrashTest.java \
   compiler/inlining/ \
--- a/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java	Thu Oct 16 12:01:59 2014 -0700
+++ b/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java	Thu Oct 16 14:15:37 2014 -0700
@@ -38,22 +38,26 @@
 
   private static void verifySegmentedCodeCache(ProcessBuilder pb, boolean enabled) throws Exception {
     OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    out.shouldHaveExitValue(0);
     if (enabled) {
       try {
         // Non-nmethod code heap should be always available with the segmented code cache
         out.shouldContain(NON_METHOD);
       } catch (RuntimeException e) {
-        // TieredCompilation is disabled in a client VM
-        out.shouldContain("TieredCompilation is disabled in this release.");
+        // Check if TieredCompilation is disabled (in a client VM)
+        if (!out.getOutput().contains("TieredCompilation is disabled in this release.")) {
+          // Code cache is not segmented
+          throw new RuntimeException("No code cache segmentation.");
+        }
       }
     } else {
       out.shouldNotContain(NON_METHOD);
     }
-    out.shouldHaveExitValue(0);
   }
 
   private static void verifyCodeHeapNotExists(ProcessBuilder pb, String... heapNames) throws Exception {
     OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    out.shouldHaveExitValue(0);
     for (String name : heapNames) {
       out.shouldNotContain(name);
     }
@@ -86,6 +90,10 @@
                                                "-XX:ReservedCodeCacheSize=240m",
                                                "-XX:+PrintCodeCache", "-version");
     verifySegmentedCodeCache(pb, true);
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+TieredCompilation",
+                                               "-XX:ReservedCodeCacheSize=400m",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, true);
 
     // Always enabled if SegmentedCodeCache is set
     pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
@@ -100,12 +108,13 @@
                                                "-Xint",
                                                "-XX:+PrintCodeCache", "-version");
     verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
+
+    // If we stop compilation at CompLevel_none or CompLevel_simple we
+    // don't need a profiled code heap.
     pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
                                                "-XX:TieredStopAtLevel=0",
                                                "-XX:+PrintCodeCache", "-version");
-    verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
-
-    // If we stop compilation at CompLevel_simple
+    verifyCodeHeapNotExists(pb, PROFILED);
     pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
                                                "-XX:TieredStopAtLevel=1",
                                                "-XX:+PrintCodeCache", "-version");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/exceptions/CatchInlineExceptions.java	Thu Oct 16 14:15:37 2014 -0700
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8059299
+ * @summary assert(adr_type != NULL) failed: expecting TypeKlassPtr
+ * @run main/othervm -Xbatch CatchInlineExceptions
+ */
+
+class Exception1 extends Exception {};
+class Exception2 extends Exception {};
+
+public class CatchInlineExceptions {
+    private static int counter0;
+    private static int counter1;
+    private static int counter2;
+    private static int counter;
+
+    static void foo(int i) throws Exception {
+        if ((i & 1023) == 2) {
+            counter0++;
+            throw new Exception2();
+        }
+    }
+
+    static void test(int i) throws Exception {
+        try {
+           foo(i);
+        }
+        catch (Exception e) {
+            if (e instanceof Exception1) {
+                counter1++;
+            } else if (e instanceof Exception2) {
+                counter2++;
+            }
+            counter++;
+            throw e;
+        }
+    }
+
+    public static void main(String[] args) throws Throwable {
+        for (int i = 0; i < 15000; i++) {
+            try {
+                test(i);
+            } catch (Exception e) {
+                // expected
+            }
+        }
+        if (counter1 != 0) {
+            throw new RuntimeException("Failed: counter1(" + counter1  + ") != 0");
+        }
+        if (counter2 != counter0) {
+            throw new RuntimeException("Failed: counter2(" + counter2  + ") != counter0(" + counter0  + ")");
+        }
+        if (counter2 != counter) {
+            throw new RuntimeException("Failed: counter2(" + counter2  + ") != counter(" + counter  + ")");
+        }
+        System.out.println("TEST PASSED");
+    }
+}