Merge
author duke
Wed, 05 Jul 2017 20:46:18 +0200
changeset 32235 26648f472fe5
parent 32234 421773f441c6
parent 32207 6b12d11555ce
child 32237 125cdb60b55f
Merge
--- a/.hgtags-top-repo	Thu Aug 20 12:29:24 2015 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 20:46:18 2017 +0200
@@ -320,3 +320,4 @@
 8fd6eeb878606e39c908f12535f34ebbfd225a4a jdk9-b75
 d82072b699b880a1f647a5e2d7c0f86cec958941 jdk9-b76
 7972dc8f2a47f0c4cd8f02fa5662af41f028aa14 jdk9-b77
+8c40d4143ee13bdf8170c68cc384c36ab1e9fadb jdk9-b78
--- a/common/bin/compare_exceptions.sh.incl	Thu Aug 20 12:29:24 2015 -0700
+++ b/common/bin/compare_exceptions.sh.incl	Wed Jul 05 20:46:18 2017 +0200
@@ -42,7 +42,6 @@
 ./demo/jvmti/gctest/lib/libgctest.so
 ./demo/jvmti/heapTracker/lib/libheapTracker.so
 ./demo/jvmti/heapViewer/lib/libheapViewer.so
-./demo/jvmti/hprof/lib/libhprof.so
 ./demo/jvmti/minst/lib/libminst.so
 ./demo/jvmti/mtrace/lib/libmtrace.so
 ./demo/jvmti/versionCheck/lib/libversionCheck.so
@@ -54,7 +53,6 @@
 ./demo/jvmti/gctest/lib/libgctest.so
 ./demo/jvmti/heapTracker/lib/libheapTracker.so
 ./demo/jvmti/heapViewer/lib/libheapViewer.so
-./demo/jvmti/hprof/lib/libhprof.so
 ./demo/jvmti/minst/lib/libminst.so
 ./demo/jvmti/mtrace/lib/libmtrace.so
 ./demo/jvmti/versionCheck/lib/libversionCheck.so
@@ -62,9 +60,7 @@
 ./lib/i386/client/libjvm.so
 ./lib/i386/libattach.so
 ./lib/i386/libdt_socket.so
-./lib/i386/libhprof.so
 ./lib/i386/libinstrument.so
-./lib/i386/libjava_crw_demo.so
 ./lib/i386/libjsdt.so
 ./lib/i386/libmanagement.so
 ./lib/i386/libnpt.so
@@ -118,7 +114,6 @@
 ./demo/jvmti/gctest/lib/libgctest.so
 ./demo/jvmti/heapTracker/lib/libheapTracker.so
 ./demo/jvmti/heapViewer/lib/libheapViewer.so
-./demo/jvmti/hprof/lib/libhprof.so
 ./demo/jvmti/minst/lib/libminst.so
 ./demo/jvmti/mtrace/lib/libmtrace.so
 ./demo/jvmti/versionCheck/lib/libversionCheck.so
@@ -130,16 +125,13 @@
 ./demo/jvmti/gctest/lib/libgctest.so
 ./demo/jvmti/heapTracker/lib/libheapTracker.so
 ./demo/jvmti/heapViewer/lib/libheapViewer.so
-./demo/jvmti/hprof/lib/libhprof.so
 ./demo/jvmti/minst/lib/libminst.so
 ./demo/jvmti/mtrace/lib/libmtrace.so
 ./demo/jvmti/versionCheck/lib/libversionCheck.so
 ./demo/jvmti/waiters/lib/libwaiters.so
 ./lib/amd64/libattach.so
 ./lib/amd64/libdt_socket.so
-./lib/amd64/libhprof.so
 ./lib/amd64/libinstrument.so
-./lib/amd64/libjava_crw_demo.so
 ./lib/amd64/libjsdt.so
 ./lib/amd64/libjsig.so
 ./lib/amd64/libmanagement.so
@@ -197,7 +189,6 @@
 ./demo/jvmti/gctest/lib/libgctest.so
 ./demo/jvmti/heapTracker/lib/libheapTracker.so
 ./demo/jvmti/heapViewer/lib/libheapViewer.so
-./demo/jvmti/hprof/lib/libhprof.so
 ./demo/jvmti/minst/lib/libminst.so
 ./demo/jvmti/mtrace/lib/libmtrace.so
 ./demo/jvmti/versionCheck/lib/libversionCheck.so
@@ -217,7 +208,6 @@
 ./demo/jvmti/gctest/lib/libgctest.so
 ./demo/jvmti/heapTracker/lib/libheapTracker.so
 ./demo/jvmti/heapViewer/lib/libheapViewer.so
-./demo/jvmti/hprof/lib/libhprof.so
 ./demo/jvmti/minst/lib/libminst.so
 ./demo/jvmti/mtrace/lib/libmtrace.so
 ./demo/jvmti/versionCheck/lib/libversionCheck.so
@@ -232,7 +222,6 @@
 ./lib/amd64/libdcpr.so
 ./lib/amd64/libdt_socket.so
 ./lib/amd64/libfontmanager.so
-./lib/amd64/libhprof.so
 ./lib/amd64/libinstrument.so
 ./lib/amd64/libj2gss.so
 ./lib/amd64/libj2pcsc.so
@@ -240,7 +229,6 @@
 ./lib/amd64/libj2ucrypto.so
 ./lib/amd64/libjaas_unix.so
 ./lib/amd64/libjava.so
-./lib/amd64/libjava_crw_demo.so
 ./lib/amd64/libjawt.so
 ./lib/amd64/libjdwp.so
 ./lib/amd64/libjfr.so
@@ -330,7 +318,6 @@
 ./demo/jvmti/gctest/lib/libgctest.so
 ./demo/jvmti/heapTracker/lib/libheapTracker.so
 ./demo/jvmti/heapViewer/lib/libheapViewer.so
-./demo/jvmti/hprof/lib/libhprof.so
 ./demo/jvmti/minst/lib/libminst.so
 ./demo/jvmti/mtrace/lib/libmtrace.so
 ./demo/jvmti/versionCheck/lib/libversionCheck.so
@@ -353,7 +340,6 @@
 ./demo/jvmti/gctest/lib/libgctest.so
 ./demo/jvmti/heapTracker/lib/libheapTracker.so
 ./demo/jvmti/heapViewer/lib/libheapViewer.so
-./demo/jvmti/hprof/lib/libhprof.so
 ./demo/jvmti/minst/lib/libminst.so
 ./demo/jvmti/mtrace/lib/libmtrace.so
 ./demo/jvmti/versionCheck/lib/libversionCheck.so
@@ -369,7 +355,6 @@
 ./lib/sparcv9/libdcpr.so
 ./lib/sparcv9/libdt_socket.so
 ./lib/sparcv9/libfontmanager.so
-./lib/sparcv9/libhprof.so
 ./lib/sparcv9/libinstrument.so
 ./lib/sparcv9/libj2gss.so
 ./lib/sparcv9/libj2pcsc.so
@@ -377,7 +362,6 @@
 ./lib/sparcv9/libj2ucrypto.so
 ./lib/sparcv9/libjaas_unix.so
 ./lib/sparcv9/libjava.so
-./lib/sparcv9/libjava_crw_demo.so
 ./lib/sparcv9/libjawt.so
 ./lib/sparcv9/libjdwp.so
 ./lib/sparcv9/libjfr.so
@@ -473,7 +457,6 @@
 ./demo/jvmti/heapTracker/lib/heapTracker.dll
 ./demo/jvmti/minst/lib/minst.dll
 ./bin/attach.dll
-./bin/java_crw_demo.dll
 ./bin/jsoundds.dll
 ./bin/server/jvm.dll
 ./bin/appletviewer.exe
@@ -611,9 +594,7 @@
 ./Contents/Home/lib/libawt_lwawt.dylib
 ./Contents/Home/lib/libdeploy.dylib
 ./Contents/Home/lib/libdt_socket.dylib
-./Contents/Home/lib/libhprof.dylib
 ./Contents/Home/lib/libinstrument.dylib
-./Contents/Home/lib/libjava_crw_demo.dylib
 ./Contents/Home/lib/libjdwp.dylib
 ./Contents/Home/lib/libjsdt.dylib
 ./Contents/Home/lib/libjsig.dylib
@@ -635,9 +616,7 @@
 ./lib/libawt_lwawt.dylib
 ./lib/libdeploy.dylib
 ./lib/libdt_socket.dylib
-./lib/libhprof.dylib
 ./lib/libinstrument.dylib
-./lib/libjava_crw_demo.dylib
 ./lib/libjdwp.dylib
 ./lib/libjsdt.dylib
 ./lib/libjsig.dylib
--- a/hotspot/.hgtags	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 20:46:18 2017 +0200
@@ -480,3 +480,4 @@
 2f354281e9915275693c4e519a959b8a6f22d3a3 jdk9-b75
 0bc8d1656d6f2b1fdfe803c1305a108bb9939f35 jdk9-b76
 e66c3813789debfc06f206afde1bf7a84cb08451 jdk9-b77
+20dc06b04fe5ec373879414d60ef82ac70faef98 jdk9-b78
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -416,7 +416,8 @@
   int jmp_off = __ offset();
   __ jmp(_patch_site_entry);
   // Add enough nops so deoptimization can overwrite the jmp above with a call
-  // and not destroy the world.
+  // and not destroy the world. We cannot use fat nops here, since the concurrent
+  // code rewrite may transiently create the illegal instruction sequence.
   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
     __ nop();
   }
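
The loop above reserves a 5-byte window (the size of a near call on x86) behind the jmp using only single-byte nops; per the new comment, a multi-byte (fat) nop could straddle the patch boundary and be observed as a half-rewritten, illegal sequence by a concurrently executing thread. A minimal standalone sketch of the padding arithmetic, with a hypothetical helper in place of the assembler's offset() bookkeeping:

    #include <cstdio>

    // How many 1-byte nops (0x90) are needed so that the patch site starting
    // at jmp_off spans at least call_size bytes. Mirrors the loop in
    // c1_CodeStubs_x86.cpp; names are illustrative, not the HotSpot API.
    static int pad_bytes_needed(int jmp_off, int cur_off, int call_size = 5) {
      int needed = jmp_off + call_size - cur_off;
      return needed > 0 ? needed : 0;
    }

    int main() {
      printf("%d\n", pad_bytes_needed(0, 2)); // short jmp (2 bytes): 3 nops
      printf("%d\n", pad_bytes_needed(0, 5)); // near jmp (5 bytes): 0 nops
      return 0;
    }
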
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -345,9 +345,7 @@
   const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
   if (!do_post_padding) {
     // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
-    while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
-      __ nop();
-    }
+    __ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
   }
   int offset = __ offset();
   __ inline_cache_check(receiver, IC_Klass);
@@ -2861,9 +2859,7 @@
       case lir_virtual_call:  // currently, sparc-specific for niagara
       default: ShouldNotReachHere();
     }
-    while (offset++ % BytesPerWord != 0) {
-      __ nop();
-    }
+    __ align(BytesPerWord, offset);
   }
 }
 
@@ -2902,10 +2898,7 @@
   int start = __ offset();
   if (os::is_MP()) {
     // make sure that the displacement word of the call ends up word aligned
-    int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
-    while (offset++ % BytesPerWord != 0) {
-      __ nop();
-    }
+    __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
   }
   __ relocate(static_stub_Relocation::spec(call_pc));
   __ mov_metadata(rbx, (Metadata*)NULL);
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -970,8 +970,12 @@
 }
 
 void MacroAssembler::align(int modulus) {
-  if (offset() % modulus != 0) {
-    nop(modulus - (offset() % modulus));
+  align(modulus, offset());
+}
+
+void MacroAssembler::align(int modulus, int target) {
+  if (target % modulus != 0) {
+    nop(modulus - (target % modulus));
   }
 }
 
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -192,6 +192,7 @@
 
   // Alignment
   void align(int modulus);
+  void align(int modulus, int target);
 
   // A 5 byte nop that is safe for patching (see patch_verified_entry)
   void fat_nop();
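
The new two-argument align(modulus, target) pads with nops at the current position so that a later offset (target, typically the current offset plus the size of instructions about to be emitted) lands on a modulus boundary; the padding count is modulus - (target % modulus) for a misaligned target. A standalone sketch of that arithmetic (function and variable names are illustrative):

    #include <cassert>

    // Nop bytes needed so that `target` becomes a multiple of `modulus`,
    // as in MacroAssembler::align(int modulus, int target).
    static int align_padding(int modulus, int target) {
      int rem = target % modulus;
      return rem == 0 ? 0 : modulus - rem;
    }

    int main() {
      assert(align_padding(8, 16) == 0);  // already aligned
      assert(align_padding(8, 13) == 3);  // 13 + 3 == 16
      // Caller pattern from c1_LIRAssembler_x86.cpp: pad now so that the
      // inline-cache check emitted next starts on CodeEntryAlignment.
      int alignment = 32, offset = 7, ic_cmp_size = 9;
      int pad = align_padding(alignment, offset + ic_cmp_size);
      assert((offset + pad + ic_cmp_size) % alignment == 0);
      return 0;
    }
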
--- a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -108,7 +108,7 @@
 #define GEN_SIZE(Type)                                  \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
-    printf("#define SIZE_%-35s %ld\n",                   \
+    printf("#define SIZE_%-35s %ld\n",                  \
             #Type, sizeof(Type));                       \
     break;                                              \
   case GEN_INDEX:                                       \
@@ -134,7 +134,7 @@
   }
 
 void gen_prologue(GEN_variant gen_variant) {
-    const char *suffix;
+    const char *suffix = "Undefined-Suffix";
 
     switch(gen_variant) {
       case GEN_OFFSET: suffix = ".h";        break;
@@ -228,10 +228,10 @@
   printf("\n");
 
   GEN_OFFS(Method, _constMethod);
-  GEN_OFFS(Method, _constants);
   GEN_OFFS(Method, _access_flags);
   printf("\n");
 
+  GEN_OFFS(ConstMethod, _constants);
   GEN_OFFS(ConstMethod, _flags);
   GEN_OFFS(ConstMethod, _code_size);
   GEN_OFFS(ConstMethod, _name_index);
@@ -264,7 +264,7 @@
 
   GEN_OFFS(nmethod, _method);
   GEN_OFFS(nmethod, _dependencies_offset);
-  GEN_OFFS(nmethod, _oops_offset);
+  GEN_OFFS(nmethod, _metadata_offset);
   GEN_OFFS(nmethod, _scopes_data_offset);
   GEN_OFFS(nmethod, _scopes_pcs_offset);
   GEN_OFFS(nmethod, _handler_table_offset);
--- a/hotspot/src/os/bsd/dtrace/jhelper.d	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/bsd/dtrace/jhelper.d	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
 extern pointer __1cIUniverseO_collectedHeap_;
 
 extern pointer __1cHnmethodG__vtbl_;
-extern pointer __1cNMethodG__vtbl_;
+extern pointer __1cGMethodG__vtbl_;
 extern pointer __1cKBufferBlobG__vtbl_;
 
 #define copyin_ptr(ADDR)    *(pointer*)  copyin((pointer) (ADDR), sizeof(pointer))
@@ -164,7 +164,7 @@
   this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
 
   this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
-  
+ 
   /*
    * Get Java heap bounds
    */
@@ -457,12 +457,15 @@
 
   this->nameSymbol = copyin_ptr(this->constantPool +
       this->nameIndex * sizeof (pointer) + SIZE_ConstantPool);
+  /* The symbol is a CPSlot and has lower bit set to indicate metadata */
+  this->nameSymbol &= (~1); /* remove metadata lsb */
 
   this->nameSymbolLength = copyin_uint16(this->nameSymbol +
       OFFSET_Symbol_length);
 
   this->signatureSymbol = copyin_ptr(this->constantPool +
       this->signatureIndex * sizeof (pointer) + SIZE_ConstantPool);
+  this->signatureSymbol &= (~1); /* remove metadata lsb */
 
   this->signatureSymbolLength = copyin_uint16(this->signatureSymbol +
       OFFSET_Symbol_length);
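
As the added comments note, a constant-pool slot (CPSlot) tags pointers that refer to metadata by setting the least significant bit, so the helper must strip the tag before using the Symbol address. A tiny model of the masking, with a made-up tagged value:

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t raw = 0x1235;                  // hypothetical CPSlot: pointer | 1
      uintptr_t sym = raw & ~(uintptr_t)1;     // remove metadata lsb, as above
      assert((raw & 1) == 1 && sym == 0x1234);
      return 0;
    }
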
--- a/hotspot/src/os/bsd/dtrace/jvm_dtrace.c	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/bsd/dtrace/jvm_dtrace.c	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,7 +227,7 @@
 /* attach to given JVM */
 jvm_t* jvm_attach(pid_t pid) {
     jvm_t* jvm;
-    int door_fd, attach_fd, i;
+    int door_fd, attach_fd, i = 0;
 
     jvm = (jvm_t*) calloc(1, sizeof(jvm_t));
     if (jvm == NULL) {
@@ -292,14 +292,13 @@
 /* detach the given JVM */
 int jvm_detach(jvm_t* jvm) {
     if (jvm) {
-        int res;
+        int res = 0;
         if (jvm->door_fd != -1) {
             if (file_close(jvm->door_fd) != 0) {
                 set_jvm_error(JVM_ERR_CANT_CLOSE_DOOR);
                 res = -1;
             } else {
                 clear_jvm_error();
-                res = 0;
             }
         }
         free(jvm);
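
The jvm_detach() change (duplicated for the Solaris copy further down) initializes res to 0 so the function no longer returns an indeterminate value when door_fd == -1, and the success branch can drop its now-redundant assignment. A reduced model of the control flow; the names and the trailing return, which the hunk elides, are illustrative:

    #include <cstdio>

    static int detach_model(int door_fd, bool close_fails) {
      int res = 0;            // was uninitialized before the patch
      if (door_fd != -1) {
        if (close_fails) {
          res = -1;           // closing the door file descriptor failed
        }                     // success: res is already 0
      }
      return res;             // well defined even when door_fd == -1
    }

    int main() {
      printf("%d %d %d\n", detach_model(-1, false),
             detach_model(5, false), detach_model(5, true)); // 0 0 -1
      return 0;
    }
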
--- a/hotspot/src/os/bsd/dtrace/libjvm_db.c	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/bsd/dtrace/libjvm_db.c	Wed Jul 05 20:46:18 2017 +0200
@@ -882,7 +882,7 @@
 /* Finds a PcDesc with real-pc equal to N->pc */
 static int pc_desc_at(Nmethod_t *N)
 {
-  uint64_t pc_diff;
+  uint64_t pc_diff = 999;
   int32_t offs;
   int32_t err;
 
--- a/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -217,9 +217,9 @@
     //
     return false;
   }
-  // See if the uid of the directory matches the effective uid of the process.
-  //
-  if (statp->st_uid != geteuid()) {
+  // If user is not root then see if the uid of the directory matches the effective uid of the process.
+  uid_t euid = geteuid();
+  if ((euid != 0) && (statp->st_uid != euid)) {
     // The directory was not created by this user, declare it insecure.
     //
     return false;
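
This check (applied identically to the Solaris file below) exempts root from the ownership test: a non-root JVM still requires the hsperfdata directory to be owned by its own effective uid, while root may pass regardless of who created the directory. A small model of the predicate, with dir_uid standing in for statp->st_uid:

    #include <cassert>
    #include <sys/types.h>

    static bool uid_check_passes(uid_t dir_uid, uid_t euid) {
      return (euid == 0) || (dir_uid == euid);  // root is exempt from the match
    }

    int main() {
      assert(uid_check_passes(1000, 1000));   // owner matches
      assert(!uid_check_passes(1000, 1001));  // mismatch: insecure
      assert(uid_check_passes(1000, 0));      // root: allowed regardless
      return 0;
    }
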
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -85,7 +85,7 @@
 #define GEN_OFFS_NAME(Type,Name,OutputType)             \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
-    printf("#define OFFSET_%-33s %d\n",                 \
+    printf("#define OFFSET_%-33s %ld\n",                \
             #OutputType #Name, offset_of(Type, Name));  \
     break;                                              \
   case GEN_INDEX:                                       \
@@ -103,7 +103,7 @@
 #define GEN_SIZE(Type)                                  \
   switch(gen_variant) {                                 \
   case GEN_OFFSET:                                      \
-    printf("#define SIZE_%-35s %d\n",                   \
+    printf("#define SIZE_%-35s %ld\n",                  \
             #Type, sizeof(Type));                       \
     break;                                              \
   case GEN_INDEX:                                       \
@@ -129,7 +129,7 @@
   }
 
 void gen_prologue(GEN_variant gen_variant) {
-    const char *suffix;
+    const char *suffix = "Undefined-Suffix";
 
     switch(gen_variant) {
       case GEN_OFFSET: suffix = ".h";        break;
@@ -211,7 +211,7 @@
   GEN_OFFS(ConstantPool, _pool_holder);
   printf("\n");
 
-  GEN_VALUE(OFFSET_HeapBlockHeader_used, offset_of(HeapBlock::Header, _used));
+  GEN_VALUE(OFFSET_HeapBlockHeader_used, (int) offset_of(HeapBlock::Header, _used));
   GEN_OFFS(oopDesc, _metadata);
   printf("\n");
 
@@ -275,7 +275,7 @@
   GEN_OFFS(NarrowPtrStruct, _shift);
   printf("\n");
 
-  GEN_VALUE(SIZE_HeapBlockHeader, sizeof(HeapBlock::Header));
+  GEN_VALUE(SIZE_HeapBlockHeader, (int) sizeof(HeapBlock::Header));
   GEN_SIZE(oopDesc);
   GEN_SIZE(ConstantPool);
   printf("\n");
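
The format-string fixes address LP64 width mismatches: sizeof(...) and offset_of(...) produce 64-bit values on Solaris LP64, so printing them with %d is undefined behavior; the patch either widens the conversion to %ld (long matches size_t on that ABI) or narrows the value with an explicit (int) cast. An illustration using the standard offsetof in place of HotSpot's offset_of macro:

    #include <cstddef>
    #include <cstdio>

    struct Demo { char pad[3]; long field; };

    int main() {
      // Patch style: %ld with a matching 64-bit argument.
      printf("#define SIZE_Demo %ld\n", (long)sizeof(Demo));
      // Portable alternative: %zu is defined for size_t on any ABI.
      printf("#define OFFSET_Demo_field %zu\n", offsetof(Demo, field));
      return 0;
    }
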
--- a/hotspot/src/os/solaris/dtrace/jvm_dtrace.c	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/solaris/dtrace/jvm_dtrace.c	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,7 +227,7 @@
 /* attach to given JVM */
 jvm_t* jvm_attach(pid_t pid) {
     jvm_t* jvm;
-    int door_fd, attach_fd, i;
+    int door_fd, attach_fd, i = 0;
 
     jvm = (jvm_t*) calloc(1, sizeof(jvm_t));
     if (jvm == NULL) {
@@ -292,14 +292,13 @@
 /* detach the given JVM */
 int jvm_detach(jvm_t* jvm) {
     if (jvm) {
-        int res;
+        int res = 0;
         if (jvm->door_fd != -1) {
             if (file_close(jvm->door_fd) != 0) {
                 set_jvm_error(JVM_ERR_CANT_CLOSE_DOOR);
                 res = -1;
             } else {
                 clear_jvm_error();
-                res = 0;
             }
         }
         free(jvm);
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c	Wed Jul 05 20:46:18 2017 +0200
@@ -882,7 +882,7 @@
 /* Finds a PcDesc with real-pc equal to N->pc */
 static int pc_desc_at(Nmethod_t *N)
 {
-  uint64_t pc_diff;
+  uint64_t pc_diff = 999;
   int32_t offs;
   int32_t err;
 
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -182,75 +182,6 @@
 
 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 
-// Thread Local Storage
-// This is common to all Solaris platforms so it is defined here,
-// in this common file.
-// The declarations are in the os_cpu threadLS*.hpp files.
-//
-// Static member initialization for TLS
-Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
-
-#ifndef PRODUCT
-  #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
-
-int ThreadLocalStorage::_tcacheHit = 0;
-int ThreadLocalStorage::_tcacheMiss = 0;
-
-void ThreadLocalStorage::print_statistics() {
-  int total = _tcacheMiss+_tcacheHit;
-  tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
-                _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
-}
-  #undef _PCT
-#endif // PRODUCT
-
-Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
-                                                        int index) {
-  Thread *thread = get_thread_slow();
-  if (thread != NULL) {
-    address sp = os::current_stack_pointer();
-    guarantee(thread->_stack_base == NULL ||
-              (sp <= thread->_stack_base &&
-              sp >= thread->_stack_base - thread->_stack_size) ||
-              is_error_reported(),
-              "sp must be inside of selected thread stack");
-
-    thread->set_self_raw_id(raw_id);  // mark for quick retrieval
-    _get_thread_cache[index] = thread;
-  }
-  return thread;
-}
-
-
-static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
-#define NO_CACHED_THREAD ((Thread*)all_zero)
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-
-  // Store the new value before updating the cache to prevent a race
-  // between get_thread_via_cache_slowly() and this store operation.
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-
-  // Update thread cache with new thread if setting on thread create,
-  // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
-  uintptr_t raw = pd_raw_thread_id();
-  int ix = pd_cache_index(raw);
-  _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
-}
-
-void ThreadLocalStorage::pd_init() {
-  for (int i = 0; i < _pd_cache_size; i++) {
-    _get_thread_cache[i] = NO_CACHED_THREAD;
-  }
-}
-
-// Invalidate all the caches (happens to be the same as pd_init).
-void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
-
-#undef NO_CACHED_THREAD
-
-// END Thread Local Storage
-
 static inline size_t adjust_stack_size(address base, size_t size) {
   if ((ssize_t)size < 0) {
     // 4759953: Compensate for ridiculous stack size.
@@ -1289,67 +1220,6 @@
   return (int)(_initial_pid ? _initial_pid : getpid());
 }
 
-int os::allocate_thread_local_storage() {
-  // %%%       in Win32 this allocates a memory segment pointed to by a
-  //           register.  Dan Stein can implement a similar feature in
-  //           Solaris.  Alternatively, the VM can do the same thing
-  //           explicitly: malloc some storage and keep the pointer in a
-  //           register (which is part of the thread's context) (or keep it
-  //           in TLS).
-  // %%%       In current versions of Solaris, thr_self and TSD can
-  //           be accessed via short sequences of displaced indirections.
-  //           The value of thr_self is available as %g7(36).
-  //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
-  //           assuming that the current thread already has a value bound to k.
-  //           It may be worth experimenting with such access patterns,
-  //           and later having the parameters formally exported from a Solaris
-  //           interface.  I think, however, that it will be faster to
-  //           maintain the invariant that %g2 always contains the
-  //           JavaThread in Java code, and have stubs simply
-  //           treat %g2 as a caller-save register, preserving it in a %lN.
-  thread_key_t tk;
-  if (thr_keycreate(&tk, NULL)) {
-    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
-                  "(%s)", strerror(errno)));
-  }
-  return int(tk);
-}
-
-void os::free_thread_local_storage(int index) {
-  // %%% don't think we need anything here
-  // if (pthread_key_delete((pthread_key_t) tk)) {
-  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
-  // }
-}
-
-// libthread allocate for tsd_common is a version specific
-// small number - point is NO swap space available
-#define SMALLINT 32
-void os::thread_local_storage_at_put(int index, void* value) {
-  // %%% this is used only in threadLocalStorage.cpp
-  if (thr_setspecific((thread_key_t)index, value)) {
-    if (errno == ENOMEM) {
-      vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
-                            "thr_setspecific: out of swap space");
-    } else {
-      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
-                    "(%s)", strerror(errno)));
-    }
-  } else {
-    ThreadLocalStorage::set_thread_in_slot((Thread *) value);
-  }
-}
-
-// This function could be called before TLS is initialized, for example, when
-// VM receives an async signal or when VM causes a fatal error during
-// initialization. Return NULL if thr_getspecific() fails.
-void* os::thread_local_storage_at(int index) {
-  // %%% this is used only in threadLocalStorage.cpp
-  void* r = NULL;
-  return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
-}
-
-
 // gethrtime() should be monotonic according to the documentation,
 // but some virtualized platforms are known to break this guarantee.
 // getTimeNanos() must be guaranteed not to move backwards, so we
--- a/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -219,9 +219,9 @@
     //
     return false;
   }
-  // See if the uid of the directory matches the effective uid of the process.
-  //
-  if (statp->st_uid != geteuid()) {
+  // If user is not root then see if the uid of the directory matches the effective uid of the process.
+  uid_t euid = geteuid();
+  if ((euid != 0) && (statp->st_uid != euid)) {
     // The directory was not created by this user, declare it insecure.
     //
     return false;
--- a/hotspot/src/os/solaris/vm/thread_solaris.inline.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os/solaris/vm/thread_solaris.inline.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,19 +39,12 @@
 // For SPARC, to avoid excessive register window spill-fill faults,
 // we aggressively inline these routines.
 
-inline Thread* ThreadLocalStorage::thread()  {
-  // don't use specialized code if +UseMallocOnly -- may confuse Purify et al.
-  debug_only(if (UseMallocOnly) return get_thread_slow(););
+inline void ThreadLocalStorage::set_thread(Thread* thread) {
+  _thr_current = thread;
+}
 
-  uintptr_t raw = pd_raw_thread_id();
-  int ix = pd_cache_index(raw);
-  Thread* candidate = ThreadLocalStorage::_get_thread_cache[ix];
-  if (candidate->self_raw_id() == raw) {
-    // hit
-    return candidate;
-  } else {
-    return ThreadLocalStorage::get_thread_via_cache_slowly(raw, ix);
-  }
+inline Thread* ThreadLocalStorage::thread()  {
+  return _thr_current;
 }
 
 #endif // OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
--- a/hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,19 +26,26 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadLocalStorage.hpp"
 
-// Provides an entry point we can link against and
-// a buffer we can emit code into. The buffer is
-// filled by ThreadLocalStorage::generate_code_for_get_thread
-// and called from ThreadLocalStorage::thread()
+// True thread-local variable
+__thread Thread * ThreadLocalStorage::_thr_current = NULL;
+
+// Implementations needed to support the shared API
 
-#include <sys/systeminfo.h>
+void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
 
-// The portable TLS mechanism (get_thread_via_cache) is enough on SPARC.
-// There is no need for hand-assembling a special function.
-void ThreadLocalStorage::generate_code_for_get_thread() {
+bool ThreadLocalStorage::_initialized = false;
+
+void ThreadLocalStorage::init() {
+  _initialized = true;
 }
 
-void ThreadLocalStorage::set_thread_in_slot (Thread * self) {}
+bool ThreadLocalStorage::is_initialized() {
+  return _initialized;
+}
+
+Thread* ThreadLocalStorage::get_thread_slow() {
+    return thread();
+}
 
 extern "C" Thread* get_thread() {
   return ThreadLocalStorage::thread();
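
This rewrite (mirrored for Solaris x86 below) replaces the hand-rolled, %g7-hashed thread cache with a compiler-supported __thread variable, so set_thread() and thread() reduce to a plain store and load. A freestanding sketch of the same shape, with a stand-in Thread type; __thread is the GCC/Solaris Studio extension the patch relies on:

    #include <cassert>

    struct Thread { int id; };

    static __thread Thread* _thr_current = nullptr;  // true thread-local variable

    static void set_thread(Thread* t) { _thr_current = t; }
    static Thread* current_thread()   { return _thr_current; }

    int main() {
      Thread t{42};
      assert(current_thread() == nullptr);
      set_thread(&t);
      assert(current_thread()->id == 42);
      return 0;
    }
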
--- a/hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,47 +25,15 @@
 #ifndef OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
 #define OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
 
-public:
-  // Java Thread  - force inlining
-  static inline Thread* thread() ;
+// Solaris specific implementation involves simple, direct use
+// of a compiler-based thread-local variable
 
 private:
-  static Thread* _get_thread_cache[];  // index by [(raw_id>>9)^(raw_id>>20) % _pd_cache_size]
-  static Thread* get_thread_via_cache_slowly(uintptr_t raw_id, int index);
+  static __thread Thread * _thr_current;
 
-  NOT_PRODUCT(static int _tcacheHit;)
-  NOT_PRODUCT(static int _tcacheMiss;)
+  static bool _initialized;  // needed for shared API
 
 public:
-
-  // Print cache hit/miss statistics
-  static void print_statistics() PRODUCT_RETURN;
-
-  enum Constants {
-    _pd_cache_size         =  256*2  // projected typical # of threads * 2
-  };
-
-  static void set_thread_in_slot (Thread *) ;
-
-  static uintptr_t pd_raw_thread_id() {
-    return _raw_thread_id();
-  }
-
-  static int pd_cache_index(uintptr_t raw_id) {
-    // Hash function: From email from Dave:
-    // The hash function deserves an explanation.  %g7 points to libthread's
-    // "thread" structure.  On T1 the thread structure is allocated on the
-    // user's stack (yes, really!) so the ">>20" handles T1 where the JVM's
-    // stack size is usually >= 1Mb.  The ">>9" is for T2 where Roger allocates
-    // globs of thread blocks contiguously.  The "9" has to do with the
-    // expected size of the T2 thread structure.  If these constants are wrong
-    // the worst thing that'll happen is that the hit rate for heavily threaded
-    // apps won't be as good as it could be.  If you want to burn another
-    // shift+xor you could mix together _all of the %g7 bits to form the hash,
-    // but I think that's excessive.  Making the change above changed the
-    // T$ miss rate on SpecJBB (on a 16X system) from about 3% to imperceptible.
-    uintptr_t ix = (int) (((raw_id >> 9) ^ (raw_id >> 20)) % _pd_cache_size);
-    return ix;
-  }
+  static inline Thread* thread();
 
 #endif // OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
--- a/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,10 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/threadLocalStorage.hpp"
-
+#include "runtime/thread.inline.hpp"
 
 void MacroAssembler::int3() {
   push(rax);
@@ -39,98 +38,32 @@
   pop(rax);
 }
 
-#define __  _masm->
-#ifndef _LP64
-static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
-
-  // slow call to of thr_getspecific
-  // int thr_getspecific(thread_key_t key, void **value);
-  // Consider using pthread_getspecific instead.
-
-__  push(0);                                                            // allocate space for return value
-  if (thread != rax) __ push(rax);                                      // save rax, if caller still wants it
-__  push(rcx);                                                          // save caller save
-__  push(rdx);                                                          // save caller save
+// This is simply a call to ThreadLocalStorage::thread()
+void MacroAssembler::get_thread(Register thread) {
   if (thread != rax) {
-__    lea(thread, Address(rsp, 3 * sizeof(int)));                       // address of return value
-  } else {
-__    lea(thread, Address(rsp, 2 * sizeof(int)));                       // address of return value
+    push(rax);
   }
-__  push(thread);                                                       // and pass the address
-__  push(ThreadLocalStorage::thread_index());                           // the key
-__  call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
-__  increment(rsp, 2 * wordSize);
-__  pop(rdx);
-__  pop(rcx);
-  if (thread != rax) __ pop(rax);
-__  pop(thread);
-
-}
-#else
-static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
-  // slow call to of thr_getspecific
-  // int thr_getspecific(thread_key_t key, void **value);
-  // Consider using pthread_getspecific instead.
+  push(rdi);
+  push(rsi);
+  push(rdx);
+  push(rcx);
+  push(r8);
+  push(r9);
+  push(r10);
+  push(r11);
 
+  call(RuntimeAddress(CAST_FROM_FN_PTR(address, ThreadLocalStorage::thread)));
+
+  pop(r11);
+  pop(r10);
+  pop(r9);
+  pop(r8);
+  pop(rcx);
+  pop(rdx);
+  pop(rsi);
+  pop(rdi);
   if (thread != rax) {
-__    push(rax);
-  }
-__  push(0); // space for return value
-__  push(rdi);
-__  push(rsi);
-__  lea(rsi, Address(rsp, 16)); // pass return value address
-__  push(rdx);
-__  push(rcx);
-__  push(r8);
-__  push(r9);
-__  push(r10);
-  // XXX
-__  mov(r10, rsp);
-__  andptr(rsp, -16);
-__  push(r10);
-__  push(r11);
-
-__  movl(rdi, ThreadLocalStorage::thread_index());
-__  call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
-
-__  pop(r11);
-__  pop(rsp);
-__  pop(r10);
-__  pop(r9);
-__  pop(r8);
-__  pop(rcx);
-__  pop(rdx);
-__  pop(rsi);
-__  pop(rdi);
-__  pop(thread); // load return value
-  if (thread != rax) {
-__    pop(rax);
+    movl(thread, rax);
+    pop(rax);
   }
 }
-#endif //LP64
-
-void MacroAssembler::get_thread(Register thread) {
-
-  int segment = NOT_LP64(Assembler::GS_segment) LP64_ONLY(Assembler::FS_segment);
-  // Try to emit a Solaris-specific fast TSD/TLS accessor.
-  ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode ();
-  if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) {            // T1
-     // Use thread as a temporary: mov r, gs:[0]; mov r, [r+tlsOffset]
-     emit_int8 (segment);
-     // ExternalAddress doesn't work because it can't take NULL
-     AddressLiteral null(0, relocInfo::none);
-     movptr (thread, null);
-     movptr(thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())) ;
-     return ;
-  } else
-  if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) {              // T2
-     // mov r, gs:[tlsOffset]
-     emit_int8 (segment);
-     AddressLiteral tls_off((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none);
-     movptr (thread, tls_off);
-     return ;
-  }
-
-  slow_call_thr_specific(this, thread);
-
-}
--- a/hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,167 +26,27 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadLocalStorage.hpp"
 
-#ifdef AMD64
-extern "C" Thread*  fs_load(ptrdiff_t tlsOffset);
-extern "C" intptr_t fs_thread();
-#else
-// From solaris_i486.s
-extern "C" Thread*  gs_load(ptrdiff_t tlsOffset);
-extern "C" intptr_t gs_thread();
-#endif // AMD64
+// True thread-local variable
+__thread Thread * ThreadLocalStorage::_thr_current = NULL;
+
+// Implementations needed to support the shared API
 
-// tlsMode encoding:
-//
-// pd_tlsAccessUndefined : uninitialized
-// pd_tlsAccessSlow      : not available
-// pd_tlsAccessIndirect  :
-//   old-style indirect access - present in "T1" libthread.
-//   use thr_slot_sync_allocate() to attempt to allocate a slot.
-// pd_tlsAccessDirect    :
-//   new-style direct access - present in late-model "T2" libthread.
-//   Allocate the offset (slot) via _thr_slot_offset() or by
-//   defining an IE- or LE-mode TLS/TSD slot in the launcher and then passing
-//   that offset into libjvm.so.
-//   See http://sac.eng/Archives/CaseLog/arc/PSARC/2003/159/.
-//
-// Note that we have a capability gap - some early model T2 forms
-// (e.g., unpatched S9) have neither _thr_slot_sync_allocate() nor
-// _thr_slot_offset().  In that case we revert to the usual
-// thr_getspecific accessor.
-//
+void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
 
-static ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_tlsAccessUndefined ;
-static ptrdiff_t tlsOffset = 0 ;
-static thread_key_t tlsKey ;
-
-typedef int (*TSSA_Entry) (ptrdiff_t *, int, int) ;
-typedef ptrdiff_t (*TSO_Entry) (int) ;
+bool ThreadLocalStorage::_initialized = false;
 
-ThreadLocalStorage::pd_tlsAccessMode ThreadLocalStorage::pd_getTlsAccessMode ()
-{
-   guarantee (tlsMode != pd_tlsAccessUndefined, "tlsMode not set") ;
-   return tlsMode ;
-}
-
-ptrdiff_t ThreadLocalStorage::pd_getTlsOffset () {
-   guarantee (tlsMode != pd_tlsAccessUndefined, "tlsMode not set") ;
-   return tlsOffset ;
+void ThreadLocalStorage::init() {
+  _initialized = true;
 }
 
-// TODO: Consider the following improvements:
-//
-// 1.   Convert from thr_*specific* to pthread_*specific*.
-//      The pthread_ forms are slightly faster.  Also, the
-//      pthread_ forms have a pthread_key_delete() API which
-//      would aid in clean JVM shutdown and the eventual goal
-//      of permitting a JVM to reinstantiate itself withing a process.
-//
-// 2.   See ThreadLocalStorage::init().  We end up allocating
-//      two TLS keys during VM startup.  That's benign, but we could collapse
-//      down to one key without too much trouble.
-//
-// 3.   MacroAssembler::get_thread() currently emits calls to thr_getspecific().
-//      Modify get_thread() to call Thread::current() instead.
-//
-// 4.   Thread::current() currently uses a cache keyed by %gs:[0].
-//      (The JVM has PSARC permission to use %g7/%gs:[0]
-//      as an opaque temporally unique thread identifier).
-//      For C++ access to a thread's reflexive "self" pointer we
-//      should consider using one of the following:
-//      a. a radix tree keyed by %esp - as in EVM.
-//         This requires two loads (the 2nd dependent on the 1st), but
-//         is easily inlined and doesn't require a "miss" slow path.
-//      b. a fast TLS/TSD slot allocated by _thr_slot_offset
-//         or _thr_slot_sync_allocate.
-//
-// 5.   'generate_code_for_get_thread' is a misnomer.
-//      We should change it to something more general like
-//      pd_ThreadSelf_Init(), for instance.
-//
-
-static void AllocateTLSOffset ()
-{
-   int rslt ;
-   TSSA_Entry tssa ;
-   TSO_Entry  tso ;
-   ptrdiff_t off ;
-
-   guarantee (tlsMode == ThreadLocalStorage::pd_tlsAccessUndefined, "tlsMode not set") ;
-   tlsMode = ThreadLocalStorage::pd_tlsAccessSlow ;
-   tlsOffset = 0 ;
-#ifndef AMD64
-
-   tssa = (TSSA_Entry) dlsym (RTLD_DEFAULT, "thr_slot_sync_allocate") ;
-   if (tssa != NULL) {
-        off = -1 ;
-        rslt = (*tssa)(&off, NULL, NULL) ;                // (off,dtor,darg)
-        if (off != -1) {
-           tlsOffset = off ;
-           tlsMode = ThreadLocalStorage::pd_tlsAccessIndirect ;
-           return ;
-        }
-    }
-
-    rslt = thr_keycreate (&tlsKey, NULL) ;
-    if (rslt != 0) {
-        tlsMode = ThreadLocalStorage::pd_tlsAccessSlow ;   // revert to slow mode
-        return ;
-    }
-
-    tso = (TSO_Entry) dlsym (RTLD_DEFAULT, "_thr_slot_offset") ;
-    if (tso != NULL) {
-        off = (*tso)(tlsKey) ;
-        if (off >= 0) {
-           tlsOffset = off ;
-           tlsMode = ThreadLocalStorage::pd_tlsAccessDirect ;
-           return ;
-        }
-    }
-
-    // Failure: Too bad ... we've allocated a TLS slot we don't need and there's
-    // no provision in the ABI for returning the slot.
-    //
-    // If we didn't find a slot then:
-    // 1. We might be on liblwp.
-    // 2. We might be on T2 libthread, but all "fast" slots are already
-    //    consumed
-    // 3. We might be on T1, and all TSD (thr_slot_sync_allocate) slots are
-    //    consumed.
-    // 4. We might be on T2 libthread, but it's been re-architected
-    //    so that fast slots are no longer g7-relative.
-    //
-
-    tlsMode = ThreadLocalStorage::pd_tlsAccessSlow ;
-    return ;
-#endif // AMD64
+bool ThreadLocalStorage::is_initialized() {
+  return _initialized;
 }
 
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    AllocateTLSOffset() ;
+Thread* ThreadLocalStorage::get_thread_slow() {
+    return thread();
 }
 
-void ThreadLocalStorage::set_thread_in_slot(Thread *thread) {
-  guarantee (tlsMode != pd_tlsAccessUndefined, "tlsMode not set") ;
-  if (tlsMode == pd_tlsAccessIndirect) {
-#ifdef AMD64
-        intptr_t tbase = fs_thread();
-#else
-        intptr_t tbase = gs_thread();
-#endif // AMD64
-        *((Thread**) (tbase + tlsOffset)) = thread ;
-  } else
-  if (tlsMode == pd_tlsAccessDirect) {
-        thr_setspecific (tlsKey, (void *) thread) ;
-        // set with thr_setspecific and then readback with gs_load to validate.
-#ifdef AMD64
-        guarantee (thread == fs_load(tlsOffset), "tls readback failure") ;
-#else
-        guarantee (thread == gs_load(tlsOffset), "tls readback failure") ;
-#endif // AMD64
-  }
-}
-
-
 extern "C" Thread* get_thread() {
   return ThreadLocalStorage::thread();
 }
--- a/hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,61 +25,15 @@
 #ifndef OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
 #define OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
 
-// Processor dependent parts of ThreadLocalStorage
+// Solaris specific implementation involves simple, direct use
+// of a compiler-based thread-local variable
 
 private:
-  static Thread* _get_thread_cache[];  // index by [(raw_id>>9)^(raw_id>>20) % _pd_cache_size]
-  static Thread* get_thread_via_cache_slowly(uintptr_t raw_id, int index);
+  static __thread Thread * _thr_current;
 
-  NOT_PRODUCT(static int _tcacheHit;)
-  NOT_PRODUCT(static int _tcacheMiss;)
+  static bool _initialized;  // needed for shared API
 
 public:
-  // Cache hit/miss statistics
-  static void print_statistics() PRODUCT_RETURN;
-
-  enum Constants {
-#ifdef AMD64
-    _pd_cache_size         =  256*2   // projected typical # of threads * 2
-#else
-    _pd_cache_size         =  128*2   // projected typical # of threads * 2
-#endif // AMD64
-  };
-
-  enum pd_tlsAccessMode {
-     pd_tlsAccessUndefined      = -1,
-     pd_tlsAccessSlow           = 0,
-     pd_tlsAccessIndirect       = 1,
-     pd_tlsAccessDirect         = 2
-  } ;
-
-  static void set_thread_in_slot (Thread *) ;
-
-  static pd_tlsAccessMode pd_getTlsAccessMode () ;
-  static ptrdiff_t pd_getTlsOffset () ;
-
-  static uintptr_t pd_raw_thread_id() {
-#ifdef _GNU_SOURCE
-#ifdef AMD64
-    uintptr_t rv;
-    __asm__ __volatile__ ("movq %%fs:0, %0" : "=r"(rv));
-    return rv;
-#else
-    return gs_thread();
-#endif // AMD64
-#else  //_GNU_SOURCE
-    return _raw_thread_id();
-#endif //_GNU_SOURCE
-  }
-
-  static int pd_cache_index(uintptr_t raw_id) {
-    // Copied from the sparc version. Dave said it should also work fine
-    // for solx86.
-    int ix = (int) (((raw_id >> 9) ^ (raw_id >> 20)) % _pd_cache_size);
-    return ix;
-  }
-
-  // Java Thread
   static inline Thread* thread();
 
 #endif // OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -33,7 +33,9 @@
 #include "runtime/os.hpp"
 
 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
-  // we must have enough patching space so that call can be inserted
+  // We must have enough patching space so that call can be inserted.
+  // We cannot use fat nops here, since the concurrent code rewrite may transiently
+  // create the illegal instruction sequence.
   while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
     _masm->nop();
   }
@@ -592,9 +594,7 @@
 void LIR_Assembler::emit_op0(LIR_Op0* op) {
   switch (op->code()) {
     case lir_word_align: {
-      while (code_offset() % BytesPerWord != 0) {
-        _masm->nop();
-      }
+      _masm->align(BytesPerWord);
       break;
     }
 
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -620,12 +620,12 @@
   // Support for parallelizing survivor space rescan
   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
     const size_t max_plab_samples =
-      ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
+      _young_gen->max_survivor_size() / (ThreadLocalAllocBuffer::min_size() * HeapWordSize);
 
     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
-    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
+    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
-    _survivor_chunk_capacity = 2*max_plab_samples;
+    _survivor_chunk_capacity = max_plab_samples;
     for (uint i = 0; i < ParallelGCThreads; i++) {
       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
@@ -641,12 +641,6 @@
   _inter_sweep_timer.start();  // start of time
 }
 
-size_t CMSCollector::plab_sample_minimum_size() {
-  // The default value of MinTLABSize is 2k, but there is
-  // no way to get the default value if the flag has been overridden.
-  return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
-}
-
 const char* ConcurrentMarkSweepGeneration::name() const {
   return "concurrent mark-sweep generation";
 }
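
With plab_sample_minimum_size() removed, the sample count divides the maximum survivor space directly by the real minimum TLAB size in bytes, and the chunk array no longer reserves twice that count. Worked numbers, purely illustrative:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Hypothetical sizing: 4 MiB survivor space, 2 KiB minimum TLAB
      // (ThreadLocalAllocBuffer::min_size() * HeapWordSize).
      const size_t survivor_bytes = 4u * 1024 * 1024;
      const size_t min_tlab_bytes = 2u * 1024;
      const size_t max_plab_samples = survivor_bytes / min_tlab_bytes;  // 2048
      printf("capacity now %zu (was %zu)\n", max_plab_samples, 2 * max_plab_samples);
      return 0;
    }
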
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -739,10 +739,6 @@
   size_t*    _cursor;
   ChunkArray* _survivor_plab_array;
 
-  // A bounded minimum size of PLABs, should not return too small values since
-  // this will affect the size of the data structures used for parallel young gen rescan
-  size_t plab_sample_minimum_size();
-
   // Support for marking stack overflow handling
   bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
   bool par_take_from_overflow_list(size_t num,
--- a/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -300,5 +300,3 @@
   }
   return G1AllocRegion::release();
 }
-
-
--- a/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/g1Allocator.hpp"
+#include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
@@ -67,11 +67,11 @@
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
     // it's retired again.
-    _g1h->_old_set.remove(retained_region);
+    _g1h->old_set_remove(retained_region);
     bool during_im = _g1h->collector_state()->during_initial_mark_pause();
     retained_region->note_start_of_copying(during_im);
     old->set(retained_region);
-    _g1h->_hr_printer.reuse(retained_region);
+    _g1h->hr_printer()->reuse(retained_region);
     evacuation_info.set_alloc_regions_used_before(retained_region->used());
   }
 }
@@ -116,15 +116,85 @@
 G1PLAB::G1PLAB(size_t gclab_word_size) :
   PLAB(gclab_word_size), _retired(true) { }
 
-HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
-                                                        size_t word_sz,
-                                                        AllocationContext_t context) {
+size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
+  // Return the remaining space in the cur alloc region, but not less than
+  // the min TLAB size.
+
+  // Also, this value can be at most the humongous object threshold,
+  // since we can't allow tlabs to grow big enough to accommodate
+  // humongous objects.
+
+  HeapRegion* hr = mutator_alloc_region(context)->get();
+  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
+  if (hr == NULL) {
+    return max_tlab;
+  } else {
+    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
+  }
+}
+
+HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
+                                              size_t word_size,
+                                              AllocationContext_t context) {
+  switch (dest.value()) {
+    case InCSetState::Young:
+      return survivor_attempt_allocation(word_size, context);
+    case InCSetState::Old:
+      return old_attempt_allocation(word_size, context);
+    default:
+      ShouldNotReachHere();
+      return NULL; // Keep some compilers happy
+  }
+}
+
+HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
+                                                   AllocationContext_t context) {
+  assert(!_g1h->is_humongous(word_size),
+         "we should not be seeing humongous-size allocations in this path");
+
+  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                           false /* bot_updates */);
+  if (result == NULL) {
+    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                          false /* bot_updates */);
+  }
+  if (result != NULL) {
+    _g1h->dirty_young_block(result, word_size);
+  }
+  return result;
+}
+
+HeapWord* G1Allocator::old_attempt_allocation(size_t word_size,
+                                              AllocationContext_t context) {
+  assert(!_g1h->is_humongous(word_size),
+         "we should not be seeing humongous-size allocations in this path");
+
+  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                      true /* bot_updates */);
+  if (result == NULL) {
+    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+    result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                     true /* bot_updates */);
+  }
+  return result;
+}
+
+G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
+  _g1h(G1CollectedHeap::heap()),
+  _allocator(allocator),
+  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
+}
+
+HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
+                                                       size_t word_sz,
+                                                       AllocationContext_t context) {
   size_t gclab_word_size = _g1h->desired_plab_sz(dest);
   if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
     G1PLAB* alloc_buf = alloc_buffer(dest, context);
     alloc_buf->retire();
 
-    HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
+    HeapWord* buf = _allocator->par_allocate_during_gc(dest, gclab_word_size, context);
     if (buf == NULL) {
       return NULL; // Let caller handle allocation failure.
     }
@@ -136,14 +206,18 @@
     assert(obj != NULL, "buffer was definitely big enough...");
     return obj;
   } else {
-    return _g1h->par_allocate_during_gc(dest, word_sz, context);
+    return _allocator->par_allocate_during_gc(dest, word_sz, context);
   }
 }
 
-G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
-  G1ParGCAllocator(g1h),
-  _surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
-  _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
+void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
+  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
+}
+
+G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
+  G1PLABAllocator(allocator),
+  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
+  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
   for (uint state = 0; state < InCSetState::Num; state++) {
     _alloc_buffers[state] = NULL;
   }
@@ -151,7 +225,7 @@
   _alloc_buffers[InCSetState::Old]  = &_tenured_alloc_buffer;
 }
 
-void G1DefaultParGCAllocator::retire_alloc_buffers() {
+void G1DefaultPLABAllocator::retire_alloc_buffers() {
   for (uint state = 0; state < InCSetState::Num; state++) {
     G1PLAB* const buf = _alloc_buffers[state];
     if (buf != NULL) {
@@ -160,7 +234,7 @@
   }
 }
 
-void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
+void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
   wasted = 0;
   undo_wasted = 0;
   for (uint state = 0; state < InCSetState::Num; state++) {
@@ -190,8 +264,8 @@
   }
   assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
   hr->set_archive();
-  _g1h->_old_set.add(hr);
-  _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
+  _g1h->old_set_add(hr);
+  _g1h->hr_printer()->alloc(hr, G1HRPrinter::Archive);
   _allocated_regions.append(hr);
   _allocation_region = hr;
 
--- a/hotspot/src/share/vm/gc/g1/g1Allocator.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -33,17 +33,36 @@
 
 class EvacuationInfo;
 
-// Base class for G1 allocators.
+// Interface to keep track of which regions G1 is currently allocating into. Provides
+// some accessors (e.g. allocating into them, or getting their occupancy).
+// Also keeps track of retained regions across GCs.
 class G1Allocator : public CHeapObj<mtGC> {
   friend class VMStructs;
 protected:
   G1CollectedHeap* _g1h;
 
+  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
+
+  // Accessors to the allocation regions.
+  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
+  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
+
+  // Allocation attempt during GC for a survivor object / PLAB.
+  inline HeapWord* survivor_attempt_allocation(size_t word_size,
+                                               AllocationContext_t context);
+  // Allocation attempt during GC for an old object / PLAB.
+  inline HeapWord* old_attempt_allocation(size_t word_size,
+                                          AllocationContext_t context);
 public:
   G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
+  virtual ~G1Allocator() { }
 
   static G1Allocator* create_allocator(G1CollectedHeap* g1h);
 
+#ifdef ASSERT
+  // Do we currently have an active mutator region to allocate into?
+  bool has_mutator_alloc_region(AllocationContext_t context) { return mutator_alloc_region(context)->get() != NULL; }
+#endif
   virtual void init_mutator_alloc_region() = 0;
   virtual void release_mutator_alloc_region() = 0;
 
@@ -51,24 +70,35 @@
   virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
   virtual void abandon_gc_alloc_regions() = 0;
 
-  virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
-  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
-  virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
-  virtual size_t                 used_in_alloc_regions() = 0;
-  virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;
+  // Management of retained regions.
+
+  virtual bool is_retained_old_region(HeapRegion* hr) = 0;
+  void reuse_retained_old_region(EvacuationInfo& evacuation_info,
+                                 OldGCAllocRegion* old,
+                                 HeapRegion** retained);
+
+  // Allocate blocks of memory during mutator time.
 
-  void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
-                                                           OldGCAllocRegion* old,
-                                                           HeapRegion** retained);
+  inline HeapWord* attempt_allocation(size_t word_size, AllocationContext_t context);
+  inline HeapWord* attempt_allocation_locked(size_t word_size, AllocationContext_t context);
+  inline HeapWord* attempt_allocation_force(size_t word_size, AllocationContext_t context);
+
+  size_t unsafe_max_tlab_alloc(AllocationContext_t context);
 
-  virtual HeapRegion* new_heap_region(uint hrs_index,
-                                      G1BlockOffsetSharedArray* sharedOffsetArray,
-                                      MemRegion mr) {
-    return new HeapRegion(hrs_index, sharedOffsetArray, mr);
-  }
+  // Allocate blocks of memory during garbage collection. Will ensure an
+  // allocation region, either by picking one or expanding the
+  // heap, and then allocate a block of the given size. The block
+  // may not be humongous - it must fit into a single heap region.
+  HeapWord* par_allocate_during_gc(InCSetState dest,
+                                   size_t word_size,
+                                   AllocationContext_t context);
+
+  virtual size_t used_in_alloc_regions() = 0;
 };
 
-// The default allocator for G1.
+// The default allocation region manager for G1. Provides a single mutator, survivor
+// and old generation allocation region.
+// Can retain the (single) old generation allocation region across GCs.
 class G1DefaultAllocator : public G1Allocator {
 protected:
   // Alloc region used to satisfy mutator allocation requests.
@@ -152,10 +182,14 @@
   }
 };
 
-class G1ParGCAllocator : public CHeapObj<mtGC> {
+// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
+// Needs to handle multiple contexts, extra alignment in any "survivor" area, and the
+// gathering of allocation statistics.
+class G1PLABAllocator : public CHeapObj<mtGC> {
   friend class G1ParScanThreadState;
 protected:
   G1CollectedHeap* _g1h;
+  G1Allocator* _allocator;
 
   // The survivor alignment in effect in bytes.
   // == 0 : don't align survivors
@@ -182,11 +216,10 @@
   }
 
 public:
-  G1ParGCAllocator(G1CollectedHeap* g1h) :
-    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()) { }
-  virtual ~G1ParGCAllocator() { }
+  G1PLABAllocator(G1Allocator* allocator);
+  virtual ~G1PLABAllocator() { }
 
-  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
+  static G1PLABAllocator* create_allocator(G1Allocator* allocator);
 
   virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;
 
@@ -219,18 +252,18 @@
     return allocate_direct_or_new_plab(dest, word_sz, context);
   }
 
-  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
-    alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
-  }
+  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
 };
 
-class G1DefaultParGCAllocator : public G1ParGCAllocator {
+// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
+// and old generation allocation.
+class G1DefaultPLABAllocator : public G1PLABAllocator {
   G1PLAB  _surviving_alloc_buffer;
   G1PLAB  _tenured_alloc_buffer;
   G1PLAB* _alloc_buffers[InCSetState::Num];
 
 public:
-  G1DefaultParGCAllocator(G1CollectedHeap* g1h);
+  G1DefaultPLABAllocator(G1Allocator* allocator);
 
   virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
     assert(dest.is_valid(),
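The interface above implies a two-level allocation path: plab_allocate() is the lock-free fast path inside the current buffer, and allocate_direct_or_new_plab() (whose call is visible in the allocate() hunk) is the fallback. A sketch of that wrapper, under the assumption that it carries no other logic:

    HeapWord* G1PLABAllocator::allocate(InCSetState dest,
                                        size_t word_sz,
                                        AllocationContext_t context) {
      // Fast path: bump-allocate in the current PLAB for this destination.
      HeapWord* const obj = plab_allocate(dest, word_sz, context);
      if (obj != NULL) {
        return obj;
      }
      // Slow path: refill the PLAB or allocate directly in the heap.
      return allocate_direct_or_new_plab(dest, word_sz, context);
    }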
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator.inline.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP
+#define SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP
+
+#include "gc/g1/g1Allocator.hpp"
+#include "gc/g1/g1AllocRegion.inline.hpp"
+
+HeapWord* G1Allocator::attempt_allocation(size_t word_size, AllocationContext_t context) {
+  return mutator_alloc_region(context)->attempt_allocation(word_size, false /* bot_updates */);
+}
+
+HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size, AllocationContext_t context) {
+  HeapWord* result = mutator_alloc_region(context)->attempt_allocation_locked(word_size, false /* bot_updates */);
+  assert(result != NULL || mutator_alloc_region(context)->get() == NULL,
+         err_msg("Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(context)->get())));
+  return result;
+}
+
+HeapWord* G1Allocator::attempt_allocation_force(size_t word_size, AllocationContext_t context) {
+  return mutator_alloc_region(context)->attempt_allocation_force(word_size, false /* bot_updates */);
+}
+
+#endif // SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP
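par_allocate_during_gc(), declared in the g1Allocator.hpp hunk above after moving out of G1CollectedHeap, presumably keeps the InCSetState dispatch of the removed inline version (visible later in this patch in the g1CollectedHeap.inline.hpp hunk). For reference, a sketch of the moved body:

    HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                                  size_t word_size,
                                                  AllocationContext_t context) {
      switch (dest.value()) {
        case InCSetState::Young:
          return survivor_attempt_allocation(word_size, context);
        case InCSetState::Old:
          return old_attempt_allocation(word_size, context);
        default:
          ShouldNotReachHere();
          return NULL; // Keep some compilers happy
      }
    }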
--- a/hotspot/src/share/vm/gc/g1/g1Allocator_ext.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1Allocator_ext.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -30,6 +30,6 @@
   return new G1DefaultAllocator(g1h);
 }
 
-G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
-  return new G1DefaultParGCAllocator(g1h);
+G1PLABAllocator* G1PLABAllocator::create_allocator(G1Allocator* allocator) {
+  return new G1DefaultPLABAllocator(allocator);
 }
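The factory indirection keeps the _ext build able to substitute its own implementations. After this change the wiring is two-level: one heap-wide G1Allocator, plus one G1PLABAllocator per GC worker (created in G1ParScanThreadState, see the g1ParScanThreadState.cpp hunk below). Roughly, with g1h standing for the G1CollectedHeap instance:

    // Heap-wide region manager, created once at heap setup.
    G1Allocator* allocator = G1Allocator::create_allocator(g1h);

    // Per-worker PLAB manager, layered on top of the shared allocator.
    G1PLABAllocator* plab_allocator = G1PLABAllocator::create_allocator(allocator);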
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -31,7 +31,7 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/concurrentMarkThread.inline.hpp"
-#include "gc/g1/g1AllocRegion.inline.hpp"
+#include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
@@ -815,22 +815,16 @@
 
     {
       MutexLockerEx x(Heap_lock);
-      result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
-                                                                                    false /* bot_updates */);
+      result = _allocator->attempt_allocation_locked(word_size, context);
       if (result != NULL) {
         return result;
       }
 
-      // If we reach here, attempt_allocation_locked() above failed to
-      // allocate a new region. So the mutator alloc region should be NULL.
-      assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
-
       if (GC_locker::is_active_and_needs_gc()) {
         if (g1_policy()->can_expand_young_list()) {
           // No need for an ergo verbose message here,
           // can_expand_young_list() does this when it returns true.
-          result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
-                                                                                       false /* bot_updates */);
+          result = _allocator->attempt_allocation_force(word_size, context);
           if (result != NULL) {
             return result;
           }
@@ -890,8 +884,7 @@
     // first attempt (without holding the Heap_lock) here and the
     // follow-on attempt will be at the start of the next loop
     // iteration (after taking the Heap_lock).
-    result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
-                                                                           false /* bot_updates */);
+    result = _allocator->attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     }
@@ -1109,6 +1102,29 @@
   }
 }
 
+inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
+                                                     uint* gc_count_before_ret,
+                                                     uint* gclocker_retry_count_ret) {
+  assert_heap_not_locked_and_not_at_safepoint();
+  assert(!is_humongous(word_size), "attempt_allocation() should not "
+         "be called for humongous allocation requests");
+
+  AllocationContext_t context = AllocationContext::current();
+  HeapWord* result = _allocator->attempt_allocation(word_size, context);
+
+  if (result == NULL) {
+    result = attempt_allocation_slow(word_size,
+                                     context,
+                                     gc_count_before_ret,
+                                     gclocker_retry_count_ret);
+  }
+  assert_heap_not_locked();
+  if (result != NULL) {
+    dirty_young_block(result, word_size);
+  }
+  return result;
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
@@ -1231,13 +1247,11 @@
                                                            AllocationContext_t context,
                                                            bool expect_null_mutator_alloc_region) {
   assert_at_safepoint(true /* should_be_vm_thread */);
-  assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
-                                             !expect_null_mutator_alloc_region,
+  assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
          "the current alloc region was unexpectedly found to be non-NULL");
 
   if (!is_humongous(word_size)) {
-    return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
-                                                      false /* bot_updates */);
+    return _allocator->attempt_allocation_locked(word_size, context);
   } else {
     HeapWord* result = humongous_obj_allocate(word_size, context);
     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
@@ -2373,7 +2387,6 @@
   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
 }
 
-
 // Computes the sum of the storage used by the various regions.
 size_t G1CollectedHeap::used() const {
   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
@@ -2632,6 +2645,11 @@
 }
 #endif
 
+bool G1CollectedHeap::obj_in_cs(oop obj) {
+  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
+  return r != NULL && r->in_collection_set();
+}
+
 // Iteration functions.
 
 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
@@ -2833,20 +2851,8 @@
 }
 
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
-  // Return the remaining space in the cur alloc region, but not less than
-  // the min TLAB size.
-
-  // Also, this value can be at most the humongous object threshold,
-  // since we can't allow tlabs to grow big enough to accommodate
-  // humongous objects.
-
-  HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
-  size_t max_tlab = max_tlab_size() * wordSize;
-  if (hr == NULL) {
-    return max_tlab;
-  } else {
-    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
-  }
+  AllocationContext_t context = AllocationContext::current();
+  return _allocator->unsafe_max_tlab_alloc(context);
 }
 
 size_t G1CollectedHeap::max_capacity() const {
@@ -4279,18 +4285,18 @@
   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
 }
 
-void G1CollectedHeap::preserve_mark_during_evac_failure(uint queue_num, oop obj, markOop m) {
+void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
   if (!_evacuation_failed) {
     _evacuation_failed = true;
   }
 
-  _evacuation_failed_info_array[queue_num].register_copy_failure(obj->size());
+  _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
 
   // We want to call the "for_promotion_failure" version only in the
   // case of a promotion failure.
   if (m->must_be_preserved_for_promotion_failure(obj)) {
     OopAndMarkOop elem(obj, m);
-    _preserved_objs[queue_num].push(elem);
+    _preserved_objs[worker_id].push(elem);
   }
 }
 
@@ -4334,7 +4340,7 @@
 
   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
 
-  assert(_worker_id == _par_scan_state->queue_num(), "sanity");
+  assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 
   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
@@ -4443,9 +4449,6 @@
   ParallelTaskTerminator _terminator;
   uint _n_workers;
 
-  Mutex _stats_lock;
-  Mutex* stats_lock() { return &_stats_lock; }
-
 public:
   G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
     : AbstractGangTask("G1 collection"),
@@ -4453,8 +4456,7 @@
       _queues(task_queues),
       _root_processor(root_processor),
       _terminator(n_workers, _queues),
-      _n_workers(n_workers),
-      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
+      _n_workers(n_workers)
   {}
 
   RefToScanQueueSet* queues() { return _queues; }
@@ -4581,8 +4583,8 @@
       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
 
       if (PrintTerminationStats) {
-        MutexLocker x(stats_lock());
-        pss.print_termination_stats(worker_id);
+        MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
+        pss.print_termination_stats();
       }
 
       assert(pss.queue_is_empty(), "should be empty");
@@ -5009,7 +5011,7 @@
 bool G1STWIsAliveClosure::do_object_b(oop p) {
   // An object is reachable if it is outside the collection set,
   // or is inside and copied.
-  return !_g1->obj_in_cs(p) || p->is_forwarded();
+  return !_g1->is_in_cset(p) || p->is_forwarded();
 }
 
 // Non Copying Keep Alive closure
@@ -5498,7 +5500,9 @@
     }
 
     // The individual threads will set their evac-failure closures.
-    if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
+    if (PrintTerminationStats) {
+      G1ParScanThreadState::print_termination_stats_hdr();
+    }
 
     workers()->run_task(&g1_par_task);
     end_par_time_sec = os::elapsedTime();
@@ -6491,7 +6495,6 @@
   return NULL;
 }
 
-
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
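The G1STWIsAliveClosure change above swaps obj_in_cs(), a slow address-to-region lookup (now defined out of line, see the new definition earlier in this file's hunks), for is_in_cset(). The latter presumably answers from G1's per-region fast-test table instead of walking the region map; sketched for contrast (the _in_cset_fast_test member name is an assumption):

    // Slow variant: map the address to its HeapRegion, then check the flag.
    bool G1CollectedHeap::obj_in_cs(oop obj) {
      HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
      return r != NULL && r->in_collection_set();
    }

    // Fast variant (sketch): constant-time lookup in a biased per-region array.
    inline bool G1CollectedHeap::is_in_cset(oop obj) {
      return _in_cset_fast_test.is_in_cset((HeapWord*) obj);  // assumed member
    }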
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -27,7 +27,6 @@
 
 #include "gc/g1/concurrentMark.hpp"
 #include "gc/g1/evacuationInfo.hpp"
-#include "gc/g1/g1AllocRegion.hpp"
 #include "gc/g1/g1AllocationContext.hpp"
 #include "gc/g1/g1Allocator.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
@@ -187,13 +186,11 @@
   friend class MutatorAllocRegion;
   friend class SurvivorGCAllocRegion;
   friend class OldGCAllocRegion;
-  friend class G1Allocator;
-  friend class G1ArchiveAllocator;
 
   // Closures used in implementation.
   friend class G1ParScanThreadState;
   friend class G1ParTask;
-  friend class G1ParGCAllocator;
+  friend class G1PLABAllocator;
   friend class G1PrepareCompactClosure;
 
   // Other related classes.
@@ -248,7 +245,7 @@
   // The sequence of all heap regions in the heap.
   HeapRegionManager _hrm;
 
-  // Class that handles the different kinds of allocations.
+  // Handles non-humongous allocations in the G1CollectedHeap.
   G1Allocator* _allocator;
 
   // Outside of GC pauses, the number of bytes used in all regions other
@@ -280,22 +277,6 @@
   // start of each GC.
   bool _expand_heap_after_alloc_failure;
 
-  // It resets the mutator alloc region before new allocations can take place.
-  void init_mutator_alloc_region();
-
-  // It releases the mutator alloc region.
-  void release_mutator_alloc_region();
-
-  // It initializes the GC alloc regions at the start of a GC.
-  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
-
-  // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
-
-  // It does any cleanup that needs to be done on the GC alloc regions
-  // before a Full GC.
-  void abandon_gc_alloc_regions();
-
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
@@ -551,31 +532,6 @@
                                             AllocationContext_t context,
                                             bool expect_null_mutator_alloc_region);
 
-  // It dirties the cards that cover the block so that so that the post
-  // write barrier never queues anything when updating objects on this
-  // block. It is assumed (and in fact we assert) that the block
-  // belongs to a young region.
-  inline void dirty_young_block(HeapWord* start, size_t word_size);
-
-  // Allocate blocks during garbage collection. Will ensure an
-  // allocation region, either by picking one or expanding the
-  // heap, and then allocate a block of the given size. The block
-  // may not be a humongous - it must fit into a single heap region.
-  inline HeapWord* par_allocate_during_gc(InCSetState dest,
-                                          size_t word_size,
-                                          AllocationContext_t context);
-  // Ensure that no further allocations can happen in "r", bearing in mind
-  // that parallel threads might be attempting allocations.
-  void par_allocate_remaining_space(HeapRegion* r);
-
-  // Allocation attempt during GC for a survivor object / PLAB.
-  inline HeapWord* survivor_attempt_allocation(size_t word_size,
-                                               AllocationContext_t context);
-
-  // Allocation attempt during GC for an old object / PLAB.
-  inline HeapWord* old_attempt_allocation(size_t word_size,
-                                          AllocationContext_t context);
-
   // These methods are the "callbacks" from the G1AllocRegion class.
 
   // For mutator alloc regions.
@@ -589,10 +545,6 @@
   void retire_gc_alloc_region(HeapRegion* alloc_region,
                               size_t allocated_bytes, InCSetState dest);
 
-  // Allocate the highest free region in the reserved heap. This will commit
-  // regions as necessary.
-  HeapRegion* alloc_highest_free_region();
-
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
@@ -725,6 +677,13 @@
 
   G1HRPrinter* hr_printer() { return &_hr_printer; }
 
+  // Allocates a new heap region instance.
+  HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
+
+  // Allocate the highest free region in the reserved heap. This will commit
+  // regions as necessary.
+  HeapRegion* alloc_highest_free_region();
+
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
@@ -738,6 +697,12 @@
                    bool par,
                    bool locked = false);
 
+  // It dirties the cards that cover the block so that the post
+  // write barrier never queues anything when updating objects on this
+  // block. It is assumed (and in fact we assert) that the block
+  // belongs to a young region.
+  inline void dirty_young_block(HeapWord* start, size_t word_size);
+
   // Frees a humongous region by collapsing it into individual regions
   // and calling free_region() for each of them. The freed regions
   // will be added to the free list that's passed as a parameter (this
@@ -887,7 +852,7 @@
 
   // Preserve the mark of "obj", if necessary, in preparation for its mark
   // word being overwritten with a self-forwarding-pointer.
-  void preserve_mark_during_evac_failure(uint queue, oop obj, markOop m);
+  void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
 
 #ifndef PRODUCT
   // Support for forcing evacuation failures. Analogous to
@@ -1216,6 +1181,7 @@
     }
   }
 
+  inline void old_set_add(HeapRegion* hr);
   inline void old_set_remove(HeapRegion* hr);
 
   size_t non_young_capacity_bytes() {
@@ -1263,7 +1229,7 @@
 
   // Return "TRUE" iff the given object address is within the collection
   // set. Slow implementation.
-  inline bool obj_in_cs(oop obj);
+  bool obj_in_cs(oop obj);
 
   inline bool is_in_cset(const HeapRegion *hr);
   inline bool is_in_cset(oop obj);
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -26,7 +26,6 @@
 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
 
 #include "gc/g1/concurrentMark.hpp"
-#include "gc/g1/g1AllocRegion.inline.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
@@ -57,20 +56,6 @@
   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
 }
 
-HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
-                                                  size_t word_size,
-                                                  AllocationContext_t context) {
-  switch (dest.value()) {
-    case InCSetState::Young:
-      return survivor_attempt_allocation(word_size, context);
-    case InCSetState::Old:
-      return old_attempt_allocation(word_size, context);
-    default:
-      ShouldNotReachHere();
-      return NULL; // Keep some compilers happy
-  }
-}
-
 // Inline functions for G1CollectedHeap
 
 inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
@@ -122,69 +107,12 @@
   OrderAccess::fence();
 }
 
-inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
-  _old_set.remove(hr);
-}
-
-inline bool G1CollectedHeap::obj_in_cs(oop obj) {
-  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
-  return r != NULL && r->in_collection_set();
-}
-
-inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
-                                                     uint* gc_count_before_ret,
-                                                     uint* gclocker_retry_count_ret) {
-  assert_heap_not_locked_and_not_at_safepoint();
-  assert(!is_humongous(word_size), "attempt_allocation() should not "
-         "be called for humongous allocation requests");
-
-  AllocationContext_t context = AllocationContext::current();
-  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
-                                                                                   false /* bot_updates */);
-  if (result == NULL) {
-    result = attempt_allocation_slow(word_size,
-                                     context,
-                                     gc_count_before_ret,
-                                     gclocker_retry_count_ret);
-  }
-  assert_heap_not_locked();
-  if (result != NULL) {
-    dirty_young_block(result, word_size);
-  }
-  return result;
+inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
+  _old_set.add(hr);
 }
 
-inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
-                                                              AllocationContext_t context) {
-  assert(!is_humongous(word_size),
-         "we should not be seeing humongous-size allocations in this path");
-
-  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
-                                                                                       false /* bot_updates */);
-  if (result == NULL) {
-    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
-                                                                                      false /* bot_updates */);
-  }
-  if (result != NULL) {
-    dirty_young_block(result, word_size);
-  }
-  return result;
-}
-
-inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
-                                                         AllocationContext_t context) {
-  assert(!is_humongous(word_size),
-         "we should not be seeing humongous-size allocations in this path");
-
-  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
-                                                                                  true /* bot_updates */);
-  if (result == NULL) {
-    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
-                                                                                 true /* bot_updates */);
-  }
-  return result;
+inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
+  _old_set.remove(hr);
 }
 
 // It dirties the cards that cover the block so that so that the post
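The survivor/old GC-time helpers removed above now live on G1Allocator (their declarations appear in the g1Allocator.hpp hunk near the top of this patch). Their bodies presumably carry over unchanged apart from the receiver; the survivor case, sketched:

    HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
                                                       AllocationContext_t context) {
      assert(!_g1h->is_humongous(word_size),
             "we should not be seeing humongous-size allocations in this path");

      // Lock-free attempt in the current survivor region first.
      HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                               false /* bot_updates */);
      if (result == NULL) {
        // Retire and retry under the free-list lock.
        MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
        result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                              false /* bot_updates */);
      }
      if (result != NULL) {
        _g1h->dirty_young_block(result, word_size);
      }
      return result;
    }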
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap_ext.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap_ext.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
 
 bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
                                                     jlong* totals,
@@ -31,3 +32,8 @@
                                                     jint len) {
   return false;
 }
+
+HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
+                                             MemRegion mr) {
+  return new HeapRegion(hrs_index, bot_shared(), mr);
+}
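Moving new_heap_region() from G1Allocator to a G1CollectedHeap _ext method keeps it an extension hook: a closed-source build can return a HeapRegion subclass by providing a different definition of this file. Illustrative only (MyHeapRegion is a hypothetical subclass):

    HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                                 MemRegion mr) {
      return new MyHeapRegion(hrs_index, bot_shared(), mr);  // hypothetical subclass
    }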
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -31,6 +31,7 @@
 #include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1Log.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "runtime/arguments.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -48,7 +48,7 @@
   assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
 
   _par_scan_state = par_scan_state;
-  _worker_id = par_scan_state->queue_num();
+  _worker_id = par_scan_state->worker_id();
 
   assert(_worker_id < ParallelGCThreads,
          err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads));
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -31,6 +31,7 @@
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -31,13 +31,13 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp)
   : _g1h(g1h),
-    _refs(g1h->task_queue(queue_num)),
+    _refs(g1h->task_queue(worker_id)),
     _dcq(&g1h->dirty_card_queue_set()),
     _ct_bs(g1h->g1_barrier_set()),
     _g1_rem(g1h->g1_rem_set()),
-    _hash_seed(17), _queue_num(queue_num),
+    _hash_seed(17), _worker_id(worker_id),
     _term_attempts(0),
     _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
     _age_table(false), _scanner(g1h, rp),
@@ -59,7 +59,7 @@
   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
 
-  _g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
+  _plab_allocator = G1PLABAllocator::create_allocator(_g1h->allocator());
 
   _dest[InCSetState::NotInCSet]    = InCSetState::NotInCSet;
   // The dest for Young is used when the objects are aged enough to
@@ -71,37 +71,29 @@
 }
 
 G1ParScanThreadState::~G1ParScanThreadState() {
-  _g1_par_allocator->retire_alloc_buffers();
-  delete _g1_par_allocator;
+  _plab_allocator->retire_alloc_buffers();
+  delete _plab_allocator;
   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
 }
 
-void
-G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
-{
+void G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Termination Stats");
-  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
-                   " ------waste (KiB)------");
-  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
-                   "  total   alloc    undo");
-  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
-                   " ------- ------- -------");
+  st->print_raw_cr("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
+  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts  total   alloc    undo");
+  st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
 }
 
-void
-G1ParScanThreadState::print_termination_stats(int i,
-                                              outputStream* const st) const
-{
+void G1ParScanThreadState::print_termination_stats(outputStream* const st) const {
   const double elapsed_ms = elapsed_time() * 1000.0;
   const double s_roots_ms = strong_roots_time() * 1000.0;
   const double term_ms    = term_time() * 1000.0;
   size_t alloc_buffer_waste = 0;
   size_t undo_waste = 0;
-  _g1_par_allocator->waste(alloc_buffer_waste, undo_waste);
-  st->print_cr("%3d %9.2f %9.2f %6.2f "
+  _plab_allocator->waste(alloc_buffer_waste, undo_waste);
+  st->print_cr("%3u %9.2f %9.2f %6.2f "
                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
-               i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
+               _worker_id, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
                alloc_buffer_waste * HeapWordSize / K,
@@ -167,8 +159,9 @@
   // Right now we only have two types of regions (young / old) so
   // let's keep the logic here simple. We can generalize it when necessary.
   if (dest->is_young()) {
-    HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
-                                                          word_sz, context);
+    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
+                                                        word_sz,
+                                                        context);
     if (obj_ptr == NULL) {
       return NULL;
     }
@@ -209,12 +202,12 @@
 
   uint age = 0;
   InCSetState dest_state = next_state(state, old_mark, age);
-  HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);
+  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);
 
   // PLAB allocations should succeed most of the time, so we'll
   // normally check against NULL once and that's it.
   if (obj_ptr == NULL) {
-    obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
+    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
     if (obj_ptr == NULL) {
       obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
       if (obj_ptr == NULL) {
@@ -233,7 +226,7 @@
   if (_g1h->evacuation_should_fail()) {
     // Doing this after all the allocation attempts also tests the
     // undo_allocation() method too.
-    _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
     return handle_evacuation_failure_par(old, old_mark);
   }
 #endif // !PRODUCT
@@ -274,7 +267,7 @@
              "sanity");
       G1StringDedup::enqueue_from_evacuation(is_from_young,
                                              is_to_young,
-                                             queue_num(),
+                                             _worker_id,
                                              obj);
     }
 
@@ -295,7 +288,7 @@
     }
     return obj;
   } else {
-    _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
     return forward_ptr;
   }
 }
@@ -314,7 +307,7 @@
      _g1h->hr_printer()->evac_failure(r);
     }
 
-    _g1h->preserve_mark_during_evac_failure(_queue_num, old, m);
+    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
 
     _scanner.set_region(r);
     old->oop_iterate_backwards(&_scanner);
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -46,7 +46,7 @@
   G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocator* _g1_par_allocator;
+  G1PLABAllocator*  _plab_allocator;
 
   ageTable          _age_table;
   InCSetState       _dest[InCSetState::Num];
@@ -55,7 +55,7 @@
   G1ParScanClosure  _scanner;
 
   int  _hash_seed;
-  uint _queue_num;
+  uint _worker_id;
 
   size_t _term_attempts;
 
@@ -85,7 +85,7 @@
   }
 
  public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
+  G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp);
   ~G1ParScanThreadState();
 
   ageTable*         age_table()       { return &_age_table;       }
@@ -112,8 +112,7 @@
     }
   }
 
-  int* hash_seed() { return &_hash_seed; }
-  uint queue_num() { return _queue_num; }
+  uint worker_id() { return _worker_id; }
 
   size_t term_attempts() const  { return _term_attempts; }
   void note_term_attempt() { _term_attempts++; }
@@ -139,8 +138,11 @@
     return os::elapsedTime() - _start;
   }
 
+  // Print the header for the per-thread termination statistics.
   static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
-  void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
+
+  // Print actual per-thread termination statistics.
+  void print_termination_stats(outputStream* const st = gclog_or_tty) const;
 
   size_t* surviving_young_words() {
     // We add on to hide entry 0 which accumulates surviving words for
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -56,7 +56,7 @@
   }
 
   assert(obj != NULL, "Must be");
-  update_rs(from, p, queue_num());
+  update_rs(from, p, _worker_id);
 }
 
 template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
@@ -136,7 +136,7 @@
 
 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
   StarTask stolen_task;
-  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
+  while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
     assert(verify_task(stolen_task), "sanity");
     dispatch_reference(stolen_task);
 
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -34,6 +34,7 @@
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1RemSet.inline.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/mutex.hpp"
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -497,20 +497,10 @@
     return _rem_set;
   }
 
-  bool in_collection_set() const;
+  inline bool in_collection_set() const;
 
-  HeapRegion* next_in_collection_set() {
-    assert(in_collection_set(), "should only invoke on member of CS.");
-    assert(_next_in_special_set == NULL ||
-           _next_in_special_set->in_collection_set(),
-           "Malformed CS.");
-    return _next_in_special_set;
-  }
-  void set_next_in_collection_set(HeapRegion* r) {
-    assert(in_collection_set(), "should only invoke on member of CS.");
-    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
-    _next_in_special_set = r;
-  }
+  inline HeapRegion* next_in_collection_set() const;
+  inline void set_next_in_collection_set(HeapRegion* r);
 
   void set_allocation_context(AllocationContext_t context) {
     _allocation_context = context;
--- a/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -26,7 +26,7 @@
 #define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
 
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
-#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/space.hpp"
 #include "oops/oop.inline.hpp"
@@ -200,4 +200,18 @@
   return G1CollectedHeap::heap()->is_in_cset(this);
 }
 
+inline HeapRegion* HeapRegion::next_in_collection_set() const {
+  assert(in_collection_set(), "should only invoke on member of CS.");
+  assert(_next_in_special_set == NULL ||
+         _next_in_special_set->in_collection_set(),
+         "Malformed CS.");
+  return _next_in_special_set;
+}
+
+inline void HeapRegion::set_next_in_collection_set(HeapRegion* r) {
+  assert(in_collection_set(), "should only invoke on member of CS.");
+  assert(r == NULL || r->in_collection_set(), "Malformed CS.");
+  _next_in_special_set = r;
+}
+
 #endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
--- a/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -70,7 +70,7 @@
   HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
   MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
   assert(reserved().contains(mr), "invariant");
-  return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
+  return g1h->new_heap_region(hrm_index, mr);
 }
 
 void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
--- a/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -94,8 +94,9 @@
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
-                                     false /* expect_null_cur_alloc_region */);
+    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+                                                   allocation_context(),
+                                                   false /* expect_null_cur_alloc_region */);
     if (_result != NULL) {
       // If we can successfully allocate before we actually do the
       // pause then we will consider this pause successful.
@@ -147,8 +148,9 @@
     g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
   if (_pause_succeeded && _word_size > 0) {
     // An allocation had been requested.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
-                                      true /* expect_null_cur_alloc_region */);
+    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+                                                   allocation_context(),
+                                                   true /* expect_null_cur_alloc_region */);
   } else {
     assert(_result == NULL, "invariant");
     if (!_pause_succeeded) {
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1303,7 +1303,7 @@
   if (handler_index < 0) {
     if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
       tty->cr();
-      tty->print_cr("argument handler #%d at "PTR_FORMAT" for fingerprint " UINT64_FORMAT,
+      tty->print_cr("argument handler #%d at " PTR_FORMAT " for fingerprint " UINT64_FORMAT,
                     _handlers->length(),
                     handler,
                     fingerprint);
@@ -1313,7 +1313,7 @@
   } else {
     if (PrintSignatureHandlers) {
       tty->cr();
-      tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT "(old: "PTR_FORMAT", new : "PTR_FORMAT")",
+      tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT "(old: " PTR_FORMAT ", new : " PTR_FORMAT ")",
                     _handlers->length(),
                     fingerprint,
                     _handlers->at(handler_index),
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -379,7 +379,8 @@
   if (!resolved_method->is_abstract() &&
     (InstanceKlass::cast(klass())->default_methods() != NULL)) {
     int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(),
-                                                 name, signature, Klass::find_overpass, Klass::find_static);
+                                                 name, signature, Klass::find_overpass,
+                                                 Klass::find_static, Klass::find_private);
     if (index >= 0 ) {
       vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
     }
@@ -1189,7 +1190,7 @@
   assert(resolved_method->method_holder()->is_linked(), "must be linked");
 
   // do lookup based on receiver klass using the vtable index
-  if (resolved_method->method_holder()->is_interface()) { // miranda method
+  if (resolved_method->method_holder()->is_interface()) { // default or miranda method
     vtable_index = vtable_index_of_interface_method(resolved_klass,
                            resolved_method);
     assert(vtable_index >= 0 , "we should have valid vtable index at this point");
@@ -1198,7 +1199,7 @@
     selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index));
   } else {
     // at this point we are sure that resolved_method is virtual and not
-    // a miranda method; therefore, it must have a valid vtable index.
+    // a default or miranda method; therefore, it must have a valid vtable index.
     assert(!resolved_method->has_itable_index(), "");
     vtable_index = resolved_method->vtable_index();
     // We could get a negative vtable_index for final methods,
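The extra Klass::find_private argument in the default-method lookup above keeps private methods visible to the search. The two arrays it indexes are parallel: default_methods->at(i) was assigned vtable slot default_vtable_indices->at(i) when the defaults were installed. Restated outside the resolver for clarity (ik standing for InstanceKlass::cast(klass())):

    int i = InstanceKlass::find_method_index(ik->default_methods(), name, signature,
                                             Klass::find_overpass,
                                             Klass::find_static,
                                             Klass::find_private);
    if (i >= 0) {
      vtable_index = ik->default_vtable_indices()->at(i);
    }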
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1381,12 +1381,14 @@
 
 // find_method looks up the name/signature in the local methods array
 Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
-  return find_method_impl(name, signature, find_overpass, find_static);
+  return find_method_impl(name, signature, find_overpass, find_static, find_private);
 }
 
 Method* InstanceKlass::find_method_impl(Symbol* name, Symbol* signature,
-                                        OverpassLookupMode overpass_mode, StaticLookupMode static_mode) const {
-  return InstanceKlass::find_method_impl(methods(), name, signature, overpass_mode, static_mode);
+                                        OverpassLookupMode overpass_mode,
+                                        StaticLookupMode static_mode,
+                                        PrivateLookupMode private_mode) const {
+  return InstanceKlass::find_method_impl(methods(), name, signature, overpass_mode, static_mode, private_mode);
 }
 
 // find_instance_method looks up the name/signature in the local methods array
@@ -1394,7 +1396,7 @@
 Method* InstanceKlass::find_instance_method(
     Array<Method*>* methods, Symbol* name, Symbol* signature) {
   Method* meth = InstanceKlass::find_method_impl(methods, name, signature,
-                                                 find_overpass, skip_static);
+                                                 find_overpass, skip_static, find_private);
   assert(((meth == NULL) || !meth->is_static()), "find_instance_method should have skipped statics");
   return meth;
 }
@@ -1405,22 +1407,51 @@
     return InstanceKlass::find_instance_method(methods(), name, signature);
 }
 
+// find_local_method looks up the name/signature in the local methods array
+// and filters on the overpass, static and private flags.
+// This returns the first one found.
+// Note that the local methods array can have up to one overpass, one static
+// and one instance (private or not) with the same name/signature.
+Method* InstanceKlass::find_local_method(Symbol* name, Symbol* signature,
+                                        OverpassLookupMode overpass_mode,
+                                        StaticLookupMode static_mode,
+                                        PrivateLookupMode private_mode) const {
+  return InstanceKlass::find_method_impl(methods(), name, signature, overpass_mode, static_mode, private_mode);
+}
+
+// find_local_method looks up the name/signature in the given methods array
+// and filters on the overpass, static and private flags.
+// This returns the first one found.
+// Note that the local methods array can have up to one overpass, one static
+// and one instance (private or not) with the same name/signature.
+Method* InstanceKlass::find_local_method(Array<Method*>* methods,
+                                        Symbol* name, Symbol* signature,
+                                        OverpassLookupMode overpass_mode,
+                                        StaticLookupMode static_mode,
+                                        PrivateLookupMode private_mode) {
+  return InstanceKlass::find_method_impl(methods, name, signature, overpass_mode, static_mode, private_mode);
+}
+
+
 // find_method looks up the name/signature in the local methods array
 Method* InstanceKlass::find_method(
     Array<Method*>* methods, Symbol* name, Symbol* signature) {
-  return InstanceKlass::find_method_impl(methods, name, signature, find_overpass, find_static);
+  return InstanceKlass::find_method_impl(methods, name, signature, find_overpass, find_static, find_private);
 }
 
 Method* InstanceKlass::find_method_impl(
-    Array<Method*>* methods, Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode, StaticLookupMode static_mode) {
-  int hit = find_method_index(methods, name, signature, overpass_mode, static_mode);
+    Array<Method*>* methods, Symbol* name, Symbol* signature,
+    OverpassLookupMode overpass_mode, StaticLookupMode static_mode,
+    PrivateLookupMode private_mode) {
+  int hit = find_method_index(methods, name, signature, overpass_mode, static_mode, private_mode);
   return hit >= 0 ? methods->at(hit): NULL;
 }
 
-bool InstanceKlass::method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static) {
-    return (m->signature() == signature) &&
+bool InstanceKlass::method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static, bool skipping_private) {
+    return  ((m->signature() == signature) &&
             (!skipping_overpass || !m->is_overpass()) &&
-            (!skipping_static || !m->is_static());
+            (!skipping_static || !m->is_static()) &&
+            (!skipping_private || !m->is_private()));
 }
 
 // Used directly for default_methods to find the index into the
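With the extra flag, the widened method_matches() above composes three independent filters; a candidate with the right signature survives only if no requested filter applies to it. Restated as a small reference table:

    // method_matches() semantics, restated:
    //   skipping_overpass && m->is_overpass()  -> rejected
    //   skipping_static   && m->is_static()    -> rejected
    //   skipping_private  && m->is_private()   -> rejected
    //   otherwise                              -> signature match decides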
@@ -1430,17 +1461,25 @@
 // the search continues to find a potential non-overpass match.  This capability
 // is important during method resolution to prefer a static method, for example,
 // over an overpass method.
+// There is the possibility for any _methods array to have the same name/signature
+// for a static method, an overpass method and a local instance method.
+// To correctly catch a given method, the search criteria may need
+// to explicitly skip the other two. For local instance methods, it
+// is often necessary to skip private methods as well.
 int InstanceKlass::find_method_index(
-    Array<Method*>* methods, Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode, StaticLookupMode static_mode) {
+    Array<Method*>* methods, Symbol* name, Symbol* signature,
+    OverpassLookupMode overpass_mode, StaticLookupMode static_mode,
+    PrivateLookupMode private_mode) {
   bool skipping_overpass = (overpass_mode == skip_overpass);
   bool skipping_static = (static_mode == skip_static);
+  bool skipping_private = (private_mode == skip_private);
   int hit = binary_search(methods, name);
   if (hit != -1) {
     Method* m = methods->at(hit);
 
     // Do linear search to find matching signature.  First, quick check
     // for common case, ignoring overpasses if requested.
-    if (method_matches(m, signature, skipping_overpass, skipping_static)) return hit;
+    if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) return hit;
 
     // search downwards through overloaded methods
     int i;
@@ -1448,18 +1487,18 @@
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if (method_matches(m, signature, skipping_overpass, skipping_static)) return i;
+        if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) return i;
     }
     // search upwards
     for (i = hit + 1; i < methods->length(); ++i) {
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if (method_matches(m, signature, skipping_overpass, skipping_static)) return i;
+        if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) return i;
     }
     // not found
 #ifdef ASSERT
-    int index = (skipping_overpass || skipping_static) ? -1 : linear_search(methods, name, signature);
+    int index = (skipping_overpass || skipping_static || skipping_private) ? -1 : linear_search(methods, name, signature);
     assert(index == -1, err_msg("binary search should have found entry %d", index));
 #endif
   }
@@ -1489,7 +1528,7 @@
   OverpassLookupMode overpass_local_mode = overpass_mode;
   Klass* klass = const_cast<InstanceKlass*>(this);
   while (klass != NULL) {
-    Method* method = InstanceKlass::cast(klass)->find_method_impl(name, signature, overpass_local_mode, find_static);
+    Method* method = InstanceKlass::cast(klass)->find_method_impl(name, signature, overpass_local_mode, find_static, find_private);
     if (method != NULL) {
       return method;
     }
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -503,12 +503,28 @@
   Method* find_instance_method(Symbol* name, Symbol* signature);
   static Method* find_instance_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
 
-  // true if method matches signature and conforms to skipping_X conditions.
-  static bool method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static);
+  // find a local method (returns NULL if not found)
+  Method* find_local_method(Symbol* name, Symbol* signature,
+                           OverpassLookupMode overpass_mode,
+                           StaticLookupMode static_mode,
+                           PrivateLookupMode private_mode) const;
 
-  // find a local method index in default_methods (returns -1 if not found)
-  static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature,
-                               OverpassLookupMode overpass_mode, StaticLookupMode static_mode);
+  // find a local method from given methods array (returns NULL if not found)
+  static Method* find_local_method(Array<Method*>* methods,
+                           Symbol* name, Symbol* signature,
+                           OverpassLookupMode overpass_mode,
+                           StaticLookupMode static_mode,
+                           PrivateLookupMode private_mode);
+
+  // true if method matches signature and conforms to skipping_X conditions.
+  static bool method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static, bool skipping_private);
+
+  // find a local method index in methods or default_methods (returns -1 if not found)
+  static int find_method_index(Array<Method*>* methods,
+                               Symbol* name, Symbol* signature,
+                               OverpassLookupMode overpass_mode,
+                               StaticLookupMode static_mode,
+                               PrivateLookupMode private_mode);
 
   // lookup operation (returns NULL if not found)
   Method* uncached_lookup_method(Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode) const;
@@ -1153,9 +1169,14 @@
 
   // find a local method (returns NULL if not found)
   Method* find_method_impl(Symbol* name, Symbol* signature,
-                           OverpassLookupMode overpass_mode, StaticLookupMode static_mode) const;
-  static Method* find_method_impl(Array<Method*>* methods, Symbol* name, Symbol* signature,
-                                  OverpassLookupMode overpass_mode, StaticLookupMode static_mode);
+                           OverpassLookupMode overpass_mode,
+                           StaticLookupMode static_mode,
+                           PrivateLookupMode private_mode) const;
+  static Method* find_method_impl(Array<Method*>* methods,
+                                  Symbol* name, Symbol* signature,
+                                  OverpassLookupMode overpass_mode,
+                                  StaticLookupMode static_mode,
+                                  PrivateLookupMode private_mode);
 
   // Free CHeap allocated fields.
   void release_C_heap_structures();
--- a/hotspot/src/share/vm/oops/klass.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -161,6 +161,7 @@
   enum DefaultsLookupMode { find_defaults, skip_defaults };
   enum OverpassLookupMode { find_overpass, skip_overpass };
   enum StaticLookupMode   { find_static,   skip_static };
+  enum PrivateLookupMode  { find_private,  skip_private };
 
   bool is_klass() const volatile { return true; }
 
--- a/hotspot/src/share/vm/oops/klassVtable.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/oops/klassVtable.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -683,7 +683,6 @@
   if (mhk->is_interface()) {
     assert(m->is_public(), "should be public");
     assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
-    // the search could find a miranda or a default method
     if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) {
       return true;
     }
@@ -691,25 +690,57 @@
   return false;
 }
 
-// check if a method is a miranda method, given a class's methods table,
-// its default_method table  and its super
-// Miranda methods are calculated twice:
-// first: before vtable size calculation: including abstract and superinterface default
+// Check if a method is a miranda method, given a class's methods array,
+// its default_method table and its super class.
+// "Miranda" means an abstract non-private method that would not be
+// overridden for the local class.
+// Miranda methods include only non-private interface instance methods,
+// i.e. not private methods, not static methods, not default methods
+// (concrete interface methods), and not overpass methods.
+// If a given class already has a local (including overpass) method or a
+// default method, or any of its superclasses has a method that would
+// override the abstract method, then it is not a miranda method.
+//
+// Miranda methods are checked multiple times.
+// Pass 1: during class load/class file parsing: before vtable size calculation:
+// include superinterface abstract and default methods (non-private instance).
 // We include potential default methods to give them space in the vtable.
-// During the first run, the default_methods list is empty
-// This is seen by default method creation
-// Second: recalculated during vtable initialization: only include abstract methods.
+// During the first run, the current instanceKlass has not yet been
+// created; the superclasses and superinterfaces do have instanceKlasses
+// but may not have vtables; the default_methods list is empty; there are
+// no overpasses. This is seen by default method creation.
+//
+// Pass 2: recalculated during vtable initialization: only include abstract methods.
+// The goal of pass 2 is to walk through the superinterfaces to see if any of
+// the superinterface methods (which were all abstract pre-default methods)
+// need to be added to the vtable.
+// With the addition of default methods, we have three new challenges:
+// overpasses, static interface methods and private interface methods.
+// Static and private interface methods do not get added to the vtable and
+// are not seen by the method resolution process, so we skip those.
+// Overpass methods are already in the vtable, so vtable lookup will
+// find them and we don't need to add a miranda method to the end of
+// the vtable. So we look for overpass methods and, if any are found,
+// return false. Note that we inherit our superclass's vtable, so the
+// superclass search also needs to use find_overpass so that if one is
+// found there we likewise return false.
+// Returning false means no miranda method needs to be added to the vtable.
+//
 // During the second run, default_methods is set up, so concrete methods from
 // superinterfaces with matching names/signatures to default_methods are already
 // in the default_methods list and do not need to be appended to the vtable
-// as mirandas
-// This is seen by link resolution and selection.
-// "miranda" means not static, not defined by this class.
-// private methods in interfaces do not belong in the miranda list.
-// the caller must make sure that the method belongs to an interface implemented by the class
-// Miranda methods only include public interface instance methods
-// Not private methods, not static methods, not default == concrete abstract
-// Miranda methods also do not include overpass methods in interfaces
+// as mirandas. Abstract methods may already have been handled via
+// overpasses, either local or superclass overpasses, which may already
+// be in the vtable.
+//
+// Pass 3: mirandas are also checked by link resolution and selection,
+// for an invocation on a method (not interface method) reference that
+// resolves to a method with an interface as its method_holder.
+// This check is used as part of walking from the bottom of the vtable
+// to find the vtable index for the miranda method.
+//
+// Part of the Miranda Rights in the US means that if you do not have
+// an attorney, one will be appointed for you.
 bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
                              Array<Method*>* default_methods, Klass* super) {
   if (m->is_static() || m->is_private() || m->is_overpass()) {
@@ -717,44 +748,36 @@
   }
   Symbol* name = m->name();
   Symbol* signature = m->signature();
-  Method* mo;
 
-  if ((mo = InstanceKlass::find_instance_method(class_methods, name, signature)) == NULL) {
-    // did not find it in the method table of the current class
-    if ((default_methods == NULL) ||
-        InstanceKlass::find_method(default_methods, name, signature) == NULL) {
-      if (super == NULL) {
-        // super doesn't exist
-        return true;
-      }
-
-      mo = InstanceKlass::cast(super)->lookup_method(name, signature);
-      while (mo != NULL && mo->access_flags().is_static()
-             && mo->method_holder() != NULL
-             && mo->method_holder()->super() != NULL)
-      {
-         mo = mo->method_holder()->super()->uncached_lookup_method(name, signature, Klass::find_overpass);
-      }
-      if (mo == NULL || mo->access_flags().is_private() ) {
-        // super class hierarchy does not implement it or protection is different
-        return true;
-      }
-    }
-  } else {
-     // if the local class has a private method, the miranda will not
-     // override it, so a vtable slot is needed
-     if (mo->access_flags().is_private()) {
-
-       // Second round, weed out any superinterface methods that turned
-       // into default methods, i.e. were concrete not abstract in the end
-       if ((default_methods == NULL) ||
-         InstanceKlass::find_method(default_methods, name, signature) == NULL) {
-         return true;
-       }
-    }
+  // First look in local methods to see if already covered
+  if (InstanceKlass::find_local_method(class_methods, name, signature,
+              Klass::find_overpass, Klass::skip_static, Klass::skip_private) != NULL)
+  {
+    return false;
   }
 
-  return false;
+  // Check local default methods
+  if ((default_methods != NULL) &&
+      (InstanceKlass::find_method(default_methods, name, signature) != NULL))
+  {
+    return false;
+  }
+
+  // Iterate over all superclasses, which should all have instanceKlasses.
+  // Note that we explicitly look for overpasses at each level.
+  // Overpasses may or may not exist for supers for pass 1;
+  // they should have been created for pass 2 and later.
+
+  for (InstanceKlass* cursuper = InstanceKlass::cast(super); cursuper != NULL;
+       cursuper = (InstanceKlass*)cursuper->super())
+  {
+    if (cursuper->find_local_method(name, signature,
+          Klass::find_overpass, Klass::skip_static, Klass::skip_private) != NULL) {
+      return false;
+    }
+  }
+
+  return true;
 }
 
 // Scans current_interface_methods for miranda methods that do not
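
To make the passes above concrete: a miranda arises when a class neither declares nor inherits an implementation for an abstract interface method, yet still needs a vtable slot for it. A minimal sketch with hypothetical names (I/C/MirandaDemo):

    interface I {
        int m(); // abstract interface method: the miranda candidate
    }

    abstract class C implements I {
        // no m() here and none in a superclass, so during pass 2 the VM
        // appends a miranda slot for m()I to the end of C's vtable
    }

    public class MirandaDemo extends C {
        public int m() { return 42; } // fills the slot reserved by the miranda
        public static void main(String[] args) {
            I i = new MirandaDemo();
            System.out.println(i.m()); // 42, dispatched through the vtable
        }
    }
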
--- a/hotspot/src/share/vm/opto/block.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/block.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -393,7 +393,7 @@
   VectorSet visited(a);
 
   // Allocate stack with enough space to avoid frequent realloc
-  Node_Stack nstack(a, C->unique() >> 1);
+  Node_Stack nstack(a, C->live_nodes() >> 1);
   nstack.push(_root, 0);
   uint sum = 0;                 // Counter for blocks
 
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -802,7 +802,7 @@
   Compile *C = igvn->C;
   Arena *a = Thread::current()->resource_area();
   Node_Array node_map = new Node_Array(a);
-  Node_Stack stack(a, C->unique() >> 4);
+  Node_Stack stack(a, C->live_nodes() >> 4);
   PhiNode *nphi = slice_memory(at);
   igvn->register_new_node_with_optimizer( nphi );
   node_map.map(_idx, nphi);
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -3315,7 +3315,7 @@
 
   // Visit everybody reachable!
-  // Allocate stack of size C->unique()/2 to avoid frequent realloc
-  Node_Stack nstack(unique() >> 1);
+  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
+  Node_Stack nstack(live_nodes() >> 1);
   final_graph_reshaping_walk(nstack, root(), frc);
 
   // Check for unreachable (from below) code (i.e., infinite loops).
--- a/hotspot/src/share/vm/opto/domgraph.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/domgraph.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -507,7 +507,7 @@
 // 'semi' as vertex to DFS mapping.  Set 'parent' to DFS parent.
 int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder) {
-  // Allocate stack of size C->unique()/8 to avoid frequent realloc
-  GrowableArray <Node *> dfstack(pil->C->unique() >> 3);
+  // Allocate stack of size C->live_nodes()/8 to avoid frequent realloc
+  GrowableArray <Node *> dfstack(pil->C->live_nodes() >> 3);
   Node *b = pil->C->root();
   int dfsnum = 1;
   dfsorder[b->_idx] = dfsnum; // Cache parent's dfsnum for a later use
--- a/hotspot/src/share/vm/opto/gcm.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -107,8 +107,8 @@
 //------------------------------schedule_pinned_nodes--------------------------
 // Set the basic block for Nodes pinned into blocks
 void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
-  // Allocate node stack of size C->unique()+8 to avoid frequent realloc
-  GrowableArray <Node *> spstack(C->unique() + 8);
+  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
+  GrowableArray <Node *> spstack(C->live_nodes() + 8);
   spstack.push(_root);
   while (spstack.is_nonempty()) {
     Node* node = spstack.pop();
@@ -1310,7 +1310,7 @@
   visited.Clear();
   Node_List stack(arena);
   // Pre-grow the list
-  stack.map((C->unique() >> 1) + 16, NULL);
+  stack.map((C->live_nodes() >> 1) + 16, NULL);
   if (!schedule_early(visited, stack)) {
     // Bailout without retry
     C->record_method_not_compilable("early schedule failed");
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1282,7 +1282,7 @@
 
   if (C->do_vector_loop() && (PrintOpto && VerifyLoopOptimizations || TraceLoopOpts)) {
     Arena* arena = Thread::current()->resource_area();
-    Node_Stack stack(arena, C->unique() >> 2);
+    Node_Stack stack(arena, C->live_nodes() >> 2);
     Node_List rpo_list;
     VectorSet visited(arena);
     visited.set(loop_head->_idx);
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -2231,7 +2231,7 @@
   // _nodes array holds the earliest legal controlling CFG node.
 
   // Allocate stack with enough space to avoid frequent realloc
-  int stack_size = (C->unique() >> 1) + 16; // (unique>>1)+16 from Java2D stats
+  int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
   Node_Stack nstack( a, stack_size );
 
   visited.Clear();
@@ -2691,7 +2691,7 @@
     }
   }
   if (_dom_stk == NULL) {
-    uint init_size = C->unique() / 100; // Guess that 1/100 is a reasonable initial size.
+    uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size.
     if (init_size < 10) init_size = 10;
     _dom_stk = new GrowableArray<uint>(init_size);
   }
@@ -2781,8 +2781,8 @@
 // The sort is of size number-of-control-children, which generally limits
 // it to size 2 (i.e., I just choose between my 2 target loops).
 void PhaseIdealLoop::build_loop_tree() {
-  // Allocate stack of size C->unique()/2 to avoid frequent realloc
-  GrowableArray <Node *> bltstack(C->unique() >> 1);
+  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
+  GrowableArray <Node *> bltstack(C->live_nodes() >> 1);
   Node *n = C->root();
   bltstack.push(n);
   int pre_order = 1;
@@ -3672,7 +3672,7 @@
 void PhaseIdealLoop::dump( ) const {
   ResourceMark rm;
   Arena* arena = Thread::current()->resource_area();
-  Node_Stack stack(arena, C->unique() >> 2);
+  Node_Stack stack(arena, C->live_nodes() >> 2);
   Node_List rpo_list;
   VectorSet visited(arena);
   visited.set(C->top()->_idx);
--- a/hotspot/src/share/vm/opto/matcher.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -2050,7 +2050,7 @@
 // Set bits if Node is shared or otherwise a root
 void Matcher::find_shared( Node *n ) {
-  // Allocate stack of size C->unique() * 2 to avoid frequent realloc
-  MStack mstack(C->unique() * 2);
+  // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
+  MStack mstack(C->live_nodes() * 2);
   // Mark nodes as address_visited if they are inputs to an address expression
   VectorSet address_visited(Thread::current()->resource_area());
   mstack.push(n, Visit);     // Don't need to pre-visit root node
--- a/hotspot/src/share/vm/opto/node.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/node.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1799,7 +1799,7 @@
 static void dump_nodes(const Node* start, int d, bool only_ctrl) {
   if (NotANode(start)) return;
 
-  GrowableArray <Node *> nstack(Compile::current()->unique());
+  GrowableArray <Node *> nstack(Compile::current()->live_nodes());
   collect_nodes_i(&nstack, start, d, (uint) ABS(d), true, only_ctrl, false);
 
   int end = nstack.length();
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -791,7 +791,7 @@
 //------------------------------PhaseIterGVN-----------------------------------
 // Initialize hash table to fresh and clean for +VerifyOpto
 PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
-                                                                      _stack(C->unique() >> 1),
+                                                                      _stack(C->live_nodes() >> 1),
                                                                       _delay_transform(false) {
 }
 
@@ -808,7 +808,11 @@
 // Initialize with previous PhaseGVN info from Parser
 PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
                                               _worklist(*C->for_igvn()),
-                                              _stack(C->unique() >> 1),
+// TODO: Before incremental inlining it was allocated only once and it was fine. Now that
+//       the constructor is used in incremental inlining, this consumes too much memory:
+//                                            _stack(C->live_nodes() >> 1),
+//       So, as a band-aid, we replace this by:
+                                              _stack(C->comp_arena(), 32),
                                               _delay_transform(false)
 {
   uint max;
@@ -1638,7 +1642,7 @@
   _nodes.map( n->_idx, new_node );  // Flag as having been cloned
 
-  // Allocate stack of size _nodes.Size()/2 to avoid frequent realloc
-  GrowableArray <Node *> trstack(C->unique() >> 1);
+  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
+  GrowableArray <Node *> trstack(C->live_nodes() >> 1);
 
   trstack.push(new_node);           // Process children of cloned node
   while ( trstack.is_nonempty() ) {
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -2295,13 +2295,13 @@
 }
 
 // Checks if name in command-line argument -agent{lib,path}:name[=options]
-// represents a valid HPROF of JDWP agent.  is_path==true denotes that we
+// represents a valid JDWP agent.  is_path==true denotes that we
 // are dealing with -agentpath (case where name is a path), otherwise with
 // -agentlib
-bool valid_hprof_or_jdwp_agent(char *name, bool is_path) {
+bool valid_jdwp_agent(char *name, bool is_path) {
   char *_name;
-  const char *_hprof = "hprof", *_jdwp = "jdwp";
-  size_t _len_hprof, _len_jdwp, _len_prefix;
+  const char *_jdwp = "jdwp";
+  size_t _len_jdwp, _len_prefix;
 
   if (is_path) {
     if ((_name = strrchr(name, (int) *os::file_separator())) == NULL) {
@@ -2316,13 +2316,9 @@
     }
 
     _name += _len_prefix;
-    _len_hprof = strlen(_hprof);
     _len_jdwp = strlen(_jdwp);
 
-    if (strncmp(_name, _hprof, _len_hprof) == 0) {
-      _name += _len_hprof;
-    }
-    else if (strncmp(_name, _jdwp, _len_jdwp) == 0) {
+    if (strncmp(_name, _jdwp, _len_jdwp) == 0) {
       _name += _len_jdwp;
     }
     else {
@@ -2336,7 +2332,7 @@
     return true;
   }
 
-  if (strcmp(name, _hprof) == 0 || strcmp(name, _jdwp) == 0) {
+  if (strcmp(name, _jdwp) == 0) {
     return true;
   }
 
@@ -2427,9 +2423,9 @@
           options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2, mtInternal), pos+1, len2);
         }
 #if !INCLUDE_JVMTI
-        if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
+        if (strcmp(name, "jdwp") == 0) {
           jio_fprintf(defaultStream::error_stream(),
-            "Profiling and debugging agents are not supported in this VM\n");
+            "Debugging agents are not supported in this VM\n");
           return JNI_ERR;
         }
 #endif // !INCLUDE_JVMTI
@@ -2449,9 +2445,9 @@
           options = os::strdup_check_oom(pos + 1, mtInternal);
         }
 #if !INCLUDE_JVMTI
-        if (valid_hprof_or_jdwp_agent(name, is_absolute_path)) {
+        if (valid_jdwp_agent(name, is_absolute_path)) {
           jio_fprintf(defaultStream::error_stream(),
-            "Profiling and debugging agents are not supported in this VM\n");
+            "Debugging agents are not supported in this VM\n");
           return JNI_ERR;
         }
 #endif // !INCLUDE_JVMTI
@@ -3305,7 +3301,9 @@
 
   if (scp_assembly_required) {
     // Assemble the bootclasspath elements into the final path.
-    Arguments::set_sysclasspath(scp_p->combined_path());
+    char *combined_path = scp_p->combined_path();
+    Arguments::set_sysclasspath(combined_path);
+    FREE_C_HEAP_ARRAY(char, combined_path);
   }
 
   // This must be done after all arguments have been processed.
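
The reworked check reduces to: for -agentpath, take the basename and strip the platform library prefix, then accept only a name beginning with "jdwp"; for -agentlib, the name must be exactly "jdwp". A rough Java transliteration, simplified to the Linux "lib" prefix and "/" separator (hypothetical class name; the library-suffix check is omitted):

    public class JdwpAgentNameCheck {
        static boolean validJdwpAgent(String name, boolean isPath) {
            if (isPath) {
                int sep = name.lastIndexOf('/');           // os::file_separator()
                if (sep < 0) return false;
                String base = name.substring(sep + 1);
                if (!base.startsWith("lib")) return false; // shared-library prefix
                return base.substring("lib".length()).startsWith("jdwp");
            }
            return name.equals("jdwp");
        }

        public static void main(String[] args) {
            System.out.println(validJdwpAgent("/opt/agents/libjdwp.so", true)); // true
            System.out.println(validJdwpAgent("jdwp", false));                  // true
            System.out.println(validJdwpAgent("hprof", false));                 // false, hprof is gone
        }
    }
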
--- a/hotspot/src/share/vm/runtime/os.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1271,6 +1271,7 @@
   bool has_jimage = (os::stat(jimage, &st) == 0);
   if (has_jimage) {
     Arguments::set_sysclasspath(jimage);
+    FREE_C_HEAP_ARRAY(char, jimage);
     return true;
   }
   FREE_C_HEAP_ARRAY(char, jimage);
@@ -1282,6 +1283,7 @@
       sysclasspath = expand_entries_to_path(modules_dir, fileSep, pathSep);
     }
   }
+  FREE_C_HEAP_ARRAY(char, modules_dir);
 
   // fallback to classes
   if (sysclasspath == NULL)
@@ -1289,6 +1291,7 @@
 
   if (sysclasspath == NULL) return false;
   Arguments::set_sysclasspath(sysclasspath);
+  FREE_C_HEAP_ARRAY(char, sysclasspath);
 
   return true;
 }
--- a/hotspot/src/share/vm/runtime/threadLocalStorage.cpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/runtime/threadLocalStorage.cpp	Wed Jul 05 20:46:18 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,11 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadLocalStorage.hpp"
 
+// Solaris no longer has this kind of ThreadLocalStorage implementation.
+// This will be removed from all platforms in the near future.
+
+#ifndef SOLARIS
+
 // static member initialization
 int ThreadLocalStorage::_thread_index = -1;
 
@@ -54,3 +59,5 @@
 bool ThreadLocalStorage::is_initialized() {
     return (thread_index() != -1);
 }
+
+#endif // SOLARIS
--- a/hotspot/src/share/vm/runtime/threadLocalStorage.hpp	Thu Aug 20 12:29:24 2015 -0700
+++ b/hotspot/src/share/vm/runtime/threadLocalStorage.hpp	Wed Jul 05 20:46:18 2017 +0200
@@ -38,10 +38,14 @@
 extern "C" uintptr_t _raw_thread_id();
 
 class ThreadLocalStorage : AllStatic {
+
+ // Exported API
  public:
   static void    set_thread(Thread* thread);
   static Thread* get_thread_slow();
   static void    invalidate_all() { pd_invalidate_all(); }
+  static void    init();
+  static bool    is_initialized();
 
   // Machine dependent stuff
 #ifdef TARGET_OS_ARCH_linux_x86
@@ -81,17 +85,12 @@
 # include "threadLS_bsd_zero.hpp"
 #endif
 
-
+#ifndef SOLARIS
  public:
   // Accessor
   static inline int  thread_index()              { return _thread_index; }
   static inline void set_thread_index(int index) { _thread_index = index; }
 
-  // Initialization
-  // Called explicitly from VMThread::activate_system instead of init_globals.
-  static void init();
-  static bool is_initialized();
-
  private:
   static int     _thread_index;
 
@@ -100,6 +99,9 @@
   // Processor dependent parts of set_thread and initialization
   static void pd_set_thread(Thread* thread);
   static void pd_init();
+
+#endif // SOLARIS
+
   // Invalidate any thread caching or optimization schemes.
   static void pd_invalidate_all();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/BasicJarBuilder.java	Wed Jul 05 20:46:18 2017 +0200
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @summary Simple jar builder
+ *   Input: jarName className1 className2 ...
+ *     do not specify extensions, just the names
+ *     E.g. prot_domain ProtDomainA ProtDomainB
+ *   Output: A jar containing compiled classes, placed in a test classes folder
+ */
+
+import jdk.test.lib.*;
+
+import java.io.File;
+import java.util.ArrayList;
+import sun.tools.jar.Main;
+
+public class BasicJarBuilder {
+    private static final String classDir = System.getProperty("test.classes");
+
+    public static void build(String jarName, String ...classNames)
+        throws Exception {
+
+        createSimpleJar(classDir, classDir + File.separator + jarName +
+            ".jar", classNames);
+    }
+
+    private static void createSimpleJar(String jarclassDir, String jarName,
+        String[] classNames) throws Exception {
+        ArrayList<String> args = new ArrayList<String>();
+        args.add("cf");
+        args.add(jarName);
+        addClassArgs(args, jarclassDir, classNames);
+        createJar(args);
+    }
+
+    private static void addClassArgs(ArrayList<String> args, String jarclassDir,
+        String[] classNames) {
+
+        for (String name : classNames) {
+            args.add("-C");
+            args.add(jarclassDir);
+            args.add(name + ".class");
+        }
+    }
+
+    private static void createJar(ArrayList<String> args) {
+        Main jarTool = new Main(System.out, System.err, "jar");
+        if (!jarTool.run(args.toArray(new String[1]))) {
+            throw new RuntimeException("jar operation failed");
+        }
+    }
+
+    // helpers
+    public static String getTestJar(String jar) {
+        File dir = new File(System.getProperty("test.classes", "."));
+        File jarFile = new File(dir, jar);
+        if (!jarFile.exists()) {
+            throw new RuntimeException("Cannot find " + jarFile.getPath());
+        }
+        if (!jarFile.isFile()) {
+            throw new RuntimeException("Not a regular file: " + jarFile.getPath());
+        }
+        return jarFile.getPath();
+    }
+}
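
Typical use from a jtreg test, as SharedStrings.java below shows: build a jar out of classes already compiled into test.classes, then resolve its path for -Xbootclasspath/a. A minimal sketch (assumes the named class was compiled via @build so its .class file is present):

    public class BasicJarBuilderUsage {
        public static void main(String[] args) throws Exception {
            // Package the precompiled WhiteBox class into whitebox.jar
            // under the test.classes directory ...
            BasicJarBuilder.build("whitebox", "sun/hotspot/WhiteBox");
            // ... then resolve the full path for use on a bootclasspath.
            String jar = BasicJarBuilder.getTestJar("whitebox.jar");
            System.out.println("built " + jar);
        }
    }
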
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedStrings.java	Wed Jul 05 20:46:18 2017 +0200
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Check to make sure that shared strings in the bootstrap CDS archive
+ *          are actually shared
+ * Feature support: G1GC only, compressed oops/klass pointers, 64-bit OS, not on Windows
+ * @requires (sun.arch.data.model != "32") & (os.family != "windows")
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
+ * @requires (vm.gc=="G1" | vm.gc=="null")
+ * @library /testlibrary /../../test/lib
+ * @modules java.base/sun.misc
+ *          java.management
+ * @ignore - 8133180
+ * @build SharedStringsWb SharedStrings BasicJarBuilder
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main SharedStrings
+ */
+
+import jdk.test.lib.*;
+
+public class SharedStrings {
+    public static void main(String[] args) throws Exception {
+        BasicJarBuilder.build("whitebox", "sun/hotspot/WhiteBox");
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./SharedStrings.jsa",
+            "-XX:+PrintSharedSpaces",
+            // Needed for bootclasspath match, for CDS to work with WhiteBox API
+            "-Xbootclasspath/a:" + BasicJarBuilder.getTestJar("whitebox.jar"),
+            "-Xshare:dump");
+
+        new OutputAnalyzer(pb.start())
+            .shouldContain("Loading classes to share")
+            .shouldContain("Shared string table stats")
+            .shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./SharedStrings.jsa",
+            // these are required modes for shared strings
+            "-XX:+UseCompressedOops", "-XX:+UseG1GC",
+            // needed for access to white box test API
+            "-Xbootclasspath/a:" + BasicJarBuilder.getTestJar("whitebox.jar"),
+            "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
+            "-Xshare:on", "-showversion", "SharedStringsWb");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        try {
+            output.shouldContain("sharing");
+            output.shouldHaveExitValue(0);
+        } catch (RuntimeException e) {
+            output.shouldContain("Unable to use shared archive");
+            output.shouldHaveExitValue(1);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedStringsWb.java	Wed Jul 05 20:46:18 2017 +0200
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+
+// This class is used by the test SharedStrings.java
+// It should be launched in CDS mode
+public class SharedStringsWb {
+    public static void main(String[] args) throws Exception {
+        WhiteBox wb = WhiteBox.getWhiteBox();
+
+        if (wb.areSharedStringsIgnored()) {
+            System.out.println("Shared strings are ignored, assuming PASS");
+            return;
+        }
+
+        // The string "java" is known to be interned and added to the CDS archive
+        String s = "java";
+        String internedS = s.intern();
+
+        if (wb.isShared(internedS)) {
+            System.out.println("Found shared string, result: PASS");
+        } else {
+            throw new RuntimeException("String is not shared, result: FAIL");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/lambda-features/TestStaticandInstance.java	Wed Jul 05 20:46:18 2017 +0200
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8087342
+ * @summary Test linkresolver search static, instance and overpass duplicates
+ * @run main/othervm -Xverify:none TestStaticandInstance
+ */
+
+
+import java.util.*;
+import jdk.internal.org.objectweb.asm.*;
+import static jdk.internal.org.objectweb.asm.Opcodes.*;
+
+public class TestStaticandInstance {
+  static final String stringC = "C";
+  static final String stringD = "D";
+  static final String stringI = "I";
+
+  public static void main(String args[]) throws Throwable {
+    ClassLoader cl = new ClassLoader() {
+      public Class<?> loadClass(String name) throws ClassNotFoundException {
+        Class retClass;
+        if ((retClass = findLoadedClass(name)) != null) {
+           return retClass;
+        }
+        if (stringC.equals(name)) {
+            byte[] classFile=dumpC();
+            return defineClass(stringC, classFile, 0, classFile.length);
+        }
+        if (stringD.equals(name)) {
+            byte[] classFile=dumpD();
+            return defineClass(stringD, classFile, 0, classFile.length);
+        }
+        if (stringI.equals(name)) {
+            byte[] classFile=dumpI();
+            return defineClass(stringI, classFile, 0, classFile.length);
+        }
+        return super.loadClass(name);
+      }
+    };
+
+    Class classC = cl.loadClass(stringC);
+    Class classI = cl.loadClass(stringI);
+
+    try {
+      int staticret = (Integer)cl.loadClass(stringD).getDeclaredMethod("CallStatic").invoke(null);
+      if (staticret != 1) {
+        throw new RuntimeException("invokestatic failed to call correct method");
+      }
+      System.out.println("staticret: " + staticret); // should be 1
+
+      int invokeinterfaceret = (Integer)cl.loadClass(stringD).getDeclaredMethod("CallInterface").invoke(null);
+      if (invokeinterfaceret != 0) {
+        throw new RuntimeException(String.format("Expected java.lang.AbstractMethodError, got %d", invokeinterfaceret));
+      }
+      System.out.println("invokeinterfaceret: AbstractMethodError");
+
+      int invokevirtualret = (Integer)cl.loadClass(stringD).getDeclaredMethod("CallVirtual").invoke(null);
+      if (invokevirtualret != 0) {
+        throw new RuntimeException(String.format("Expected java.lang.IncompatibleClassChangeError, got %d", invokevirtualret));
+      }
+      System.out.println("invokevirtualret: IncompatibleClassChangeError");
+    } catch (java.lang.Throwable e) {
+      throw new RuntimeException("Unexpected exception: " + e.getMessage(), e);
+    }
+  }
+
+/*
+interface I {
+  public int m(); // abstract
+  default int q() { return 3; } // trigger defmeth processing: C gets AME overpass
+}
+
+// C gets static, private and AME overpass m()I with -Xverify:none
+class C implements I {
+  static int m() { return 1;}  // javac with "n()" and patch to "m()"
+  private int m() { return 2;} // javac with public and patch to private
+}
+
+public class D {
+  public static int CallStatic() {
+    int staticret = C.m();    // javac with "C.n" and patch to "C.m"
+    return staticret;
+  }
+  public static int CallInterface() throws AbstractMethodError{
+    try {
+      I myI = new C();
+      return myI.m();
+    } catch (java.lang.AbstractMethodError e) {
+      return 0; // for success
+    }
+  }
+  public static int CallVirtual() {
+    try {
+      C myC = new C();
+      return myC.m();
+    } catch (java.lang.IncompatibleClassChangeError e) {
+      return 0; // for success
+    }
+  }
+}
+*/
+
+  public static byte[] dumpC() {
+
+    ClassWriter cw = new ClassWriter(0);
+    FieldVisitor fv;
+    MethodVisitor mv;
+    AnnotationVisitor av0;
+
+    cw.visit(52, ACC_SUPER, "C", null, "java/lang/Object", new String[] { "I" });
+
+    {
+      mv = cw.visitMethod(0, "<init>", "()V", null, null);
+      mv.visitCode();
+      mv.visitVarInsn(ALOAD, 0);
+      mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
+      mv.visitInsn(RETURN);
+      mv.visitMaxs(1, 1);
+      mv.visitEnd();
+    }
+    {
+      mv = cw.visitMethod(ACC_STATIC, "m", "()I", null, null);
+      mv.visitCode();
+      mv.visitInsn(ICONST_1);
+      mv.visitInsn(IRETURN);
+      mv.visitMaxs(1, 0);
+      mv.visitEnd();
+    }
+    {
+      mv = cw.visitMethod(ACC_PRIVATE, "m", "()I", null, null);
+      mv.visitCode();
+      mv.visitInsn(ICONST_2);
+      mv.visitInsn(IRETURN);
+      mv.visitMaxs(1, 1);
+      mv.visitEnd();
+    }
+    cw.visitEnd();
+
+    return cw.toByteArray();
+  }
+
+  public static byte[] dumpD () {
+
+    ClassWriter cw = new ClassWriter(0);
+    FieldVisitor fv;
+    MethodVisitor mv;
+    AnnotationVisitor av0;
+
+    cw.visit(52, ACC_PUBLIC + ACC_SUPER, "D", null, "java/lang/Object", null);
+
+    {
+      mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+      mv.visitCode();
+      mv.visitVarInsn(ALOAD, 0);
+      mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
+      mv.visitInsn(RETURN);
+      mv.visitMaxs(1, 1);
+      mv.visitEnd();
+    }
+    {
+      mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "CallStatic", "()I", null, null);
+      mv.visitCode();
+      mv.visitMethodInsn(INVOKESTATIC, "C", "m", "()I", false);
+      mv.visitVarInsn(ISTORE, 0);
+      mv.visitVarInsn(ILOAD, 0);
+      mv.visitInsn(IRETURN);
+      mv.visitMaxs(1, 1);
+      mv.visitEnd();
+    }
+    {
+      mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "CallInterface", "()I", null, new String[] { "java/lang/AbstractMethodError" });
+      mv.visitCode();
+      Label l0 = new Label();
+      Label l1 = new Label();
+      Label l2 = new Label();
+      mv.visitTryCatchBlock(l0, l1, l2, "java/lang/AbstractMethodError");
+      mv.visitLabel(l0);
+      mv.visitTypeInsn(NEW, "C");
+      mv.visitInsn(DUP);
+      mv.visitMethodInsn(INVOKESPECIAL, "C", "<init>", "()V", false);
+      mv.visitVarInsn(ASTORE, 0);
+      mv.visitVarInsn(ALOAD, 0);
+      mv.visitMethodInsn(INVOKEINTERFACE, "I", "m", "()I", true);
+      mv.visitLabel(l1);
+      mv.visitInsn(IRETURN);
+      mv.visitLabel(l2);
+      mv.visitFrame(Opcodes.F_SAME1, 0, null, 1, new Object[] {"java/lang/AbstractMethodError"});
+      mv.visitVarInsn(ASTORE, 0);
+      mv.visitInsn(ICONST_0);
+      mv.visitInsn(IRETURN);
+      mv.visitMaxs(2, 1);
+      mv.visitEnd();
+    }
+    {
+      mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "CallVirtual", "()I", null, null);
+      mv.visitCode();
+      Label l0 = new Label();
+      Label l1 = new Label();
+      Label l2 = new Label();
+      mv.visitTryCatchBlock(l0, l1, l2, "java/lang/IncompatibleClassChangeError");
+      mv.visitLabel(l0);
+      mv.visitTypeInsn(NEW, "C");
+      mv.visitInsn(DUP);
+      mv.visitMethodInsn(INVOKESPECIAL, "C", "<init>", "()V", false);
+      mv.visitVarInsn(ASTORE, 0);
+      mv.visitVarInsn(ALOAD, 0);
+      mv.visitMethodInsn(INVOKEVIRTUAL, "C", "m", "()I", false);
+      mv.visitLabel(l1);
+      mv.visitInsn(IRETURN);
+      mv.visitLabel(l2);
+      mv.visitFrame(Opcodes.F_SAME1, 0, null, 1, new Object[] {"java/lang/IncompatibleClassChangeError"});
+      mv.visitVarInsn(ASTORE, 0);
+      mv.visitInsn(ICONST_0);
+      mv.visitInsn(IRETURN);
+      mv.visitMaxs(2, 1);
+      mv.visitEnd();
+    }
+    cw.visitEnd();
+
+    return cw.toByteArray();
+  }
+
+  public static byte[] dumpI() {
+
+    ClassWriter cw = new ClassWriter(0);
+    FieldVisitor fv;
+    MethodVisitor mv;
+    AnnotationVisitor av0;
+
+    cw.visit(52, ACC_ABSTRACT + ACC_INTERFACE, "I", null, "java/lang/Object", null);
+
+    {
+      mv = cw.visitMethod(ACC_PUBLIC + ACC_ABSTRACT, "m", "()I", null, null);
+      mv.visitEnd();
+    }
+    {
+      mv = cw.visitMethod(ACC_PUBLIC, "q", "()I", null, null);
+      mv.visitCode();
+      mv.visitInsn(ICONST_3);
+      mv.visitInsn(IRETURN);
+      mv.visitMaxs(1, 1);
+      mv.visitEnd();
+    }
+    cw.visitEnd();
+
+    return cw.toByteArray();
+  }
+}
--- a/make/Images.gmk	Thu Aug 20 12:29:24 2015 -0700
+++ b/make/Images.gmk	Wed Jul 05 20:46:18 2017 +0200
@@ -47,7 +47,7 @@
 
 # tools
 TOOLS_MODULES += jdk.attach jdk.compiler jdk.dev jdk.internal.le jdk.scripting.nashorn.shell \
-               jdk.javadoc jdk.jcmd jdk.jconsole jdk.hotspot.agent jdk.hprof.agent jdk.jartool \
+               jdk.javadoc jdk.jcmd jdk.jconsole jdk.hotspot.agent jdk.jartool \
                jdk.jdeps jdk.jdi jdk.jdwp.agent jdk.policytool jdk.rmic jdk.xml.bind jdk.xml.ws
 
 ifeq ($(OPENJDK_TARGET_OS), windows)
--- a/modules.xml	Thu Aug 20 12:29:24 2015 -0700
+++ b/modules.xml	Wed Jul 05 20:46:18 2017 +0200
@@ -1623,10 +1623,6 @@
     <depend>jdk.jdi</depend>
   </module>
   <module>
-    <name>jdk.hprof.agent</name>
-    <depend>java.base</depend>
-  </module>
-  <module>
     <name>jdk.httpserver</name>
     <depend>java.base</depend>
     <depend>java.logging</depend>
@@ -1798,7 +1794,7 @@
     <name>jdk.scripting.nashorn</name>
     <depend>java.base</depend>
     <depend>java.logging</depend>
-    <depend>java.scripting</depend>
+    <depend re-exports="true">java.scripting</depend>
     <export>
       <name>jdk.nashorn.internal.runtime</name>
       <to>jdk.scripting.nashorn.shell</to>
@@ -1811,11 +1807,16 @@
       <name>jdk.nashorn.tools</name>
       <to>jdk.scripting.nashorn.shell</to>
     </export>
+    <export>
+      <name>jdk.nashorn.api.scripting</name>
+    </export>
+    <export>
+      <name>jdk.nashorn.api.tree</name>
+    </export>
   </module>
   <module>
     <name>jdk.scripting.nashorn.shell</name>
     <depend>java.base</depend>
-    <depend>java.prefs</depend>
     <depend>jdk.scripting.nashorn</depend>
     <depend>jdk.internal.le</depend>
   </module>
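
The re-exports="true" attribute makes java.scripting readable by anything that depends on jdk.scripting.nashorn, so the nashorn scripting API types (which extend javax.script) are usable without an explicit dependence. In the module-info.java syntax that later replaced modules.xml, the declaration would correspond roughly to (a sketch, not the shipped descriptor):

    // Hypothetical module-info.java equivalent of the modules.xml entry above
    module jdk.scripting.nashorn {
        requires java.base;
        requires java.logging;
        requires transitive java.scripting; // re-exports="true"
        exports jdk.nashorn.api.scripting;
        exports jdk.nashorn.api.tree;
        // qualified exports to jdk.scripting.nashorn.shell omitted
    }
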