Merge
author duke
Wed, 05 Jul 2017 19:37:19 +0200
changeset 23941 2b69e3d44dd9
parent 23940 c354cfc60e8e (current diff)
parent 23877 166fb0e4fe4e (diff)
child 23945 9c47c48f448e
--- a/.hgtags-top-repo	Thu Apr 17 15:20:35 2014 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 19:37:19 2017 +0200
@@ -251,3 +251,4 @@
 d0b525cd31b87abeb6d5b7e3516953eeb13b323c jdk9-b06
 0ea015c298b201c07fa33990f2445b6d0ef3566d jdk9-b07
 db045d8faa0924b7378102d24a1a0d850c1e3834 jdk9-b08
+4a21dc7d57d1069a01f68e7182c074cb37349dfb jdk9-b09
--- a/common/src/fixpath.c	Thu Apr 17 15:20:35 2014 -0700
+++ b/common/src/fixpath.c	Wed Jul 05 19:37:19 2017 +0200
@@ -109,7 +109,7 @@
 
 void append(char **b, size_t *bl, size_t *u, char *add, size_t addlen)
 {
-  while ( (addlen+*u+1) > *bl) {
+  while ((addlen+*u+1) > *bl) {
     *bl *= 2;
     *b = (char*) realloc(*b, *bl);
   }
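
For context, append() grows the buffer geometrically (doubling), so a sequence of appends costs amortized O(1) per byte. A minimal standalone sketch of the same idiom; the name append_checked and the exit-on-failure policy are illustrative, and unlike the original it checks the realloc result:

    #include <stdlib.h>
    #include <string.h>

    // Grow *b (capacity *bl, used *u) until addlen more bytes plus a NUL
    // terminator fit, then copy. Doubling keeps appends amortized O(1).
    static void append_checked(char **b, size_t *bl, size_t *u,
                               const char *add, size_t addlen) {
      while (addlen + *u + 1 > *bl) {
        size_t nbl = *bl * 2;
        char *nb = (char *) realloc(*b, nbl);
        if (nb == NULL) { free(*b); exit(1); }   // illustrative error policy
        *b = nb;
        *bl = nbl;
      }
      memcpy(*b + *u, add, addlen);
      *u += addlen;
    }
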
@@ -118,7 +118,7 @@
 }
 
 /*
- * Creates a new string from in where the first occurance of sub is
+ * Creates a new string from in where the first occurrence of sub is
  * replaced by rep.
  */
 char *replace_substring(char *in, char *sub, char *rep)
@@ -246,7 +246,7 @@
   }
 
   buffer = (char*) malloc(buflen);
-  while((blocklen = fread(block,1,sizeof(block),atin)) > 0) {
+  while ((blocklen = fread(block, 1, sizeof(block), atin)) > 0) {
     append(&buffer, &buflen, &used, block, blocklen);
   }
   buffer[used] = 0;
@@ -280,16 +280,21 @@
   char *current = quoted;
   int pass;
 
-  if(strpbrk(in_arg, " \t\n\v\r\\\"") == NULL) {
+  if (strlen(in_arg) == 0) {
+     // empty string? explicitly quote it.
+     return _strdup("\"\"");
+  }
+
+  if (strpbrk(in_arg, " \t\n\v\r\\\"") == NULL) {
      return _strdup(in_arg);
   }
 
  // Process the arg twice: once to calculate the size and once to copy it.
-  for(pass=1; pass<=2; pass++) {
+  for (pass=1; pass<=2; pass++) {
     char const *arg = in_arg;
 
     // initial "
-    if(pass == 2) {
+    if (pass == 2) {
       *current = '\"';
     }
     current++;
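
The new empty-string case matters because fixpath joins argv back into one CreateProcess command line: an unquoted empty argument contributes nothing to that string and silently vanishes when the child re-parses it. A hedged sketch of just the classification step (names illustrative; the real escaping rules live in quote_arg):

    #include <string.h>

    enum ArgKind { ARG_EMPTY_NEEDS_QUOTES, ARG_SAFE_VERBATIM, ARG_NEEDS_ESCAPING };

    // Classify how an argument must appear on a Windows command line.
    static ArgKind classify_arg(const char *arg) {
      if (arg[0] == '\0')
        return ARG_EMPTY_NEEDS_QUOTES;    // must be emitted as ""
      if (strpbrk(arg, " \t\n\v\r\\\"") == NULL)
        return ARG_SAFE_VERBATIM;         // no metacharacters: pass through
      return ARG_NEEDS_ESCAPING;          // full quote/backslash treatment
    }
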
@@ -328,7 +333,7 @@
         *current = *arg;
       }
       current++;
-    } while( *arg++ != '\0');
+    } while (*arg++ != '\0');
 
     // allocate the buffer
     if (pass == 1) {
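
quote_arg's two-pass shape (measure, allocate, then emit the identical byte stream) is a common C idiom for building a string of unknown length without reallocations; any divergence between the two passes would be a buffer overrun. A minimal sketch of the pattern with a toy transformation (parenthesize is hypothetical; a C++ lambda shares the emit step between passes):

    #include <cstdlib>

    // Pass 1 only counts; pass 2 writes into a buffer sized by pass 1.
    // Both passes run the same emit sequence, so the count always matches.
    static char *parenthesize(const char *in) {
      char *out = nullptr;
      for (int pass = 1; pass <= 2; pass++) {
        size_t n = 0;
        auto emit = [&](char c) { if (pass == 2) out[n] = c; n++; };
        emit('(');
        for (const char *p = in; *p != '\0'; p++) emit(*p);
        emit(')');
        emit('\0');
        if (pass == 1) out = (char *) malloc(n);
      }
      return out;  // caller frees
    }
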
@@ -362,7 +367,7 @@
 
     if (getenv("DEBUG_FIXPATH") != NULL) {
       char const * cmdline = GetCommandLine();
-      fprintf(stderr, "fixpath input line >%s<\n", strstr( cmdline , argv[1]));
+      fprintf(stderr, "fixpath input line >%s<\n", strstr(cmdline, argv[1]));
     }
 
     if (argv[1][1] == 'c' && argv[1][2] == '\0') {
@@ -399,7 +404,7 @@
         }
 
         rc = SetEnvironmentVariable(var, val);
-        if(!rc) {
+        if (!rc) {
           // Could not set var for some reason.  Try to report why.
           const int msg_len = 80 + var_len + strlen(val);
           char * msg = (char *) alloca(msg_len);
@@ -422,7 +427,7 @@
    // handle command and its args.
     while (i < argc) {
       char const *replaced = replace_cygdrive(argv[i]);
-      if(replaced[0] == '@') {
+      if (replaced[0] == '@') {
         // Found at-file! Fix it!
         replaced = fix_at_file(replaced);
       }
@@ -433,7 +438,7 @@
     // determine the length of the line
     line = NULL;
     // args
-    for(i = cmd; i < argc; i++) {
+    for (i = cmd; i < argc; i++) {
       line += (ptrdiff_t) strlen(argv[i]);
     }
     // spaces and null
@@ -443,7 +448,7 @@
 
     // copy in args.
     current = line;
-    for(i = cmd; i < argc; i++) {
+    for (i = cmd; i < argc; i++) {
       ptrdiff_t len = strlen(argv[i]);
       if (i != cmd) {
         *current++ = ' ';
@@ -457,16 +462,16 @@
       fprintf(stderr, "fixpath converted line >%s<\n", line);
     }
 
-    if(cmd == argc) {
+    if (cmd == argc) {
        if (getenv("DEBUG_FIXPATH") != NULL) {
          fprintf(stderr, "fixpath no command provided!\n");
        }
        exit(0);
     }
 
-    ZeroMemory(&si,sizeof(si));
+    ZeroMemory(&si, sizeof(si));
     si.cb=sizeof(si);
-    ZeroMemory(&pi,sizeof(pi));
+    ZeroMemory(&pi, sizeof(pi));
 
     fflush(stderr);
     fflush(stdout);
@@ -481,14 +486,14 @@
                        NULL,
                        &si,
                        &pi);
-    if(!rc) {
+    if (!rc) {
       // Could not start process for some reason.  Try to report why:
       report_error("Could not start process!");
       exit(126);
     }
 
-    WaitForSingleObject(pi.hProcess,INFINITE);
-    GetExitCodeProcess(pi.hProcess,&exitCode);
+    WaitForSingleObject(pi.hProcess, INFINITE);
+    GetExitCodeProcess(pi.hProcess, &exitCode);
 
     if (getenv("DEBUG_FIXPATH") != NULL) {
       for (i=0; i<num_files_to_delete; ++i) {
--- a/hotspot/.hgtags	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 19:37:19 2017 +0200
@@ -411,3 +411,4 @@
 52377a30a3f87b62d6135706997b8c7a47366e37 jdk9-b06
 52f7edf2589d9f9d35db3008bc5377f279de9c18 jdk9-b07
 4dedef5e51ed3a36677a8ba82949fc517ad64162 jdk9-b08
+05e8f5242c26ba45d4fa947e4f4f54c058c9b522 jdk9-b09
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -958,7 +958,7 @@
 
   // reset handle block
   __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch);
-  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
+  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
 
 
   // handle exceptions (exception handling will handle unlocking!)
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -2687,7 +2687,7 @@
   if (!is_critical_native) {
     // reset handle block
     __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
-    __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
+    __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes());
 
     __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
     check_forward_pending_exception(masm, G3_scratch);
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1147,7 +1147,7 @@
 
   // reset handle block
   __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
-  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
+  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
 
   // If we have an oop result store it where it will be safe for any further gc
   // until we return now that we've released the handle it might be protected by
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1358,7 +1358,7 @@
 
   // reset handle block
   __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
-  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
 
   // If result was an oop then unbox and save it in the frame
   { Label L;
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -162,7 +162,7 @@
           "Number of milliseconds to wait before start calculating aborts " \
           "for RTM locking")                                                \
                                                                             \
-  experimental(bool, UseRTMXendForLockBusy, false,                          \
+  experimental(bool, UseRTMXendForLockBusy, true,                           \
           "Use RTM Xend instead of Xabort when lock busy")                  \
                                                                             \
   /* assembler */                                                           \
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1488,11 +1488,10 @@
     movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
     bind(L_rtm_retry);
   }
-  if (!UseRTMXendForLockBusy) {
-    movptr(tmpReg, Address(objReg, 0));
-    testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
-    jcc(Assembler::notZero, IsInflated);
-  }
+  movptr(tmpReg, Address(objReg, 0));
+  testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
+  jcc(Assembler::notZero, IsInflated);
+
   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
     Label L_noincrement;
     if (RTMTotalCountIncrRate > 1) {
@@ -1512,10 +1511,7 @@
   Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
   if (UseRTMXendForLockBusy) {
     xend();
-    movptr(tmpReg, Address(objReg, 0));
-    testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
-    jcc(Assembler::notZero, IsInflated);
-    movptr(abort_status_Reg, 0x1);                // Set the abort status to 1 (as xabort does)
+    movptr(abort_status_Reg, 0x2);   // Set the abort status to 2 (so we can retry)
     jmp(L_decrement_retry);
   }
   else {
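
The two RTM hunks hang together: with UseRTMXendForLockBusy now defaulting to true, a transaction that finds the lock busy commits an empty transaction with xend (cheaper than xabort, which discards all transactional state) and then hand-sets the abort status to 0x2, the "may succeed on retry" bit, so the existing retry-count path spins again instead of bailing out; the inflated-monitor check correspondingly moves in front of the transaction for all cases. The same convention is visible at user level through the RTM intrinsics. A hedged sketch, not HotSpot's generated code (assumes an RTM-capable CPU, compiled with -mrtm):

    #include <immintrin.h>

    // Elide a lock transactionally; on abort, retry only while the
    // status has the retry bit (_XABORT_RETRY == 0x2) set.
    bool try_transactional(volatile int *lock, int retries) {
      while (retries-- > 0) {
        unsigned status = _xbegin();
        if (status == _XBEGIN_STARTED) {
          if (*lock != 0) {             // lock busy: commit the empty
            _xend();                    // transaction instead of aborting,
            status = _XABORT_RETRY;     // then treat it as retryable
          } else {
            /* ... critical section executes transactionally ... */
            _xend();
            return true;
          }
        }
        if (!(status & _XABORT_RETRY)) break;  // not worth retrying
      }
      return false;  // caller falls back to the regular lock
    }
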
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -2266,7 +2266,7 @@
   if (!is_critical_native) {
     // reset handle block
     __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
-    __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
+    __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
 
     // Any exception pending?
     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -2509,7 +2509,7 @@
   if (!is_critical_native) {
     // reset handle block
     __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
-    __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+    __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
   }
 
   // pop our frame
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1287,7 +1287,7 @@
 
   // reset handle block
   __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
-  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
+  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
 
   // If result was an oop then unbox and save it in the frame
   { Label L;
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1259,7 +1259,7 @@
 
   // reset handle block
   __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
-  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
 
   // If result is an oop unbox and store it in frame where gc will see it
   // and result handler will pick it up
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -2811,18 +2811,13 @@
 
 os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
 
-void os::yield_all(int attempts) {
+void os::yield_all() {
   // Yields to all threads, including threads with lower priorities
  // Threads on Linux all have the same priority. The Solaris style
   // os::yield_all() with nanosleep(1ms) is not necessary.
   sched_yield();
 }
 
-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
-  os::yield_all(attempts);
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // thread priority support
 
@@ -3079,7 +3074,7 @@
 
   for (int n = 0; !osthread->sr.is_suspended(); n++) {
     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
-      os::yield_all(i);
+      os::yield_all();
     }
 
     // timeout, try to cancel the request
@@ -3113,7 +3108,7 @@
     if (sr_notify(osthread) == 0) {
       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
-          os::yield_all(i);
+          os::yield_all();
         }
       }
     } else {
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -917,9 +917,20 @@
 //////////////////////////////////////////////////////////////////////////////
 // thread local storage
 
+// Restore the thread pointer if the destructor is called. This is in case
+// someone from JNI code sets up a destructor with pthread_key_create to run
+// detachCurrentThread on thread death. Unless we restore the thread pointer we
+// will hang or crash. When detachCurrentThread is called the key will be set
+// to null and we will not be called again. If detachCurrentThread is never
+// called we could loop forever depending on the pthread implementation.
+static void restore_thread_pointer(void* p) {
+  Thread* thread = (Thread*) p;
+  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
+
 int os::allocate_thread_local_storage() {
   pthread_key_t key;
-  int rslt = pthread_key_create(&key, NULL);
+  int rslt = pthread_key_create(&key, restore_thread_pointer);
   assert(rslt == 0, "cannot allocate thread local storage");
   return (int)key;
 }
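
The destructor trick above rests on documented pthread behavior: at thread exit, for each key with a non-NULL value, the implementation first sets the slot to NULL and then calls the destructor with the old value; if the destructor stores a value back, the whole pass repeats, up to PTHREAD_DESTRUCTOR_ITERATIONS on conforming systems (hence the "could loop forever" caveat in the comment). A small standalone sketch of that lifecycle; all names are illustrative:

    #include <pthread.h>
    #include <cstdio>

    static pthread_key_t key;

    // pthreads has already NULLed the slot when this runs; storing the old
    // value back triggers another destructor pass, which is how the HotSpot
    // change keeps the thread pointer alive until detach actually happens.
    static void restore_pointer(void *p) {
      static int passes = 0;           // one demo thread, a plain static is fine
      std::fprintf(stderr, "destructor pass %d for %p\n", ++passes, p);
      if (passes < 3) pthread_setspecific(key, p);  // bounded for the demo
    }

    static void *worker(void *arg) {
      pthread_setspecific(key, arg);
      return nullptr;                  // thread teardown runs the destructor
    }

    int main() {
      pthread_key_create(&key, restore_pointer);
      pthread_t t;
      int token = 42;
      pthread_create(&t, nullptr, worker, &token);
      pthread_join(t, nullptr);
      pthread_key_delete(&key);
      return 0;
    }
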
@@ -2551,18 +2562,13 @@
 
 os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
 
-void os::yield_all(int attempts) {
+void os::yield_all() {
   // Yields to all threads, including threads with lower priorities
  // Threads on Bsd all have the same priority. The Solaris style
   // os::yield_all() with nanosleep(1ms) is not necessary.
   sched_yield();
 }
 
-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
-  os::yield_all(attempts);
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // thread priority support
 
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1032,9 +1032,20 @@
 //////////////////////////////////////////////////////////////////////////////
 // thread local storage
 
+// Restore the thread pointer if the destructor is called. This is in case
+// someone from JNI code sets up a destructor with pthread_key_create to run
+// detachCurrentThread on thread death. Unless we restore the thread pointer we
+// will hang or crash. When detachCurrentThread is called the key will be set
+// to null and we will not be called again. If detachCurrentThread is never
+// called we could loop forever depending on the pthread implementation.
+static void restore_thread_pointer(void* p) {
+  Thread* thread = (Thread*) p;
+  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
+
 int os::allocate_thread_local_storage() {
   pthread_key_t key;
-  int rslt = pthread_key_create(&key, NULL);
+  int rslt = pthread_key_create(&key, restore_thread_pointer);
   assert(rslt == 0, "cannot allocate thread local storage");
   return (int)key;
 }
@@ -3781,18 +3792,13 @@
 
 os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
 
-void os::yield_all(int attempts) {
+void os::yield_all() {
   // Yields to all threads, including threads with lower priorities
  // Threads on Linux all have the same priority. The Solaris style
   // os::yield_all() with nanosleep(1ms) is not necessary.
   sched_yield();
 }
 
-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
-  os::yield_all(attempts);
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // thread priority support
 
--- a/hotspot/src/os/solaris/vm/attachListener_solaris.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/solaris/vm/attachListener_solaris.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -29,6 +29,7 @@
 #include "services/dtraceAttacher.hpp"
 
 #include <door.h>
+#include <limits.h>
 #include <string.h>
 #include <signal.h>
 #include <sys/types.h>
@@ -668,11 +669,13 @@
     out->print_cr("No probe specified");
     return JNI_ERR;
   } else {
-    int probe_typess = atoi(probe);
-    if (errno) {
+    char *end;
+    long val = strtol(probe, &end, 10);
+    if (end == probe || val < 0 || val > INT_MAX) {
       out->print_cr("invalid probe type");
       return JNI_ERR;
     } else {
+      int probe_typess = (int) val;
       DTrace::enable_dprobes(probe_typess);
       return JNI_OK;
     }
@@ -703,8 +706,9 @@
   bool flag = true;
   const char* arg1;
   if ((arg1 = op->arg(1)) != NULL) {
-    flag = (atoi(arg1) != 0);
-    if (errno) {
+    char *end;
+    flag = (strtol(arg1, &end, 10) != 0);
+    if (arg1 == end) {
       out->print_cr("flag value has to be an integer");
       return JNI_ERR;
     }
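
Both attachListener hunks fix the same bug: atoi is not required to set errno at all (its behavior on unconvertible input is undefined), so the old `if (errno)` tests were reading stale state. strtol's end pointer makes failure explicit. A hedged sketch of the fully strict variant (also rejecting trailing junk and overflow, which the hunks above don't need):

    #include <cerrno>
    #include <climits>
    #include <cstdlib>

    // Strictly parse a non-negative int: the whole string must be digits,
    // the value must fit in [0, INT_MAX], and ERANGE overflow is caught.
    static bool parse_int(const char *s, int *out) {
      char *end;
      errno = 0;                        // strtol sets errno only on ERANGE
      long val = std::strtol(s, &end, 10);
      if (end == s || *end != '\0') return false;   // no digits, or junk
      if (errno == ERANGE || val < 0 || val > INT_MAX) return false;
      *out = (int) val;
      return true;
    }
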
--- a/hotspot/src/os/solaris/vm/osThread_solaris.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/solaris/vm/osThread_solaris.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -49,16 +49,6 @@
 
 // copied from synchronizer.cpp
 
-void OSThread::handle_spinlock_contention(int tries) {
-  if (NoYieldsInMicrolock) return;
-
-  if (tries > 10) {
-    os::yield_all(tries); // Yield to threads of any priority
-  } else if (tries > 5) {
-    os::yield();          // Yield to threads of same or higher priority
-  }
-}
-
 void OSThread::SR_handler(Thread* thread, ucontext_t* uc) {
   os::Solaris::SR_handler(thread, uc);
 }
--- a/hotspot/src/os/solaris/vm/osThread_solaris.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/solaris/vm/osThread_solaris.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -82,8 +82,6 @@
   void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; }
   static void SR_handler(Thread* thread, ucontext_t* uc);
 
-  static void       handle_spinlock_contention(int tries);                      // Used for thread local eden locking
-
   // ***************************************************************
   // Platform dependent initialization and cleanup
   // ***************************************************************
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -969,9 +969,6 @@
   return true;
 }
 
-// _T2_libthread is true if we believe we are running with the newer
-// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
-bool os::Solaris::_T2_libthread = false;
 
 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
   // Allocate the OSThread object
@@ -1056,71 +1053,10 @@
   thread->set_osthread(osthread);
 
   // Create the Solaris thread
-  // explicit THR_BOUND for T2_libthread case in case
-  // that assumption is not accurate, but our alternate signal stack
-  // handling is based on it which must have bound threads
   thread_t tid = 0;
-  long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
-                   | ((UseBoundThreads || os::Solaris::T2_libthread() ||
-                       (thr_type == vm_thread) ||
-                       (thr_type == cgc_thread) ||
-                       (thr_type == pgc_thread) ||
-                       (thr_type == compiler_thread && BackgroundCompilation)) ?
-                      THR_BOUND : 0);
+  long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
   int      status;
 
-  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
-  //
-  // On multiprocessors systems, libthread sometimes under-provisions our
-  // process with LWPs.  On a 30-way systems, for instance, we could have
-  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
-  // to our process.  This can result in under utilization of PEs.
-  // I suspect the problem is related to libthread's LWP
-  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
-  // upcall policy.
-  //
-  // The following code is palliative -- it attempts to ensure that our
-  // process has sufficient LWPs to take advantage of multiple PEs.
-  // Proper long-term cures include using user-level threads bound to LWPs
-  // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
-  // slight timing window with respect to sampling _os_thread_count, but
-  // the race is benign.  Also, we should periodically recompute
-  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
-  // the number of PEs in our partition.  You might be tempted to use
-  // THR_NEW_LWP here, but I'd recommend against it as that could
-  // result in undesirable growth of the libthread's LWP pool.
-  // The fix below isn't sufficient; for instance, it doesn't take into count
-  // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
-  //
-  // Some pathologies this scheme doesn't handle:
-  // *  Threads can block, releasing the LWPs.  The LWPs can age out.
-  //    When a large number of threads become ready again there aren't
-  //    enough LWPs available to service them.  This can occur when the
-  //    number of ready threads oscillates.
-  // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
-  //
-  // Finally, we should call thr_setconcurrency() periodically to refresh
-  // the LWP pool and thwart the LWP age-out mechanism.
-  // The "+3" term provides a little slop -- we want to slightly overprovision.
-
-  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
-    if (!(flags & THR_BOUND)) {
-      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
-    }
-  }
-  // Although this doesn't hurt, we should warn of undefined behavior
-  // when using unbound T1 threads with schedctl().  This should never
-  // happen, as the compiler and VM threads are always created bound
-  DEBUG_ONLY(
-      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
-          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
-          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
-           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
-         warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
-      }
-  );
-
-
   // Mark that we don't have an lwp or thread id yet.
   // In case we attempt to set the priority before the thread starts.
   osthread->set_lwp_id(-1);
@@ -1145,13 +1081,6 @@
   // Remember that we created this thread so we can set priority on it
   osthread->set_vm_created();
 
-  // Set the default thread priority.  If using bound threads, setting
-  // lwp priority will be delayed until thread start.
-  set_native_priority(thread,
-                      DefaultThreadPriority == -1 ?
-                        java_to_os_priority[NormPriority] :
-                        DefaultThreadPriority);
-
   // Initial thread state is INITIALIZED, not SUSPENDED
   osthread->set_state(INITIALIZED);
 
@@ -1333,39 +1262,8 @@
     jt->set_stack_size(stack_size);
   }
 
-   // 5/22/01: Right now alternate signal stacks do not handle
-   // throwing stack overflow exceptions, see bug 4463178
-   // Until a fix is found for this, T2 will NOT imply alternate signal
-   // stacks.
-   // If using T2 libthread threads, install an alternate signal stack.
-   // Because alternate stacks associate with LWPs on Solaris,
-   // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
-   // we prefer to explicitly stack bang.
-   // If not using T2 libthread, but using UseBoundThreads any threads
-   // (primordial thread, jni_attachCurrentThread) we do not create,
-   // probably are not bound, therefore they can not have an alternate
-   // signal stack. Since our stack banging code is generated and
-   // is shared across threads, all threads must be bound to allow
-   // using alternate signal stacks.  The alternative is to interpose
-   // on _lwp_create to associate an alt sig stack with each LWP,
-   // and this could be a problem when the JVM is embedded.
-   // We would prefer to use alternate signal stacks with T2
-   // Since there is currently no accurate way to detect T2
-   // we do not. Assuming T2 when running T1 causes sig 11s or assertions
-   // on installing alternate signal stacks
-
-
-   // 05/09/03: removed alternate signal stack support for Solaris
-   // The alternate signal stack mechanism is no longer needed to
-   // handle stack overflow. This is now handled by allocating
-   // guard pages (red zone) and stackbanging.
-   // Initially the alternate signal stack mechanism was removed because
-   // it did not work with T1 llibthread. Alternate
-   // signal stacks MUST have all threads bound to lwps. Applications
-   // can create their own threads and attach them without their being
-   // bound under T1. This is frequently the case for the primordial thread.
-   // If we were ever to reenable this mechanism we would need to
-   // use the dynamic check for T2 libthread.
+  // With the T2 libthread (T1 is no longer supported) threads are always bound
+  // and we use stackbanging in all cases.
 
   os::Solaris::init_thread_fpu_state();
   std::set_terminate(_handle_uncaught_cxx_exception);
@@ -2092,12 +1990,7 @@
 }
 
 void os::Solaris::print_libversion_info(outputStream* st) {
-  if (os::Solaris::T2_libthread()) {
-    st->print("  (T2 libthread)");
-  }
-  else {
-    st->print("  (T1 libthread)");
-  }
+  st->print("  (T2 libthread)");
   st->cr();
 }
 
@@ -3323,41 +3216,10 @@
 
 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
 
-
-// On Solaris we found that yield_all doesn't always yield to all other threads.
-// There have been cases where there is a thread ready to execute but it doesn't
-// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
-// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
-// SIGWAITING signal which will cause a new lwp to be created. So we count the
-// number of times yield_all is called in the one loop and increase the sleep
-// time after 8 attempts. If this fails too we increase the concurrency level
-// so that the starving thread would get an lwp
-
-void os::yield_all(int attempts) {
+void os::yield_all() {
   // Yields to all threads, including threads with lower priorities
-  if (attempts == 0) {
-    os::sleep(Thread::current(), 1, false);
-  } else {
-    int iterations = attempts % 30;
-    if (iterations == 0 && !os::Solaris::T2_libthread()) {
-      // thr_setconcurrency and _getconcurrency make sense only under T1.
-      int noofLWPS = thr_getconcurrency();
-      if (noofLWPS < (Threads::number_of_threads() + 2)) {
-        thr_setconcurrency(thr_getconcurrency() + 1);
-      }
-    } else if (iterations < 25) {
-      os::sleep(Thread::current(), 1, false);
-    } else {
-      os::sleep(Thread::current(), 10, false);
-    }
-  }
-}
-
-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
-  os::yield_all(attempts);
-}
-
+  os::sleep(Thread::current(), 1, false);
+}
 
 // Interface for setting lwp priorities.  If we are using T2 libthread,
 // which forces the use of BoundThreads or we manually set UseBoundThreads,
@@ -3365,6 +3227,9 @@
 // function is meaningless in this mode so we must adjust the real lwp's priority
 // The routines below implement the getting and setting of lwp priorities.
 //
+// Note: T2 is now the only supported libthread. The UseBoundThreads flag is
+//       being deprecated and all threads are now bound.
+//
 // Note: There are three priority scales used on Solaris.  Java priorities
 //       which range from 1 to 10, the libthread "thr_setprio" scale which ranges
 //       from 0 to 127, and the current scheduling class of the process we
@@ -3437,29 +3302,19 @@
 
   if (!UseThreadPriorities) return 0;
 
-  // We are using Bound threads, we need to determine our priority ranges
-  if (os::Solaris::T2_libthread() || UseBoundThreads) {
-    // If ThreadPriorityPolicy is 1, switch tables
-    if (ThreadPriorityPolicy == 1) {
-      for (i = 0 ; i < CriticalPriority+1; i++)
-        os::java_to_os_priority[i] = prio_policy1[i];
-    }
-    if (UseCriticalJavaThreadPriority) {
-      // MaxPriority always maps to the FX scheduling class and criticalPrio.
-      // See set_native_priority() and set_lwp_class_and_priority().
-      // Save original MaxPriority mapping in case attempt to
-      // use critical priority fails.
-      java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
-      // Set negative to distinguish from other priorities
-      os::java_to_os_priority[MaxPriority] = -criticalPrio;
-    }
-  }
-  // Not using Bound Threads, set to ThreadPolicy 1
-  else {
-    for ( i = 0 ; i < CriticalPriority+1; i++ ) {
+  // If ThreadPriorityPolicy is 1, switch tables
+  if (ThreadPriorityPolicy == 1) {
+    for (i = 0 ; i < CriticalPriority+1; i++)
       os::java_to_os_priority[i] = prio_policy1[i];
-    }
-    return 0;
+  }
+  if (UseCriticalJavaThreadPriority) {
+    // MaxPriority always maps to the FX scheduling class and criticalPrio.
+    // See set_native_priority() and set_lwp_class_and_priority().
+    // Save original MaxPriority mapping in case attempt to
+    // use critical priority fails.
+    java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
+    // Set negative to distinguish from other priorities
+    os::java_to_os_priority[MaxPriority] = -criticalPrio;
   }
 
   // Get IDs for a set of well-known scheduling classes.
@@ -3583,10 +3438,6 @@
 
 
 // set_lwp_class_and_priority
-//
-// Set the class and priority of the lwp.  This call should only
-// be made when using bound threads (T2 threads are bound by default).
-//
 int set_lwp_class_and_priority(int ThreadID, int lwpid,
                                int newPrio, int new_class, bool scale) {
   int rslt;
@@ -3812,23 +3663,20 @@
     status = thr_setprio(thread->osthread()->thread_id(), newpri);
   }
 
-  if (os::Solaris::T2_libthread() ||
-      (UseBoundThreads && osthread->is_vm_created())) {
-    int lwp_status =
-      set_lwp_class_and_priority(osthread->thread_id(),
-                                 osthread->lwp_id(),
-                                 newpri,
-                                 fxcritical ? fxLimits.schedPolicy : myClass,
-                                 !fxcritical);
-    if (lwp_status != 0 && fxcritical) {
-      // Try again, this time without changing the scheduling class
-      newpri = java_MaxPriority_to_os_priority;
-      lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
-                                              osthread->lwp_id(),
-                                              newpri, myClass, false);
-    }
-    status |= lwp_status;
-  }
+  int lwp_status =
+          set_lwp_class_and_priority(osthread->thread_id(),
+          osthread->lwp_id(),
+          newpri,
+          fxcritical ? fxLimits.schedPolicy : myClass,
+          !fxcritical);
+  if (lwp_status != 0 && fxcritical) {
+    // Try again, this time without changing the scheduling class
+    newpri = java_MaxPriority_to_os_priority;
+    lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
+            osthread->lwp_id(),
+            newpri, myClass, false);
+  }
+  status |= lwp_status;
   return (status == 0) ? OS_OK : OS_ERR;
 }
 
@@ -4495,13 +4343,6 @@
   }
 }
 
-// (Static) wrappers for the new libthread API
-int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
-int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
-int_fnP_thread_t_i os::Solaris::_thr_setmutator;
-int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
-int_fnP_thread_t os::Solaris::_thr_continue_mutator;
-
 // (Static) wrapper for getisax(2) call.
 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
 
@@ -4536,78 +4377,9 @@
   return addr;
 }
 
-
-
-// isT2_libthread()
-//
-// Routine to determine if we are currently using the new T2 libthread.
-//
-// We determine if we are using T2 by reading /proc/self/lstatus and
-// looking for a thread with the ASLWP bit set.  If we find this status
-// bit set, we must assume that we are NOT using T2.  The T2 team
-// has approved this algorithm.
-//
-// We need to determine if we are running with the new T2 libthread
-// since setting native thread priorities is handled differently
-// when using this library.  All threads created using T2 are bound
-// threads. Calling thr_setprio is meaningless in this case.
-//
-bool isT2_libthread() {
-  static prheader_t * lwpArray = NULL;
-  static int lwpSize = 0;
-  static int lwpFile = -1;
-  lwpstatus_t * that;
-  char lwpName [128];
-  bool isT2 = false;
-
-#define ADR(x)  ((uintptr_t)(x))
-#define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
-
-  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
-  if (lwpFile < 0) {
-      if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
-      return false;
-  }
-  lwpSize = 16*1024;
-  for (;;) {
-    ::lseek64 (lwpFile, 0, SEEK_SET);
-    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
-    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
-      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
-      break;
-    }
-    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
-       // We got a good snapshot - now iterate over the list.
-      int aslwpcount = 0;
-      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
-        that = LWPINDEX(lwpArray,i);
-        if (that->pr_flags & PR_ASLWP) {
-          aslwpcount++;
-        }
-      }
-      if (aslwpcount == 0) isT2 = true;
-      break;
-    }
-    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
-    FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
-  }
-
-  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
-  ::close (lwpFile);
-  if (ThreadPriorityVerbose) {
-    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
-    else tty->print_cr("We are not running with a T2 libthread\n");
-  }
-  return isT2;
-}
-
-
 void os::Solaris::libthread_init() {
   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
 
-  // Determine if we are running with the new T2 libthread
-  os::Solaris::set_T2_libthread(isT2_libthread());
-
   lwp_priocntl_init();
 
   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
@@ -4618,22 +4390,6 @@
     guarantee(func != NULL, "libthread.so is too old.");
   }
 
-  // Initialize the new libthread getstate API wrappers
-  func = resolve_symbol("thr_getstate");
-  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
-
-  func = resolve_symbol("thr_setstate");
-  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
-
-  func = resolve_symbol("thr_setmutator");
-  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
-
-  func = resolve_symbol("thr_suspend_mutator");
-  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
-
-  func = resolve_symbol("thr_continue_mutator");
-  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
-
   int size;
   void (*handler_info_func)(address *, int *);
   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
@@ -5536,11 +5292,7 @@
 }
 
 bool os::is_thread_cpu_time_supported() {
-  if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
-    return true;
-  } else {
-    return false;
-  }
+  return true;
 }
 
 // System loadavg support.  Returns -1 if load average cannot be obtained.
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -41,19 +41,6 @@
   #define TRS_LWPID       2
   #define TRS_INVALID     3
 
-  // _T2_libthread is true if we believe we are running with the newer
-  // SunSoft lib/lwp/libthread: default Solaris 9, available Solaris 8
-  // which is a lightweight libthread that also supports all T1
-  static bool _T2_libthread;
-  // These refer to new libthread interface functions
-  // They get intialized if we dynamically detect new libthread
-  static int_fnP_thread_t_iP_uP_stack_tP_gregset_t _thr_getstate;
-  static int_fnP_thread_t_i_gregset_t _thr_setstate;
-  static int_fnP_thread_t_i _thr_setmutator;
-  static int_fnP_thread_t _thr_suspend_mutator;
-  static int_fnP_thread_t _thr_continue_mutator;
-  // libthread_init sets the above, if the new functionality is detected
-
   // initialized to libthread or lwp synchronization primitives depending on UseLWPSynchronization
   static int_fnP_mutex_tP _mutex_lock;
   static int_fnP_mutex_tP _mutex_trylock;
@@ -214,29 +201,6 @@
   static struct sigaction *get_chained_signal_action(int sig);
   static bool chained_handler(int sig, siginfo_t *siginfo, void *context);
 
-  // The following allow us to link against both the old and new libthread (2.8)
-  // and exploit the new libthread functionality if available.
-
-  static bool T2_libthread()                                { return _T2_libthread; }
-  static void set_T2_libthread(bool T2_libthread) { _T2_libthread = T2_libthread; }
-
-  static int thr_getstate(thread_t tid, int *flag, unsigned *lwp, stack_t *ss, gregset_t rs)
-    { return _thr_getstate(tid, flag, lwp, ss, rs); }
-  static void set_thr_getstate(int_fnP_thread_t_iP_uP_stack_tP_gregset_t func)
-    { _thr_getstate = func; }
-
-  static int thr_setstate(thread_t tid, int flag, gregset_t rs)   { return _thr_setstate(tid, flag, rs); }
-  static void set_thr_setstate(int_fnP_thread_t_i_gregset_t func) { _thr_setstate = func; }
-
-  static int thr_setmutator(thread_t tid, int enabled)    { return _thr_setmutator(tid, enabled); }
-  static void set_thr_setmutator(int_fnP_thread_t_i func) { _thr_setmutator = func; }
-
-  static int  thr_suspend_mutator(thread_t tid)              { return _thr_suspend_mutator(tid); }
-  static void set_thr_suspend_mutator(int_fnP_thread_t func) { _thr_suspend_mutator = func; }
-
-  static int  thr_continue_mutator(thread_t tid)              { return _thr_continue_mutator(tid); }
-  static void set_thr_continue_mutator(int_fnP_thread_t func) { _thr_continue_mutator = func; }
-
   // Allows us to switch between lwp and thread -based synchronization
   static int mutex_lock(mutex_t *mx)    { return _mutex_lock(mx); }
   static int mutex_trylock(mutex_t *mx) { return _mutex_trylock(mx); }
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -3518,7 +3518,7 @@
 
 void os::yield() {  os::NakedYield(); }
 
-void os::yield_all(int attempts) {
+void os::yield_all() {
   // Yields to all threads, including threads with lower priorities
   Sleep(1);
 }
@@ -3864,12 +3864,6 @@
   win32::setmode_streams();
   init_page_sizes((size_t) win32::vm_page_size());
 
-  // For better scalability on MP systems (must be called after initialize_system_info)
-#ifndef PRODUCT
-  if (is_MP()) {
-    NoYieldsInMicrolock = true;
-  }
-#endif
   // This may be overridden later when argument processing is done.
   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
     os::win32::is_windows_2003());
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -52,9 +52,6 @@
   return (void*)::GetProcAddress((HMODULE)lib, name);
 }
 
-// Used to improve time-sharing on some systems
-inline void os::loop_breaker(int attempts) {}
-
 inline bool os::obsolete_option(const JavaVMOption *option) {
   return false;
 }
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -270,31 +270,6 @@
   }
 }
 
-static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
-  char lwpstatusfile[PROCFILE_LENGTH];
-  int lwpfd, err;
-
-  if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
-    return (err);
-  if (*flags == TRS_LWPID) {
-    sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
-            *lwp);
-    if ((lwpfd = ::open(lwpstatusfile, O_RDONLY)) < 0) {
-      perror("thr_mutator_status: open lwpstatus");
-      return (EINVAL);
-    }
-    if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
-        sizeof (lwpstatus_t)) {
-      perror("thr_mutator_status: read lwpstatus");
-      (void) ::close(lwpfd);
-      return (EINVAL);
-    }
-    (void) ::close(lwpfd);
-  }
-  return (0);
-}
-
-
 bool os::is_allocatable(size_t bytes) {
 #ifdef _LP64
    return true;
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -256,30 +256,6 @@
   }
 }
 
-static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
-  char lwpstatusfile[PROCFILE_LENGTH];
-  int lwpfd, err;
-
-  if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
-    return (err);
-  if (*flags == TRS_LWPID) {
-    sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
-            *lwp);
-    if ((lwpfd = open(lwpstatusfile, O_RDONLY)) < 0) {
-      perror("thr_mutator_status: open lwpstatus");
-      return (EINVAL);
-    }
-    if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
-        sizeof (lwpstatus_t)) {
-      perror("thr_mutator_status: read lwpstatus");
-      (void) close(lwpfd);
-      return (EINVAL);
-    }
-    (void) close(lwpfd);
-  }
-  return (0);
-}
-
 #ifndef AMD64
 
 // Detecting SSE support by OS
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -2777,6 +2777,11 @@
                      "Short length on BootstrapMethods in class file %s",
                      CHECK);
 
+  guarantee_property(attribute_byte_length > sizeof(u2),
+                     "Invalid BootstrapMethods attribute length %u in class file %s",
+                     attribute_byte_length,
+                     CHECK);
+
   // The attribute contains a counted array of counted tuples of shorts,
  //    representing bootstrap specifiers:
   //    length*{bootstrap_method_index, argument_count*{argument_index}}
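
The added guarantee closes a parsing hole: an attribute_byte_length of 2 or less cannot even hold the u2 count that heads the attribute, and the per-entry arithmetic that follows would index past the end. The general defensive pattern for such counted binary layouts is to check every read against the bytes remaining; a hedged sketch with illustrative types, not the classFileParser API:

    #include <cstddef>
    #include <cstdint>

    // Walk length*{u2 bootstrap_method_ref, u2 argc, argc*{u2 arg}} and
    // verify every u2 read fits in the attribute before performing it.
    static bool check_bootstrap_methods(const uint8_t *p, size_t len) {
      size_t pos = 0;
      auto read_u2 = [&](uint16_t *out) -> bool {
        if (len - pos < 2) return false;       // would run off the end
        *out = (uint16_t)((p[pos] << 8) | p[pos + 1]);
        pos += 2;
        return true;
      };
      uint16_t count;
      if (!read_u2(&count)) return false;      // the case the guarantee adds
      for (uint16_t i = 0; i < count; i++) {
        uint16_t method_ref, argc, arg;
        if (!read_u2(&method_ref) || !read_u2(&argc)) return false;
        for (uint16_t j = 0; j < argc; j++) {
          if (!read_u2(&arg)) return false;
        }
      }
      return pos == len;                       // and no trailing bytes
    }
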
@@ -4180,8 +4185,12 @@
 
   clear_class_metadata();
 
-  // deallocate the klass if already created.
-  MetadataFactory::free_metadata(_loader_data, _klass);
+  // deallocate the klass if already created.  Don't directly deallocate, but add
+  // to the deallocate list so that the klass is removed from the CLD::_klasses list
+  // at a safepoint.
+  if (_klass != NULL) {
+    _loader_data->add_to_deallocate_list(_klass);
+  }
   _klass = NULL;
 }
 
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -464,25 +464,26 @@
 void java_lang_String::print(oop java_string, outputStream* st) {
   assert(java_string->klass() == SystemDictionary::String_klass(), "must be java_string");
   typeArrayOop value  = java_lang_String::value(java_string);
-  int          offset = java_lang_String::offset(java_string);
-  int          length = java_lang_String::length(java_string);
-
-  int end = MIN2(length, 100);
+
   if (value == NULL) {
     // This can happen if, e.g., printing a String
     // object before its initializer has been called
-    st->print_cr("NULL");
-  } else {
-    st->print("\"");
-    for (int index = 0; index < length; index++) {
-      st->print("%c", value->char_at(index + offset));
-    }
-    st->print("\"");
+    st->print("NULL");
+    return;
   }
-}
-
-static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
-  Handle mirror (THREAD, fd->field_holder()->java_mirror());
+
+  int offset = java_lang_String::offset(java_string);
+  int length = java_lang_String::length(java_string);
+
+  st->print("\"");
+  for (int index = 0; index < length; index++) {
+    st->print("%c", value->char_at(index + offset));
+  }
+  st->print("\"");
+}
+
+
+static void initialize_static_field(fieldDescriptor* fd, Handle mirror, TRAPS) {
   assert(mirror.not_null() && fd->is_static(), "just checking");
   if (fd->has_initial_value()) {
     BasicType t = fd->field_type();
@@ -549,21 +550,45 @@
   create_mirror(k, Handle(NULL), CHECK);
 }
 
-oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
+void java_lang_Class::initialize_mirror_fields(KlassHandle k,
+                                               Handle mirror,
+                                               Handle protection_domain,
+                                               TRAPS) {
+  // Allocate a simple java object for a lock.
+  // This needs to be a java object because during class initialization
+  // it can be held across a java call.
+  typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
+  set_init_lock(mirror(), r);
+
+  // Set protection domain also
+  set_protection_domain(mirror(), protection_domain());
+
+  // Initialize static fields
+  InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, mirror, CHECK);
+}
+
+void java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
   assert(k->java_mirror() == NULL, "should only assign mirror once");
   // Use this moment of initialization to cache modifier_flags also,
   // to support Class.getModifiers().  Instance classes recalculate
   // the cached flags after the class file is parsed, but before the
   // class is put into the system dictionary.
-  int computed_modifiers = k->compute_modifier_flags(CHECK_0);
+  int computed_modifiers = k->compute_modifier_flags(CHECK);
   k->set_modifier_flags(computed_modifiers);
   // Class_klass has to be loaded because it is used to allocate
   // the mirror.
   if (SystemDictionary::Class_klass_loaded()) {
     // Allocate mirror (java.lang.Class instance)
-    Handle mirror = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK_0);
+    Handle mirror = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK);
+
+    // Setup indirection from mirror->klass
+    if (!k.is_null()) {
+      java_lang_Class::set_klass(mirror(), k());
+    }
 
     InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
+    assert(oop_size(mirror()) == mk->instance_size(k), "should have been set");
+
     java_lang_Class::set_static_oop_field_count(mirror(), mk->compute_static_oop_field_count(mirror()));
 
     // It might also have a component mirror.  This mirror must already exist.
@@ -576,29 +601,32 @@
         assert(k->oop_is_objArray(), "Must be");
         Klass* element_klass = ObjArrayKlass::cast(k())->element_klass();
         assert(element_klass != NULL, "Must have an element klass");
-          comp_mirror = element_klass->java_mirror();
+        comp_mirror = element_klass->java_mirror();
       }
       assert(comp_mirror.not_null(), "must have a mirror");
 
-        // Two-way link between the array klass and its component mirror:
+      // Two-way link between the array klass and its component mirror:
       ArrayKlass::cast(k())->set_component_mirror(comp_mirror());
       set_array_klass(comp_mirror(), k());
     } else {
       assert(k->oop_is_instance(), "Must be");
 
-      // Allocate a simple java object for a lock.
-      // This needs to be a java object because during class initialization
-      // it can be held across a java call.
-      typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
-      set_init_lock(mirror(), r);
-
-      // Set protection domain also
-      set_protection_domain(mirror(), protection_domain());
-
-      // Initialize static fields
-      InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
+      initialize_mirror_fields(k, mirror, protection_domain, THREAD);
+      if (HAS_PENDING_EXCEPTION) {
+        // If any of the fields throws an exception like OOM remove the klass field
+        // from the mirror so GC doesn't follow it after the klass has been deallocated.
+        // This mirror looks like a primitive type, which logically it is because
+        // it represents no class.
+        java_lang_Class::set_klass(mirror(), NULL);
+        return;
+      }
     }
-    return mirror();
+
+    // Setup indirection from klass->mirror last
+    // after any exceptions can happen during allocations.
+    if (!k.is_null()) {
+      k->set_java_mirror(mirror());
+    }
   } else {
     if (fixup_mirror_list() == NULL) {
       GrowableArray<Klass*>* list =
@@ -606,12 +634,10 @@
       set_fixup_mirror_list(list);
     }
     fixup_mirror_list()->push(k());
-    return NULL;
   }
 }
 
 
-
 int  java_lang_Class::oop_size(oop java_class) {
   assert(_oop_size_offset != 0, "must be set");
   return java_class->int_field(_oop_size_offset);
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -246,11 +246,12 @@
 
   static void set_init_lock(oop java_class, oop init_lock);
   static void set_protection_domain(oop java_class, oop protection_domain);
+  static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS);
  public:
   static void compute_offsets();
 
   // Instance creation
-  static oop  create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
+  static void create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
   static void fixup_mirror(KlassHandle k, TRAPS);
   static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
   // Conversion
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -810,11 +810,11 @@
   const int limit = the_table()->table_size();
 
   assert(0 <= start_idx && start_idx <= limit,
-         err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
+         err_msg("start_idx (%d) is out of bounds", start_idx));
   assert(0 <= end_idx && end_idx <= limit,
-         err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
+         err_msg("end_idx (%d) is out of bounds", end_idx));
   assert(start_idx <= end_idx,
-         err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+         err_msg("Index ordering: start_idx=%d, end_idx=%d",
                  start_idx, end_idx));
 
   for (int i = start_idx; i < end_idx; i += 1) {
@@ -833,11 +833,11 @@
   const int limit = the_table()->table_size();
 
   assert(0 <= start_idx && start_idx <= limit,
-         err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
+         err_msg("start_idx (%d) is out of bounds", start_idx));
   assert(0 <= end_idx && end_idx <= limit,
-         err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
+         err_msg("end_idx (%d) is out of bounds", end_idx));
   assert(start_idx <= end_idx,
-         err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+         err_msg("Index ordering: start_idx=%d, end_idx=%d",
                  start_idx, end_idx));
 
   for (int i = start_idx; i < end_idx; ++i) {
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -826,47 +826,6 @@
       }
     } // load_instance_class loop
 
-    if (HAS_PENDING_EXCEPTION) {
-      // An exception, such as OOM could have happened at various places inside
-      // load_instance_class. We might have partially initialized a shared class
-      // and need to clean it up.
-      if (class_loader.is_null()) {
-        // In some cases k may be null. Let's find the shared class again.
-        instanceKlassHandle ik(THREAD, find_shared_class(name));
-        if (ik.not_null()) {
-          if (ik->class_loader_data() == NULL) {
-            // We didn't go as far as Klass::restore_unshareable_info(),
-            // so nothing to clean up.
-          } else {
-            Klass *kk;
-            {
-              MutexLocker mu(SystemDictionary_lock, THREAD);
-              kk = find_class(d_index, d_hash, name, ik->class_loader_data());
-            }
-            if (kk != NULL) {
-              // No clean up is needed if the shared class has been entered
-              // into system dictionary, as load_shared_class() won't be called
-              // again.
-            } else {
-              // This must be done outside of the SystemDictionary_lock to
-              // avoid deadlock.
-              //
-              // Note that Klass::restore_unshareable_info (called via
-              // load_instance_class above) is also called outside
-              // of SystemDictionary_lock. Other threads are blocked from
-              // loading this class because they are waiting on the
-              // SystemDictionary_lock until this thread removes
-              // the placeholder below.
-              //
-              // This need to be re-thought when parallel-capable non-boot
-              // classloaders are supported by CDS (today they're not).
-              clean_up_shared_class(ik, class_loader, THREAD);
-            }
-          }
-        }
-      }
-    }
-
     if (load_instance_added == true) {
       // clean up placeholder entries for LOAD_INSTANCE success or error
       // This brackets the SystemDictionary updates for both defining
@@ -1272,19 +1231,6 @@
   return ik;
 }
 
-void SystemDictionary::clean_up_shared_class(instanceKlassHandle ik, Handle class_loader, TRAPS) {
-  // Updating methods must be done under a lock so multiple
-  // threads don't update these in parallel
-  // Shared classes are all currently loaded by the bootstrap
-  // classloader, so this will never cause a deadlock on
-  // a custom class loader lock.
-  {
-    Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
-    check_loader_lock_contention(lockObject, THREAD);
-    ObjectLocker ol(lockObject, THREAD, true);
-    ik->remove_unshareable_info();
-  }
-}
 
 instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
   instanceKlassHandle nh = instanceKlassHandle(); // null Handle
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -617,7 +617,6 @@
                                                Handle class_loader, TRAPS);
   static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
                                                Handle class_loader, TRAPS);
-  static void clean_up_shared_class(instanceKlassHandle ik, Handle class_loader, TRAPS);
   static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS);
   static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
   static void check_loader_lock_contention(Handle loader_lock, TRAPS);
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -57,10 +57,10 @@
 
   _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);
 
-  int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+  uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
 
   ConcurrentG1RefineThread *next = NULL;
-  for (int i = _n_threads - 1; i >= 0; i--) {
+  for (uint i = _n_threads - 1; i != UINT_MAX; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
     assert(t != NULL, "Conc refine should have been created");
     if (t->osthread() == NULL) {
@@ -87,7 +87,7 @@
 
 void ConcurrentG1Refine::stop() {
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       _threads[i]->stop();
     }
   }
@@ -96,7 +96,7 @@
 void ConcurrentG1Refine::reinitialize_threads() {
   reset_threshold_step();
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       _threads[i]->initialize();
     }
   }
@@ -104,7 +104,7 @@
 
 ConcurrentG1Refine::~ConcurrentG1Refine() {
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       delete _threads[i];
     }
     FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads, mtGC);
@@ -113,7 +113,7 @@
 
 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       tc->do_thread(_threads[i]);
     }
   }
@@ -121,20 +121,20 @@
 
 void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
   if (_threads != NULL) {
-    for (int i = 0; i < worker_thread_num(); i++) {
+    for (uint i = 0; i < worker_thread_num(); i++) {
       tc->do_thread(_threads[i]);
     }
   }
 }
 
-int ConcurrentG1Refine::thread_num() {
-  int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
+uint ConcurrentG1Refine::thread_num() {
+  uint n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
                                                 : ParallelGCThreads;
-  return MAX2<int>(n_threads, 1);
+  return MAX2<uint>(n_threads, 1);
 }
 
 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
-  for (int i = 0; i < _n_threads; ++i) {
+  for (uint i = 0; i < _n_threads; ++i) {
     _threads[i]->print_on(st);
     st->cr();
   }
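
With _n_threads now unsigned, the countdown loop above can no longer test i >= 0 (always true for a uint); it instead relies on defined unsigned wraparound, where decrementing 0 yields UINT_MAX. A self-contained sketch of the idiom:

    #include <climits>
    #include <cstdio>

    int main() {
      unsigned int n_threads = 3;
      // 0u - 1u wraps to UINT_MAX (well-defined for unsigned types), which
      // serves as the termination value. If n_threads were 0, the initial
      // value would already be UINT_MAX and the body would never run.
      for (unsigned int i = n_threads - 1; i != UINT_MAX; i--) {
        std::printf("initializing refinement thread %u\n", i);  // 2, 1, 0
      }
      return 0;
    }
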
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -39,8 +39,8 @@
 
 class ConcurrentG1Refine: public CHeapObj<mtGC> {
   ConcurrentG1RefineThread** _threads;
-  int _n_threads;
-  int _n_worker_threads;
+  uint _n_threads;
+  uint _n_worker_threads;
  /*
   * The value of the update buffer queue length falls into one of 3 zones:
   * green, yellow, red. If the value is in [0, green) nothing is
@@ -88,7 +88,7 @@
   // The RS sampling thread
   ConcurrentG1RefineThread * sampling_thread() const;
 
-  static int thread_num();
+  static uint thread_num();
 
   void print_worker_threads_on(outputStream* st) const;
 
@@ -100,8 +100,8 @@
   int yellow_zone() const     { return _yellow_zone; }
   int red_zone() const        { return _red_zone;    }
 
-  int total_thread_num() const  { return _n_threads;        }
-  int worker_thread_num() const { return _n_worker_threads; }
+  uint total_thread_num() const  { return _n_threads;        }
+  uint worker_thread_num() const { return _n_worker_threads; }
 
   int thread_threshold_step() const { return _thread_threshold_step; }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -33,7 +33,7 @@
 
 ConcurrentG1RefineThread::
 ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
-                         int worker_id_offset, int worker_id) :
+                         uint worker_id_offset, uint worker_id) :
   ConcurrentGCThread(),
   _worker_id_offset(worker_id_offset),
   _worker_id(worker_id),
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -38,8 +38,8 @@
 
   double _vtime_start;  // Initial virtual time.
   double _vtime_accum;  // Initial virtual time.
-  int _worker_id;
-  int _worker_id_offset;
+  uint _worker_id;
+  uint _worker_id_offset;
 
   // The refinement threads collection is a linked list. A predecessor can activate a successor
   // when the number of rset update buffers crosses a certain threshold. A successor
@@ -71,7 +71,7 @@
   virtual void run();
   // Constructor
   ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
-                           int worker_id_offset, int worker_id);
+                           uint worker_id_offset, uint worker_id);
 
   void initialize();
 
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -567,8 +567,8 @@
   _root_regions.init(_g1h, this);
 
   if (ConcGCThreads > ParallelGCThreads) {
-    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
-            "than ParallelGCThreads (" UINT32_FORMAT ").",
+    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
+            "than ParallelGCThreads (" UINTX_FORMAT ").",
             ConcGCThreads, ParallelGCThreads);
     return;
   }
@@ -1804,7 +1804,6 @@
 
 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
   G1CollectedHeap* _g1;
-  int _worker_num;
   size_t _max_live_bytes;
   uint _regions_claimed;
   size_t _freed_bytes;
@@ -1817,10 +1816,9 @@
 
 public:
   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                             int worker_num,
                              FreeRegionList* local_cleanup_list,
                              HRRSCleanupTask* hrrs_cleanup_task) :
-    _g1(g1), _worker_num(worker_num),
+    _g1(g1),
     _max_live_bytes(0), _regions_claimed(0),
     _freed_bytes(0),
     _claimed_region_time(0.0), _max_region_time(0.0),
@@ -1893,7 +1891,7 @@
     double start = os::elapsedTime();
     FreeRegionList local_cleanup_list("Local Cleanup List");
     HRRSCleanupTask hrrs_cleanup_task;
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
+    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                            &hrrs_cleanup_task);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
@@ -2145,7 +2143,7 @@
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  _cleanup_list.verify_list();
+  _cleanup_list.verify_optional();
   FreeRegionList tmp_free_list("Tmp Free List");
 
   if (G1ConcRegionFreeingVerbose) {
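
ConcGCThreads and ParallelGCThreads are uintx flags, i.e. pointer-sized, so the 32-bit macro understated their width on LP64 and UINTX_FORMAT is the matching specifier. A sketch of the same rule using the standard PRIuPTR macro in place of HotSpot's platform-defined one:

    #include <inttypes.h>
    #include <stdio.h>

    typedef uintptr_t uintx;   // HotSpot's uintx is pointer-sized

    int main(void) {
      uintx conc_gc_threads = 4;
      uintx parallel_gc_threads = 2;
      // A 32-bit specifier for a pointer-sized vararg is undefined on LP64;
      // PRIuPTR plays the role UINTX_FORMAT plays inside HotSpot.
      printf("ConcGCThreads=%" PRIuPTR ", ParallelGCThreads=%" PRIuPTR "\n",
             conc_gc_threads, parallel_gc_threads);
      return 0;
    }
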
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -34,12 +34,12 @@
 
 bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
                                    bool consume,
-                                   size_t worker_i) {
+                                   uint worker_i) {
   bool res = true;
   if (_buf != NULL) {
     res = apply_closure_to_buffer(cl, _buf, _index, _sz,
                                   consume,
-                                  (int) worker_i);
+                                  worker_i);
     if (res && consume) _index = _sz;
   }
   return res;
@@ -49,7 +49,7 @@
                                              void** buf,
                                              size_t index, size_t sz,
                                              bool consume,
-                                             int worker_i) {
+                                             uint worker_i) {
   if (cl == NULL) return true;
   for (size_t i = index; i < sz; i += oopSize) {
     int ind = byte_index_to_index((int)i);
@@ -79,8 +79,8 @@
 }
 
 // Determines how many mutator threads can process the buffers in parallel.
-size_t DirtyCardQueueSet::num_par_ids() {
-  return os::processor_count();
+uint DirtyCardQueueSet::num_par_ids() {
+  return (uint)os::processor_count();
 }
 
 void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
@@ -103,7 +103,7 @@
 }
 
 void DirtyCardQueueSet::iterate_closure_all_threads(bool consume,
-                                                    size_t worker_i) {
+                                                    uint worker_i) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
   for (JavaThread* t = Threads::first(); t; t = t->next()) {
     bool b = t->dirty_card_queue().apply_closure(_closure, consume);
@@ -126,11 +126,11 @@
 
   // We get the number of any par_id that this thread
   // might have already claimed.
-  int worker_i = thread->get_claimed_par_id();
+  uint worker_i = thread->get_claimed_par_id();
 
-  // If worker_i is not -1 then the thread has already claimed
+  // If worker_i is not UINT_MAX then the thread has already claimed
   // a par_id. We make note of it using the already_claimed value
-  if (worker_i != -1) {
+  if (worker_i != UINT_MAX) {
     already_claimed = true;
   } else {
 
@@ -142,7 +142,7 @@
   }
 
   bool b = false;
-  if (worker_i != -1) {
+  if (worker_i != UINT_MAX) {
     b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
                                                 _sz, true, worker_i);
     if (b) Atomic::inc(&_processed_buffers_mut);
@@ -154,8 +154,8 @@
       // we release the id
       _free_ids->release_par_id(worker_i);
 
-      // and set the claimed_id in the thread to -1
-      thread->set_claimed_par_id(-1);
+      // and set the claimed_id in the thread to UINT_MAX
+      thread->set_claimed_par_id(UINT_MAX);
     }
   }
   return b;
@@ -186,7 +186,7 @@
 
 bool DirtyCardQueueSet::
 apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
-                                         int worker_i,
+                                         uint worker_i,
                                          BufferNode* nd) {
   if (nd != NULL) {
     void **buf = BufferNode::make_buffer_from_node(nd);
@@ -208,7 +208,7 @@
 }
 
 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
-                                                          int worker_i,
+                                                          uint worker_i,
                                                           int stop_at,
                                                           bool during_pause) {
   assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
@@ -218,7 +218,7 @@
   return res;
 }
 
-bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
+bool DirtyCardQueueSet::apply_closure_to_completed_buffer(uint worker_i,
                                                           int stop_at,
                                                           bool during_pause) {
   return apply_closure_to_completed_buffer(_closure, worker_i,
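
Once the par id is a uint, the "no id claimed" marker moves from -1 to UINT_MAX, as in the claim/release dance above. A miniature of that protocol, with hypothetical names:

    #include <climits>

    // Hypothetical miniature of the claimed-par-id handshake.
    struct WorkerState {
      unsigned int claimed_par_id;
      WorkerState() : claimed_par_id(UINT_MAX) {}          // starts unclaimed

      bool has_claim() const { return claimed_par_id != UINT_MAX; }
      void claim(unsigned int id) { claimed_par_id = id; }
      void release()              { claimed_par_id = UINT_MAX; }
    };

    int main() {
      WorkerState w;
      w.claim(3u);
      w.release();
      return w.has_claim() ? 1 : 0;   // 0: back to the unclaimed sentinel
    }
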
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -36,7 +36,7 @@
 public:
   // Process the card whose card table entry is "card_ptr".  If it returns
   // "false", terminate the iteration early.
-  virtual bool do_card_ptr(jbyte* card_ptr, int worker_i = 0) = 0;
+  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
 };
 
 // A ptrQueue whose elements are "oops", pointers to object heads.
@@ -53,7 +53,7 @@
   // deletes processed entries from logs.
   bool apply_closure(CardTableEntryClosure* cl,
                      bool consume = true,
-                     size_t worker_i = 0);
+                     uint worker_i = 0);
 
   // Apply the closure to all elements of "buf", down to "index"
   // (inclusive).  If it returns "false", then a closure application returned
@@ -63,7 +63,7 @@
   static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
                                       void** buf, size_t index, size_t sz,
                                       bool consume = true,
-                                      int worker_i = 0);
+                                      uint worker_i = 0);
   void **get_buf() { return _buf;}
   void set_buf(void **buf) {_buf = buf;}
   size_t get_index() { return _index;}
@@ -98,7 +98,7 @@
 
   // The number of parallel ids that can be claimed to allow collector or
   // mutator threads to do card-processing work.
-  static size_t num_par_ids();
+  static uint num_par_ids();
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
@@ -115,7 +115,7 @@
   // change in the future.)  If "consume" is true, processed entries are
   // discarded.
   void iterate_closure_all_threads(bool consume = true,
-                                   size_t worker_i = 0);
+                                   uint worker_i = 0);
 
   // If there exists some completed buffer, pop it, then apply the
   // registered closure to all its elements, nulling out those elements
@@ -124,7 +124,7 @@
   // but is only partially completed before a "yield" happens, the
   // partially completed buffer (with its processed elements set to NULL)
   // is returned to the completed buffer set, and this call returns false.
-  bool apply_closure_to_completed_buffer(int worker_i = 0,
+  bool apply_closure_to_completed_buffer(uint worker_i = 0,
                                          int stop_at = 0,
                                          bool during_pause = false);
 
@@ -136,13 +136,13 @@
   // partially completed buffer (with its processed elements set to NULL)
   // is returned to the completed buffer set, and this call returns false.
   bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
-                                         int worker_i = 0,
+                                         uint worker_i = 0,
                                          int stop_at = 0,
                                          bool during_pause = false);
 
   // Helper routine for the above.
   bool apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
-                                                int worker_i,
+                                                uint worker_i,
                                                 BufferNode* nd);
 
   BufferNode* get_completed_buffer(int stop_at);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -304,26 +304,26 @@
     if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
       guarantee(entry > N_words,
                 err_msg("Should be in logarithmic region - "
-                        "entry: " UINT32_FORMAT ", "
-                        "_array->offset_array(c): " UINT32_FORMAT ", "
-                        "N_words: " UINT32_FORMAT,
-                        entry, _array->offset_array(c), N_words));
+                        "entry: %u, "
+                        "_array->offset_array(c): %u, "
+                        "N_words: %u",
+                        (uint)entry, (uint)_array->offset_array(c), (uint)N_words));
     }
     size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
     size_t landing_card = c - backskip;
     guarantee(landing_card >= (start_card - 1), "Inv");
     if (landing_card >= start_card) {
       guarantee(_array->offset_array(landing_card) <= entry,
-                err_msg("Monotonicity - landing_card offset: " UINT32_FORMAT ", "
-                        "entry: " UINT32_FORMAT,
-                        _array->offset_array(landing_card), entry));
+                err_msg("Monotonicity - landing_card offset: %u, "
+                        "entry: %u",
+                        (uint)_array->offset_array(landing_card), (uint)entry));
     } else {
       guarantee(landing_card == start_card - 1, "Tautology");
       // Note that N_words is the maximum offset value
       guarantee(_array->offset_array(landing_card) <= N_words,
-                err_msg("landing card offset: " UINT32_FORMAT ", "
-                        "N_words: " UINT32_FORMAT,
-                        _array->offset_array(landing_card), N_words));
+                err_msg("landing card offset: %u, "
+                        "N_words: %u",
+                        (uint)_array->offset_array(landing_card), (uint)N_words));
     }
   }
 }
@@ -554,21 +554,20 @@
           (_array->offset_array(orig_index) > 0 &&
          _array->offset_array(orig_index) <= N_words),
          err_msg("offset array should have been set - "
-                  "orig_index offset: " UINT32_FORMAT ", "
+                  "orig_index offset: %u, "
                   "blk_start: " PTR_FORMAT ", "
                   "boundary: " PTR_FORMAT,
-                  _array->offset_array(orig_index),
+                  (uint)_array->offset_array(orig_index),
                   blk_start, boundary));
   for (size_t j = orig_index + 1; j <= end_index; j++) {
     assert(_array->offset_array(j) > 0 &&
            _array->offset_array(j) <=
              (u_char) (N_words+BlockOffsetArray::N_powers-1),
            err_msg("offset array should have been set - "
-                   UINT32_FORMAT " not > 0 OR "
-                   UINT32_FORMAT " not <= " UINT32_FORMAT,
-                   _array->offset_array(j),
-                   _array->offset_array(j),
-                   (u_char) (N_words+BlockOffsetArray::N_powers-1)));
+                   "%u not > 0 OR %u not <= %u",
+                   (uint) _array->offset_array(j),
+                   (uint) _array->offset_array(j),
+                   (uint) (N_words+BlockOffsetArray::N_powers-1)));
   }
 #endif
 }
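
The offset-array entries are u_char, which default-promotes to int when passed through a varargs call; the added (uint) casts keep the %u specifier and the promoted argument in agreement. A compact illustration (%zu is standard C's counterpart to HotSpot's SIZE_FORMAT):

    #include <cstdio>

    int main() {
      unsigned char entry = 200;   // like _array->offset_array(c)
      size_t offset = 42;
      // Narrow integer types promote through "..."; the cast documents the
      // intended width. size_t gets its own dedicated specifier.
      std::printf("entry: %u, offset: %zu\n", (unsigned int)entry, offset);
      return 0;
    }
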
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -146,8 +146,8 @@
   void check_offset(size_t offset, const char* msg) const {
     assert(offset <= N_words,
            err_msg("%s - "
-                   "offset: " UINT32_FORMAT", N_words: " UINT32_FORMAT,
-                   msg, offset, N_words));
+                   "offset: " SIZE_FORMAT", N_words: %u",
+                   msg, offset, (uint)N_words));
   }
 
   // Bounds checking accessors:
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -102,7 +102,7 @@
                               ConcurrentG1Refine* cg1r) :
     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
   {}
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
     // This path is executed by the concurrent refine or mutator threads,
     // concurrently, and so we do not care if card_ptr contains references
@@ -131,7 +131,7 @@
   {
     for (int i = 0; i < 256; i++) _histo[i] = 0;
   }
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
       unsigned char* ujb = (unsigned char*)card_ptr;
@@ -160,7 +160,7 @@
   RedirtyLoggedCardTableEntryClosure() :
     _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
 
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
       *card_ptr = 0;
@@ -1288,7 +1288,7 @@
   print_heap_before_gc();
   trace_heap_before_gc(gc_tracer);
 
-  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+  size_t metadata_prev_used = MetaspaceAux::used_bytes();
 
   verify_region_sets_optional();
 
@@ -2314,7 +2314,7 @@
 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                  DirtyCardQueue* into_cset_dcq,
                                                  bool concurrent,
-                                                 int worker_i) {
+                                                 uint worker_i) {
   // Clean cards in the hot card cache
   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
@@ -2843,7 +2843,7 @@
 
 // Given the id of a worker, obtain or calculate a suitable
 // starting region for iterating over the current collection set.
-HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
+HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
   assert(get_gc_time_stamp() > 0, "should have been updated by now");
 
   HeapRegion* result = NULL;
@@ -5103,7 +5103,7 @@
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         G1KlassScanClosure* scan_klasses,
-                        int worker_i) {
+                        uint worker_i) {
 
   // First scan the strong roots
   double ext_roots_start = os::elapsedTime();
@@ -5207,10 +5207,10 @@
 
   ~G1StringSymbolTableUnlinkTask() {
     guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
-              err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
+              err_msg("claim value %d after unlink less than initial string table size %d",
                       StringTable::parallel_claimed_index(), _initial_string_table_size));
     guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
-              err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
+              err_msg("claim value %d after unlink less than initial symbol table size %d",
                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
   }
 
@@ -5275,7 +5275,7 @@
 
 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
 public:
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     *card_ptr = CardTableModRefBS::dirty_card_val();
     return true;
   }
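
The do_card_ptr closures here override the pure virtual declared in dirtyCardQueue.hpp, so the int-to-uint parameter change has to land in every subclass at once: a leftover (jbyte*, int) signature would declare a new overload rather than an override and leave the class abstract. A reduced illustration (jbyte is approximated as signed char; C++11 override would catch the mistake at compile time, but HotSpot of this vintage predates it):

    #include <cstdio>

    typedef signed char jbyte;     // stand-in for HotSpot's jbyte
    typedef unsigned int uint;

    struct CardTableEntryClosure {
      virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
      virtual ~CardTableEntryClosure() {}
    };

    struct RedirtyClosure : public CardTableEntryClosure {
      // Declaring (jbyte*, int) here would hide, not override, the pure
      // virtual above, leaving RedirtyClosure abstract.
      virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
        std::printf("redirtied by worker %u\n", worker_i);
        *card_ptr = 1;
        return true;
      }
    };

    int main() {
      jbyte card = 0;
      RedirtyClosure cl;
      cl.do_card_ptr(&card, 0);
      return 0;
    }
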
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -845,7 +845,7 @@
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
                                G1KlassScanClosure* scan_klasses,
-                               int worker_i);
+                               uint worker_i);
 
   // Notifies all the necessary spaces that the committed space has
   // been updated (either expanded or shrunk). It should be called
@@ -1139,7 +1139,7 @@
 
   void iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                   DirtyCardQueue* into_cset_dcq,
-                                  bool concurrent, int worker_i);
+                                  bool concurrent, uint worker_i);
 
   // The shared block offset table array.
   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
@@ -1370,7 +1370,7 @@
 
   // Given the id of a worker, obtain or calculate a suitable
   // starting region for iterating over the current collection set.
-  HeapRegion* start_cset_region_for_worker(int worker_i);
+  HeapRegion* start_cset_region_for_worker(uint worker_i);
 
   // This is a convenience method that is used by the
   // HeapRegionIterator classes to calculate the starting region for
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1204,7 +1204,7 @@
          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
 
   if (full) {
-    _metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes();
+    _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
   }
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -147,7 +147,7 @@
 void WorkerDataArray<T>::verify() {
   for (uint i = 0; i < _length; i++) {
     assert(_data[i] != _uninitialized,
-        err_msg("Invalid data for worker " UINT32_FORMAT ", data: %lf, uninitialized: %lf",
+        err_msg("Invalid data for worker %u, data: %lf, uninitialized: %lf",
             i, (double)_data[i], (double)_uninitialized));
   }
 }
@@ -246,8 +246,8 @@
   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
 }
 
-void G1GCPhaseTimes::print_stats(int level, const char* str, double value, int workers) {
-  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %d]", str, value, workers);
+void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
+  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
 }
 
 double G1GCPhaseTimes::accounted_time_ms() {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -161,7 +161,7 @@
 
   // Helper methods for detailed logging
   void print_stats(int level, const char* str, double value);
-  void print_stats(int level, const char* str, double value, int workers);
+  void print_stats(int level, const char* str, double value, uint workers);
 
  public:
   G1GCPhaseTimes(uint max_gc_threads);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -44,9 +44,9 @@
     _hot_cache_idx = 0;
 
     // For refining the cards in the hot cache in parallel
-    int n_workers = (ParallelGCThreads > 0 ?
+    uint n_workers = (ParallelGCThreads > 0 ?
                         _g1h->workers()->total_workers() : 1);
-    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
+    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
     _hot_cache_par_claimed_idx = 0;
 
     _card_counts.initialize();
@@ -89,7 +89,7 @@
   return res;
 }
 
-void G1HotCardCache::drain(int worker_i,
+void G1HotCardCache::drain(uint worker_i,
                            G1RemSet* g1rs,
                            DirtyCardQueue* into_cset_dcq) {
   if (!default_use_cache()) {
@@ -122,8 +122,8 @@
             // RSet updating while within an evacuation pause.
             // In this case worker_i should be the id of a GC worker thread
             assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
-            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
-                   err_msg("incorrect worker id: "INT32_FORMAT, worker_i));
+            assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
+                   err_msg("incorrect worker id: %u", worker_i));
 
             into_cset_dcq->enqueue(card_ptr);
           }
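
The (int)n_workers cast exists because HotSpot's MAX2 is a single-type template, so mixed int/uint arguments fail deduction; call sites either cast one operand or name the type, as MAX2<uint> does elsewhere in this change. A sketch with MAX2 reduced to its essential shape:

    #include <cstdio>

    typedef unsigned int uint;

    // Essentially HotSpot's MAX2 from globalDefinitions.hpp.
    template <class T> static T MAX2(T a, T b) { return (a > b) ? a : b; }

    int main() {
      uint n_workers = 8;
      int hot_cache_size = 1024;
      // MAX2(1, hot_cache_size / n_workers) would not deduce T (int vs uint),
      // so either cast one operand or name T explicitly:
      int chunk = MAX2(1, hot_cache_size / (int)n_workers);
      uint n    = MAX2<uint>(n_workers, 1);
      std::printf("chunk=%d n=%u\n", chunk, n);
      return 0;
    }
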
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -99,7 +99,7 @@
 
   // Refine the cards that have delayed as a result of
   // being in the cache.
-  void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
+  void drain(uint worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
 
   // Set up for parallel processing of the cards in the hot cache
   void reset_hot_cache_claimed_index() {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -234,14 +234,14 @@
   HeapRegion* _from;
   OopsInHeapRegionClosure* _push_ref_cl;
   bool _record_refs_into_cset;
-  int _worker_i;
+  uint _worker_i;
 
 public:
   G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                 G1RemSet* rs,
                                 OopsInHeapRegionClosure* push_ref_cl,
                                 bool record_refs_into_cset,
-                                int worker_i = 0);
+                                uint worker_i = 0);
 
   void set_from(HeapRegion* from) {
     assert(from != NULL, "from region must be non-NULL");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -113,14 +113,14 @@
   G1SATBCardTableModRefBS *_ct_bs;
 
   double _strong_code_root_scan_time_sec;
-  int    _worker_i;
+  uint   _worker_i;
   int    _block_size;
   bool   _try_claimed;
 
 public:
   ScanRSClosure(OopsInHeapRegionClosure* oc,
                 CodeBlobToOopClosure* code_root_cl,
-                int worker_i) :
+                uint worker_i) :
     _oc(oc),
     _code_root_cl(code_root_cl),
     _strong_code_root_scan_time_sec(0.0),
@@ -162,7 +162,7 @@
 
   void printCard(HeapRegion* card_region, size_t card_index,
                  HeapWord* card_start) {
-    gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
+    gclog_or_tty->print_cr("T %u Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                            "RS names card %p: "
                            "[" PTR_FORMAT ", " PTR_FORMAT ")",
                            _worker_i,
@@ -241,7 +241,7 @@
 
 void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
                       CodeBlobToOopClosure* code_root_cl,
-                      int worker_i) {
+                      uint worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
 
@@ -274,13 +274,13 @@
                                               DirtyCardQueue* into_cset_dcq) :
     _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
   {}
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     // The only time we care about recording cards that
     // contain references that point into the collection set
     // is during RSet updating within an evacuation pause.
     // In this case worker_i should be the id of a GC worker thread.
     assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
-    assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
+    assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
 
     if (_g1rs->refine_card(card_ptr, worker_i, true)) {
       // 'card_ptr' contains references that point into the collection
@@ -295,7 +295,7 @@
   }
 };
 
-void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
+void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
@@ -320,14 +320,14 @@
 
 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                            CodeBlobToOopClosure* code_root_cl,
-                                           int worker_i) {
+                                           uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
 #endif
 
   // We cache the value of 'oc' closure into the appropriate slot in the
   // _cset_rs_update_cl for this worker
-  assert(worker_i < (int)n_workers(), "sanity");
+  assert(worker_i < n_workers(), "sanity");
   _cset_rs_update_cl[worker_i] = oc;
 
   // A DirtyCardQueue that is used to hold cards containing references
@@ -399,7 +399,7 @@
     _g1(g1), _ct_bs(bs)
   { }
 
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     // Construct the region representing the card.
     HeapWord* start = _ct_bs->addr_for(card_ptr);
     // And find the region containing it.
@@ -543,7 +543,7 @@
                               G1RemSet* rs,
                               OopsInHeapRegionClosure* push_ref_cl,
                               bool record_refs_into_cset,
-                              int worker_i) :
+                              uint worker_i) :
   _g1(g1h), _g1_rem_set(rs), _from(NULL),
   _record_refs_into_cset(record_refs_into_cset),
   _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
@@ -552,7 +552,7 @@
 // into the collection set, if we're checking for such references;
 // false otherwise.
 
-bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
+bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                            bool check_for_refs_into_cset) {
 
   // If the card is no longer dirty, nothing to do.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -97,7 +97,7 @@
   // In the sequential case this param will be ignored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                    CodeBlobToOopClosure* code_root_cl,
-                                   int worker_i);
+                                   uint worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
   // call.  Must call each of these once before and after (in sequential
@@ -109,9 +109,9 @@
 
   void scanRS(OopsInHeapRegionClosure* oc,
               CodeBlobToOopClosure* code_root_cl,
-              int worker_i);
+              uint worker_i);
 
-  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
+  void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
 
   CardTableModRefBS* ct_bs() { return _ct_bs; }
   size_t cardsScanned() { return _total_cards_scanned; }
@@ -138,7 +138,7 @@
   // if the given card contains oops that have references into the
   // current collection set.
   virtual bool refine_card(jbyte* card_ptr,
-                           int worker_i,
+                           uint worker_i,
                            bool check_for_refs_into_cset);
 
   // Print accumulated summary info from the start of the VM.
@@ -171,12 +171,12 @@
 class UpdateRSOopClosure: public ExtendedOopClosure {
   HeapRegion* _from;
   G1RemSet* _rs;
-  int _worker_i;
+  uint _worker_i;
 
   template <class T> void do_oop_work(T* p);
 
 public:
-  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
+  UpdateRSOopClosure(G1RemSet* rs, uint worker_i = 0) :
     _from(NULL), _rs(rs), _worker_i(worker_i)
   {}
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -390,7 +390,7 @@
 void FromCardCache::print(outputStream* out) {
   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
     for (uint j = 0; j < _max_regions; j++) {
-      out->print_cr("_from_card_cache["UINT32_FORMAT"]["UINT32_FORMAT"] = "INT32_FORMAT".",
+      out->print_cr("_from_card_cache[%u][%u] = %d.",
                     i, j, at(i, j));
     }
   }
@@ -430,7 +430,7 @@
   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
 
   if (G1TraceHeapRegionRememberedSet) {
-    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
+    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
                   hr()->bottom(), from_card,
                   FromCardCache::at((uint)tid, cur_hrs_ind));
   }
@@ -853,13 +853,13 @@
 // This can be done by either mutator threads together with the
 // concurrent refinement threads or GC threads.
 uint HeapRegionRemSet::num_par_rem_sets() {
-  return (uint)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
+  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
 }
 
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
   : _bosa(bosa),
-    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true),
+    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
     _code_roots(), _other_regions(hr, &_m) {
   reset_for_par_iteration();
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -30,7 +30,7 @@
 inline void HeapRegionSetBase::add(HeapRegion* hr) {
   check_mt_safety();
   assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
-  assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
+  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
 
   _count.increment(1u, hr->capacity());
   hr->set_containing_set(this);
@@ -40,7 +40,7 @@
 inline void HeapRegionSetBase::remove(HeapRegion* hr) {
   check_mt_safety();
   verify_region(hr);
-  assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
+  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
 
   hr->set_containing_set(NULL);
   assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
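
Checking only next() == NULL would wrongly treat the tail of a doubly linked region list as unlinked, since the tail still has a live prev(); the strengthened asserts test both links. A toy illustration:

    #include <cstddef>

    struct Region {
      Region* _next;
      Region* _prev;
      Region() : _next(NULL), _prev(NULL) {}
      Region* next() const { return _next; }
      Region* prev() const { return _prev; }
    };

    // The tail element satisfies next() == NULL but prev() != NULL, so a
    // membership check must look at both pointers.
    static bool is_unlinked(const Region* r) {
      return r->next() == NULL && r->prev() == NULL;
    }

    int main() {
      Region a, b;
      a._next = &b; b._prev = &a;       // two-element list
      return is_unlinked(&b) ? 1 : 0;   // b is the tail: still linked, so 0
    }
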
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -290,7 +290,7 @@
   shared_satb_queue()->apply_closure_and_empty(_closure);
 }
 
-void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
+void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
   SharedHeap* sh = SharedHeap::heap();
   int parity = sh->strong_roots_parity();
 
@@ -315,7 +315,7 @@
 }
 
 bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
-                                                              int worker) {
+                                                              uint worker) {
   BufferNode* nd = NULL;
   {
     MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -84,7 +84,7 @@
   // Utility function to support sequential and parallel versions.  If
   // "par" is true, then "worker" is the par thread id; if "false", worker
   // is ignored.
-  bool apply_closure_to_completed_buffer_work(bool par, int worker);
+  bool apply_closure_to_completed_buffer_work(bool par, uint worker);
 
 #ifdef ASSERT
   void dump_active_states(bool expected_active);
@@ -124,7 +124,7 @@
   // be called serially and at a safepoint.
   void iterate_closure_all_threads();
   // Parallel version of the above.
-  void par_iterate_closure_all_threads(int worker);
+  void par_iterate_closure_all_threads(uint worker);
 
   // If there exists some completed buffer, pop it, then apply the
   // registered closure to all its elements, and return true.  If no
@@ -133,7 +133,7 @@
     return apply_closure_to_completed_buffer_work(false, 0);
   }
   // Parallel version of the above.
-  bool par_apply_closure_to_completed_buffer(int worker) {
+  bool par_apply_closure_to_completed_buffer(uint worker) {
     return apply_closure_to_completed_buffer_work(true, worker);
   }
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -184,7 +184,7 @@
     size_t prev_used = heap->used();
 
     // Capture metadata size before collection for sizing.
-    size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+    size_t metadata_prev_used = MetaspaceAux::used_bytes();
 
     // For PrintGCDetails
     size_t old_gen_prev_used = old_gen->used_in_bytes();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -928,7 +928,7 @@
     _heap_used      = heap->used();
     _young_gen_used = heap->young_gen()->used_in_bytes();
     _old_gen_used   = heap->old_gen()->used_in_bytes();
-    _metadata_used  = MetaspaceAux::allocated_used_bytes();
+    _metadata_used  = MetaspaceAux::used_bytes();
   };
 
   size_t heap_used() const      { return _heap_used; }
--- a/hotspot/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -62,16 +62,16 @@
 };
 
 class MetaspaceSizes : public StackObj {
-  size_t _capacity;
+  size_t _committed;
   size_t _used;
   size_t _reserved;
 
  public:
-  MetaspaceSizes() : _capacity(0), _used(0), _reserved(0) {}
-  MetaspaceSizes(size_t capacity, size_t used, size_t reserved) :
-    _capacity(capacity), _used(used), _reserved(reserved) {}
+  MetaspaceSizes() : _committed(0), _used(0), _reserved(0) {}
+  MetaspaceSizes(size_t committed, size_t used, size_t reserved) :
+    _committed(committed), _used(used), _reserved(reserved) {}
 
-  size_t capacity() const { return _capacity; }
+  size_t committed() const { return _committed; }
   size_t used() const { return _used; }
   size_t reserved() const { return _reserved; }
 };
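
Renaming _capacity to _committed lines the summary up with the usual reserve/commit/use hierarchy for virtual memory. A standalone sketch of the invariant the three fields should satisfy (an illustration, not VM code):

    #include <cassert>
    #include <cstddef>

    // Address space is reserved, a subset is committed to real memory,
    // and a subset of that is actually used.
    static void check_sizes(size_t reserved, size_t committed, size_t used) {
      assert(used <= committed);
      assert(committed <= reserved);
    }

    int main() {
      check_sizes(/*reserved=*/1 << 20, /*committed=*/1 << 18, /*used=*/1 << 16);
      return 0;
    }
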
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -258,7 +258,7 @@
 static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
   TraceStructMetaspaceSizes meta_sizes;
 
-  meta_sizes.set_capacity(sizes.capacity());
+  meta_sizes.set_committed(sizes.committed());
   meta_sizes.set_used(sizes.used());
   meta_sizes.set_reserved(sizes.reserved());
 
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,16 +85,16 @@
 
 MetaspaceSummary CollectedHeap::create_metaspace_summary() {
   const MetaspaceSizes meta_space(
-      MetaspaceAux::allocated_capacity_bytes(),
-      MetaspaceAux::allocated_used_bytes(),
+      MetaspaceAux::committed_bytes(),
+      MetaspaceAux::used_bytes(),
       MetaspaceAux::reserved_bytes());
   const MetaspaceSizes data_space(
-      MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
-      MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
+      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
+      MetaspaceAux::used_bytes(Metaspace::NonClassType),
       MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
   const MetaspaceSizes class_space(
-      MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
-      MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
+      MetaspaceAux::committed_bytes(Metaspace::ClassType),
+      MetaspaceAux::used_bytes(Metaspace::ClassType),
       MetaspaceAux::reserved_bytes(Metaspace::ClassType));
 
   const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
@@ -582,36 +582,6 @@
   }
 }
 
-oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
-  debug_only(check_for_valid_allocation_state());
-  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
-  assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj;
-    assert(ScavengeRootsInCode > 0, "must be");
-    obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
-  post_allocation_setup_common(klass, obj);
-  assert(Universe::is_bootstrapping() ||
-         !((oop)obj)->is_array(), "must not be an array");
-  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
-  oop mirror = (oop)obj;
-
-  java_lang_Class::set_oop_size(mirror, size);
-
-  // Setup indirections
-  if (!real_klass.is_null()) {
-    java_lang_Class::set_klass(mirror, real_klass());
-    real_klass->set_java_mirror(mirror);
-  }
-
-  InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
-  assert(size == mk->instance_size(real_klass), "should have been set");
-
-  // notify jvmti and dtrace
-  post_allocation_notify(klass, (oop)obj);
-
-  return mirror;
-}
-
 /////////////// Unit tests ///////////////
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -312,9 +312,6 @@
   // May be overridden to set additional parallelism.
   virtual void set_par_threads(uint t) { _n_par_threads = t; };
 
-  // Allocate and initialize instances of Class
-  static oop Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS);
-
   // General obj/array allocation facilities.
   inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
   inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -257,6 +257,12 @@
   assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment");
   assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment");
   assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment");
+  assert(_min_gen0_size <= bound_minus_alignment(_min_gen0_size, _min_heap_byte_size),
+      "Ergonomics made minimum young generation larger than minimum heap");
+  assert(_initial_gen0_size <= bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size),
+      "Ergonomics made initial young generation larger than initial heap");
+  assert(_max_gen0_size <= bound_minus_alignment(_max_gen0_size, _max_heap_byte_size),
+      "Ergonomics made maximum young generation larger than maximum heap");
 }
 
 void TwoGenerationCollectorPolicy::assert_size_info() {
@@ -267,6 +273,9 @@
   assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment");
   assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment");
   assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes");
+  assert(_min_gen0_size + _min_gen1_size <= _min_heap_byte_size, "Minimum generation sizes exceed minimum heap size");
+  assert(_initial_gen0_size + _initial_gen1_size == _initial_heap_byte_size, "Initial generation sizes should match initial heap size");
+  assert(_max_gen0_size + _max_gen1_size == _max_heap_byte_size, "Maximum generation sizes should match maximum heap size");
 }
 #endif // ASSERT
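
Both the new asserts and the NewSize bounding in the next hunk lean on bound_minus_alignment, which caps a desired size so at least one generation alignment of room remains under the given maximum. A paraphrased sketch of its likely shape (the real version is a GenCollectorPolicy member reading _gen_alignment; the free-function form here is an adaptation):

    #include <cstddef>

    // Keep desired_size, unless that would leave less than one generation
    // alignment of room under maximum_size.
    static size_t bound_minus_alignment(size_t desired_size,
                                        size_t maximum_size,
                                        size_t gen_alignment) {
      size_t max_minus = maximum_size - gen_alignment;
      return desired_size < max_minus ? desired_size : max_minus;
    }

    int main() {
      // With an 8 MB maximum and 1 MB alignment, 9 MB desired caps at 7 MB.
      return bound_minus_alignment(9u << 20, 8u << 20, 1u << 20) == (7u << 20) ? 0 : 1;
    }
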
 
@@ -303,20 +312,26 @@
     }
   }
 
+  // Make sure NewSize allows an old generation to fit even if set on the command line
+  if (FLAG_IS_CMDLINE(NewSize) && NewSize >= _initial_heap_byte_size) {
+    warning("NewSize was set larger than initial heap size, will use initial heap size.");
+    NewSize = bound_minus_alignment(NewSize, _initial_heap_byte_size);
+  }
+
   // Now take the actual NewSize into account. We will silently increase NewSize
   // if the user specified a smaller or unaligned value.
-  smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
-  if (smallest_new_size != NewSize) {
+  uintx bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
+  bounded_new_size = MAX2(smallest_new_size, (uintx)align_size_down(bounded_new_size, _gen_alignment));
+  if (bounded_new_size != NewSize) {
     // Do not use FLAG_SET_ERGO to update NewSize here, since this will override
     // if NewSize was set on the command line or not. This information is needed
     // later when setting the initial and minimum young generation size.
-    NewSize = smallest_new_size;
+    NewSize = bounded_new_size;
   }
+  _min_gen0_size = smallest_new_size;
   _initial_gen0_size = NewSize;
 
   if (!FLAG_IS_DEFAULT(MaxNewSize)) {
-    uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size);
-
     if (MaxNewSize >= MaxHeapSize) {
       // Make sure there is room for an old generation
       uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
@@ -330,8 +345,8 @@
         FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
         _initial_gen0_size = NewSize;
       }
-    } else if (MaxNewSize < min_new_size) {
-      FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size);
+    } else if (MaxNewSize < _initial_gen0_size) {
+      FLAG_SET_ERGO(uintx, MaxNewSize, _initial_gen0_size);
     } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
       FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
     }
@@ -361,7 +376,9 @@
   GenCollectorPolicy::initialize_flags();
 
   if (!is_size_aligned(OldSize, _gen_alignment)) {
-    FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment));
+    // Set OldSize directly here to preserve the information about whether
+    // OldSize was set on the command line.
+    OldSize = align_size_down(OldSize, _gen_alignment);
   }
 
   if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
@@ -400,6 +417,20 @@
     }
   }
 
+  // Update NewSize, if possible, to avoid sizing gen0 too small when only
+  // OldSize is set on the command line.
+  if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) {
+    if (OldSize < _initial_heap_byte_size) {
+      size_t new_size = _initial_heap_byte_size - OldSize;
+      // Need to compare against the flag value for max since _max_gen0_size
+      // might not have been set yet.
+      if (new_size >= _min_gen0_size && new_size <= MaxNewSize) {
+        FLAG_SET_ERGO(uintx, NewSize, new_size);
+        _initial_gen0_size = NewSize;
+      }
+    }
+  }
+
   always_do_update_barrier = UseConcMarkSweepGC;
 
   DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();)
@@ -441,57 +472,37 @@
   // Given the maximum gen0 size, determine the initial and
   // minimum gen0 sizes.
 
-  if (_max_heap_byte_size == _min_heap_byte_size) {
-    // The maximum and minimum heap sizes are the same so the generations
-    // minimum and initial must be the same as its maximum.
-    _min_gen0_size = max_new_size;
-    _initial_gen0_size = max_new_size;
-    _max_gen0_size = max_new_size;
+  if (_max_heap_byte_size == _initial_heap_byte_size) {
+    // The maximum and initial heap sizes are the same, so the generation's
+    // initial size must be the same as its maximum size. Use NewSize as the
+    // size if it was set on the command line.
+    size_t fixed_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : max_new_size;
+
+    _initial_gen0_size = fixed_young_size;
+    _max_gen0_size = fixed_young_size;
+
+    // Also update the minimum size if min == initial == max.
+    if (_max_heap_byte_size == _min_heap_byte_size) {
+      _min_gen0_size = fixed_young_size;
+    }
   } else {
     size_t desired_new_size = 0;
     if (FLAG_IS_CMDLINE(NewSize)) {
-      // If NewSize is set on the command line, we must use it as
-      // the initial size and it also makes sense to use it as the
-      // lower limit.
-      _min_gen0_size = NewSize;
-      desired_new_size = NewSize;
-      max_new_size = MAX2(max_new_size, NewSize);
-    } else if (FLAG_IS_ERGO(NewSize)) {
-      // If NewSize is set ergonomically, we should use it as a lower
-      // limit, but use NewRatio to calculate the initial size.
-      _min_gen0_size = NewSize;
+      // If NewSize is set on the command line, we should use it as
+      // the initial size, but make sure it is within the heap bounds.
       desired_new_size =
-        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
-      max_new_size = MAX2(max_new_size, NewSize);
+        MIN2(max_new_size, bound_minus_alignment(NewSize, _initial_heap_byte_size));
+      _min_gen0_size = bound_minus_alignment(desired_new_size, _min_heap_byte_size);
     } else {
-      // For the case where NewSize is the default, use NewRatio
-      // to size the minimum and initial generation sizes.
-      // Use the default NewSize as the floor for these values.  If
-      // NewRatio is overly large, the resulting sizes can be too small.
-      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
+      // For the case where NewSize is not set on the command line, use
+      // NewRatio to size the initial generation size. Use the current
+      // NewSize as the floor, because if NewRatio is overly large, the resulting
+      // size can be too small.
       desired_new_size =
-        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
+        MIN2(max_new_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize));
     }
-
-    assert(_min_gen0_size > 0, "Sanity check");
     _initial_gen0_size = desired_new_size;
     _max_gen0_size = max_new_size;
-
-    // At this point the desirable initial and minimum sizes have been
-    // determined without regard to the maximum sizes.
-
-    // Bound the sizes by the corresponding overall heap sizes.
-    _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
-    _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
-    _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
-
-    // At this point all three sizes have been checked against the
-    // maximum sizes but have not been checked for consistency among the three.
-
-    // Final check min <= initial <= max
-    _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
-    _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
-    _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
   }
 
   // Write back to flags if necessary.
@@ -512,33 +523,6 @@
   DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
 }
 
-// Call this method during the sizing of the gen1 to make
-// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
-// the most freedom in sizing because it is done before the
-// policy for gen1 is applied.  Once gen1 policies have been applied,
-// there may be conflicts in the shape of the heap and this method
-// is used to make the needed adjustments.  The application of the
-// policies could be more sophisticated (iterative for example) but
-// keeping it simple also seems a worthwhile goal.
-bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
-                                                     size_t* gen1_size_ptr,
-                                                     const size_t heap_size) {
-  bool result = false;
-
-  if ((*gen0_size_ptr + *gen1_size_ptr) > heap_size) {
-    uintx smallest_new_size = young_gen_size_lower_bound();
-    if ((heap_size < (*gen0_size_ptr + _min_gen1_size)) &&
-        (heap_size >= _min_gen1_size + smallest_new_size)) {
-      // Adjust gen0 down to accommodate _min_gen1_size
-      *gen0_size_ptr = align_size_down_bounded(heap_size - _min_gen1_size, _gen_alignment);
-      result = true;
-    } else {
-      *gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _gen_alignment);
-    }
-  }
-  return result;
-}
-
 // Minimum sizes of the generations may be different than
 // the initial sizes.  An inconsistency is permitted here
 // in the total size that can be specified explicitly by
@@ -564,57 +548,64 @@
     // with the overall heap size).  In either case make
     // the minimum, maximum and initial sizes consistent
     // with the gen0 sizes and the overall heap sizes.
-    _min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment);
-    _initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment);
+    _min_gen1_size = _gen_alignment;
+    _initial_gen1_size = MIN2(_max_gen1_size, MAX2(_initial_heap_byte_size - _initial_gen0_size, _min_gen1_size));
     // _max_gen1_size has already been made consistent above
     FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
   } else {
-    // OldSize has been explicitly set on the command line. Use the
-    // OldSize and then determine the consequences.
-    _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
-    _initial_gen1_size = OldSize;
-
+    // OldSize has been explicitly set on the command line. Use it
+    // for the initial size, but make sure the minimum allows a young
+    // generation to fit as well.
     // If the user has explicitly set an OldSize that is inconsistent
     // with other command line flags, issue a warning.
     // The generation minimums and the overall heap minimum should
     // be within one generation alignment.
-    if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
-      warning("Inconsistency between minimum heap size and minimum "
-              "generation sizes: using minimum heap = " SIZE_FORMAT,
-              _min_heap_byte_size);
-    }
     if (OldSize > _max_gen1_size) {
       warning("Inconsistency between maximum heap size and maximum "
-              "generation sizes: using maximum heap = " SIZE_FORMAT
-              " -XX:OldSize flag is being ignored",
-              _max_heap_byte_size);
+          "generation sizes: using maximum heap = " SIZE_FORMAT
+          " -XX:OldSize flag is being ignored",
+          _max_heap_byte_size);
+      FLAG_SET_ERGO(uintx, OldSize, _max_gen1_size);
     }
-    // If there is an inconsistency between the OldSize and the minimum and/or
-    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
-    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, _min_heap_byte_size)) {
-      if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
-              SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
-              _min_gen0_size, _initial_gen0_size, _max_gen0_size);
-      }
+
+    _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
+    _initial_gen1_size = OldSize;
+  }
+
+  // The initial generation sizes should match the initial heap size;
+  // if not, issue a warning and resize the generations. This behavior
+  // differs from JDK 8, where the generation sizes had higher priority
+  // than the initial heap size.
+  if ((_initial_gen1_size + _initial_gen0_size) != _initial_heap_byte_size) {
+    warning("Inconsistency between generation sizes and heap size, resizing "
+            "the generations to fit the heap.");
+
+    size_t desired_gen0_size = _initial_heap_byte_size - _initial_gen1_size;
+    if (_initial_heap_byte_size < _initial_gen1_size) {
+      // The old generation wants all the memory; use the minimum for young and the rest for old.
+      _initial_gen0_size = _min_gen0_size;
+      _initial_gen1_size = _initial_heap_byte_size - _min_gen0_size;
+    } else if (desired_gen0_size > _max_gen0_size) {
+      // Need to increase both young and old generation
+      _initial_gen0_size = _max_gen0_size;
+      _initial_gen1_size = _initial_heap_byte_size - _max_gen0_size;
+    } else if (desired_gen0_size < _min_gen0_size) {
+      // Need to decrease both young and old generation
+      _initial_gen0_size = _min_gen0_size;
+      _initial_gen1_size = _initial_heap_byte_size - _min_gen0_size;
+    } else {
+      // The young generation boundaries allow us to update only the
+      // young generation.
+      _initial_gen0_size = desired_gen0_size;
     }
-    // The same as above for the old gen initial size.
-    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
-                          _initial_heap_byte_size)) {
-      if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
-          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
-          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
-      }
+
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
+        SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
+        _min_gen0_size, _initial_gen0_size, _max_gen0_size);
     }
   }
 
-  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
-
-  // Make sure that min gen1 <= initial gen1 <= max gen1.
-  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
-  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
-
   // Write back to flags if necessary
   if (NewSize != _initial_gen0_size) {
     FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
@@ -994,56 +985,88 @@
 // verify that there are some basic rules for NewSize honored by the policies.
 class TestGenCollectorPolicy {
 public:
-  static void test() {
+  static void test_new_size() {
     size_t flag_value;
 
     save_flags();
 
-    // Set some limits that makes the math simple.
-    FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
-    FLAG_SET_ERGO(uintx, InitialHeapSize, 120 * M);
-    Arguments::set_min_heap_size(40 * M);
-
     // If NewSize is set on the command line, it should be used
     // for both min and initial young size if less than min heap.
     flag_value = 20 * M;
+    set_basic_flag_values();
     FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
-    verify_min(flag_value);
-    verify_initial(flag_value);
+    verify_gen0_min(flag_value);
+
+    set_basic_flag_values();
+    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+    verify_gen0_initial(flag_value);
 
     // If NewSize is set on command line, but is larger than the min
     // heap size, it should only be used for initial young size.
     flag_value = 80 * M;
+    set_basic_flag_values();
     FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
-    verify_initial(flag_value);
+    verify_gen0_initial(flag_value);
 
     // If NewSize has been ergonomically set, the collector policy
     // should use it for min but calculate the initial young size
     // using NewRatio.
     flag_value = 20 * M;
+    set_basic_flag_values();
     FLAG_SET_ERGO(uintx, NewSize, flag_value);
-    verify_min(flag_value);
-    verify_scaled_initial(InitialHeapSize);
+    verify_gen0_min(flag_value);
+
+    set_basic_flag_values();
+    FLAG_SET_ERGO(uintx, NewSize, flag_value);
+    verify_scaled_gen0_initial(InitialHeapSize);
 
     restore_flags();
+  }
+
+  static void test_old_size() {
+    size_t flag_value;
+
+    save_flags();
+
+    // If OldSize is set on the command line, it should be used
+    // for both min and initial old size if less than min heap.
+    flag_value = 20 * M;
+    set_basic_flag_values();
+    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
+    verify_gen1_min(flag_value);
+
+    set_basic_flag_values();
+    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
+    verify_gen1_initial(flag_value);
+
+    // If MaxNewSize is large, the maximum OldSize will be less than
+    // what's requested on the command line and it should be reset
+    // ergonomically.
+    flag_value = 30 * M;
+    set_basic_flag_values();
+    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
+    FLAG_SET_CMDLINE(uintx, MaxNewSize, 170 * M);
+    // Calculate what we expect the flag to be.
+    flag_value = MaxHeapSize - MaxNewSize;
+    verify_gen1_initial(flag_value);
+
+    restore_flags();
 
   }
 
-  static void verify_min(size_t expected) {
+  static void verify_gen0_min(size_t expected) {
     MarkSweepPolicy msp;
     msp.initialize_all();
 
     assert(msp.min_gen0_size() <= expected, err_msg("%zu  > %zu", msp.min_gen0_size(), expected));
   }
 
-  static void verify_initial(size_t expected) {
+  static void verify_gen0_initial(size_t expected) {
     MarkSweepPolicy msp;
     msp.initialize_all();
 
     assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
   }
 
-  static void verify_scaled_initial(size_t initial_heap_size) {
+  static void verify_scaled_gen0_initial(size_t initial_heap_size) {
     MarkSweepPolicy msp;
     msp.initialize_all();
 
@@ -1053,6 +1076,21 @@
         err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize));
   }
 
+  static void verify_gen1_min(size_t expected) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    assert(msp.min_gen1_size() <= expected, err_msg("%zu  > %zu", msp.min_gen1_size(), expected));
+  }
+
+  static void verify_gen1_initial(size_t expected) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    assert(msp.initial_gen1_size() == expected, err_msg("%zu != %zu", msp.initial_gen1_size(), expected));
+  }
+
 private:
   static size_t original_InitialHeapSize;
   static size_t original_MaxHeapSize;
@@ -1061,6 +1099,15 @@
   static size_t original_NewSize;
   static size_t original_OldSize;
 
+  static void set_basic_flag_values() {
+    FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
+    FLAG_SET_ERGO(uintx, InitialHeapSize, 100 * M);
+    FLAG_SET_ERGO(uintx, OldSize, 4 * M);
+    FLAG_SET_ERGO(uintx, NewSize, 1 * M);
+    FLAG_SET_ERGO(uintx, MaxNewSize, 80 * M);
+    Arguments::set_min_heap_size(40 * M);
+  }
+
   static void save_flags() {
     original_InitialHeapSize   = InitialHeapSize;
     original_MaxHeapSize       = MaxHeapSize;
@@ -1088,7 +1135,11 @@
 size_t TestGenCollectorPolicy::original_OldSize           = 0;
 
 void TestNewSize_test() {
-  TestGenCollectorPolicy::test();
+  TestGenCollectorPolicy::test_new_size();
+}
+
+void TestOldSize_test() {
+  TestGenCollectorPolicy::test_old_size();
 }
 
 #endif
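
The reworked gen0 sizing above is easier to follow with concrete numbers. Below is a rough standalone sketch of the NewRatio path in plain C++; the helper names, the 1 MB alignment, and the flag values (mirroring set_basic_flag_values) are illustrative, not HotSpot code. With the defaults used by the tests, an initial heap of 100 MB and NewRatio = 2 give an initial young generation of 33 MB.

    #include <algorithm>
    #include <cstdio>

    // Standalone model of the gen0 sizing rules above. All names are
    // illustrative; HotSpot's real helpers live in collectorPolicy.
    static size_t align_down(size_t v, size_t alignment) {
      return v - (v % alignment);
    }

    // result = base_size / (NewRatio + 1), aligned down
    // (the formula documented in collectorPolicy.hpp).
    static size_t scale_by_new_ratio(size_t base, size_t new_ratio, size_t align) {
      return align_down(base / (new_ratio + 1), align);
    }

    int main() {
      const size_t M = 1024 * 1024, align = 1 * M;
      const size_t initial_heap = 100 * M;               // InitialHeapSize
      const size_t new_size = 1 * M, max_new = 80 * M;   // NewSize, MaxNewSize
      const size_t new_ratio = 2;                        // NewRatio default

      // NewSize not on the command line: scale by NewRatio, floor at NewSize,
      // cap at the maximum young size (the MIN2/MAX2 expression above).
      size_t initial_gen0 = std::min(max_new,
          std::max(scale_by_new_ratio(initial_heap, new_ratio, align), new_size));
      printf("initial gen0 = %zu MB\n", initial_gen0 / M);  // prints 33
      return 0;
    }
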
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -248,13 +248,13 @@
   // Compute max heap alignment.
   size_t compute_max_alignment();
 
- // Scale the base_size by NewRatio according to
- //     result = base_size / (NewRatio + 1)
- // and align by min_alignment()
- size_t scale_by_NewRatio_aligned(size_t base_size);
+  // Scale the base_size by NewRatio according to
+  //     result = base_size / (NewRatio + 1)
+  // and align by min_alignment()
+  size_t scale_by_NewRatio_aligned(size_t base_size);
 
- // Bound the value by the given maximum minus the min_alignment.
- size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
+  // Bound the value by the given maximum minus the min_alignment.
+  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
 
  public:
   GenCollectorPolicy();
@@ -335,10 +335,6 @@
   virtual CollectorPolicy::Name kind() {
     return CollectorPolicy::TwoGenerationCollectorPolicyKind;
   }
-
-  // Returns true if gen0 sizes were adjusted
-  bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
-                         const size_t heap_size);
 };
 
 class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -374,7 +374,7 @@
 
   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 
-  const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+  const size_t metadata_prev_used = MetaspaceAux::used_bytes();
 
   print_heap_before_gc();
 
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1447,7 +1447,7 @@
   uint current_shrink_factor = _shrink_factor;
   _shrink_factor = 0;
 
-  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
+  const size_t used_after_gc = MetaspaceAux::capacity_bytes();
   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 
   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
@@ -2538,8 +2538,8 @@
 // MetaspaceAux
 
 
-size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
-size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
+size_t MetaspaceAux::_capacity_words[] = {0, 0};
+size_t MetaspaceAux::_used_words[] = {0, 0};
 
 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
@@ -2552,38 +2552,38 @@
 
 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
   assert_lock_strong(SpaceManager::expand_lock());
-  assert(words <= allocated_capacity_words(mdtype),
+  assert(words <= capacity_words(mdtype),
     err_msg("About to decrement below 0: words " SIZE_FORMAT
-            " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
-            words, mdtype, allocated_capacity_words(mdtype)));
-  _allocated_capacity_words[mdtype] -= words;
+            " is greater than _capacity_words[%u] " SIZE_FORMAT,
+            words, mdtype, capacity_words(mdtype)));
+  _capacity_words[mdtype] -= words;
 }
 
 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
   assert_lock_strong(SpaceManager::expand_lock());
   // Needs to be atomic
-  _allocated_capacity_words[mdtype] += words;
+  _capacity_words[mdtype] += words;
 }
 
 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
-  assert(words <= allocated_used_words(mdtype),
+  assert(words <= used_words(mdtype),
     err_msg("About to decrement below 0: words " SIZE_FORMAT
-            " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
-            words, mdtype, allocated_used_words(mdtype)));
+            " is greater than _used_words[%u] " SIZE_FORMAT,
+            words, mdtype, used_words(mdtype)));
   // For CMS deallocation of the Metaspaces occurs during the
   // sweep which is a concurrent phase.  Protection by the expand_lock()
   // is not enough since allocation is on a per Metaspace basis
   // and protected by the Metaspace lock.
   jlong minus_words = (jlong) - (jlong) words;
-  Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
+  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
 }
 
 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
-  // _allocated_used_words tracks allocations for
+  // _used_words tracks allocations for
   // each piece of metadata.  Those allocations are
   // generally done concurrently by different application
   // threads so must be done atomically.
-  Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
+  Atomic::add_ptr(words, &_used_words[mdtype]);
 }
 
 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
@@ -2630,16 +2630,16 @@
 
 size_t MetaspaceAux::capacity_bytes_slow() {
 #ifdef PRODUCT
-  // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+  // Use capacity_bytes() in PRODUCT instead of this function.
   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
 #endif
   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
-  assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
-      err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+  assert(capacity_bytes() == class_capacity + non_class_capacity,
+      err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
         " class_capacity + non_class_capacity " SIZE_FORMAT
         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
-        allocated_capacity_bytes(), class_capacity + non_class_capacity,
+        capacity_bytes(), class_capacity + non_class_capacity,
         class_capacity, non_class_capacity));
 
   return class_capacity + non_class_capacity;
@@ -2699,14 +2699,14 @@
                         "->" SIZE_FORMAT
                         "("  SIZE_FORMAT ")",
                         prev_metadata_used,
-                        allocated_used_bytes(),
+                        used_bytes(),
                         reserved_bytes());
   } else {
     gclog_or_tty->print(" "  SIZE_FORMAT "K"
                         "->" SIZE_FORMAT "K"
                         "("  SIZE_FORMAT "K)",
                         prev_metadata_used/K,
-                        allocated_used_bytes()/K,
+                        used_bytes()/K,
                         reserved_bytes()/K);
   }
 
@@ -2722,8 +2722,8 @@
                 "capacity "  SIZE_FORMAT "K, "
                 "committed " SIZE_FORMAT "K, "
                 "reserved "  SIZE_FORMAT "K",
-                allocated_used_bytes()/K,
-                allocated_capacity_bytes()/K,
+                used_bytes()/K,
+                capacity_bytes()/K,
                 committed_bytes()/K,
                 reserved_bytes()/K);
 
@@ -2734,8 +2734,8 @@
                   "capacity "  SIZE_FORMAT "K, "
                   "committed " SIZE_FORMAT "K, "
                   "reserved "  SIZE_FORMAT "K",
-                  allocated_used_bytes(ct)/K,
-                  allocated_capacity_bytes(ct)/K,
+                  used_bytes(ct)/K,
+                  capacity_bytes(ct)/K,
                   committed_bytes(ct)/K,
                   reserved_bytes(ct)/K);
   }
@@ -2837,42 +2837,42 @@
 
 void MetaspaceAux::verify_capacity() {
 #ifdef ASSERT
-  size_t running_sum_capacity_bytes = allocated_capacity_bytes();
+  size_t running_sum_capacity_bytes = capacity_bytes();
   // For purposes of the running sum of capacity, verify against capacity
   size_t capacity_in_use_bytes = capacity_bytes_slow();
   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
-    err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
+    err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
             " capacity_bytes_slow()" SIZE_FORMAT,
             running_sum_capacity_bytes, capacity_in_use_bytes));
   for (Metaspace::MetadataType i = Metaspace::ClassType;
        i < Metaspace:: MetadataTypeCount;
        i = (Metaspace::MetadataType)(i + 1)) {
     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
-    assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
-      err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
+    assert(capacity_bytes(i) == capacity_in_use_bytes,
+      err_msg("capacity_bytes(%u) " SIZE_FORMAT
               " capacity_bytes_slow(%u)" SIZE_FORMAT,
-              i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
+              i, capacity_bytes(i), i, capacity_in_use_bytes));
   }
 #endif
 }
 
 void MetaspaceAux::verify_used() {
 #ifdef ASSERT
-  size_t running_sum_used_bytes = allocated_used_bytes();
+  size_t running_sum_used_bytes = used_bytes();
   // For purposes of the running sum of used, verify against used
   size_t used_in_use_bytes = used_bytes_slow();
-  assert(allocated_used_bytes() == used_in_use_bytes,
-    err_msg("allocated_used_bytes() " SIZE_FORMAT
+  assert(used_bytes() == used_in_use_bytes,
+    err_msg("used_bytes() " SIZE_FORMAT
             " used_bytes_slow()" SIZE_FORMAT,
-            allocated_used_bytes(), used_in_use_bytes));
+            used_bytes(), used_in_use_bytes));
   for (Metaspace::MetadataType i = Metaspace::ClassType;
        i < Metaspace:: MetadataTypeCount;
        i = (Metaspace::MetadataType)(i + 1)) {
     size_t used_in_use_bytes = used_bytes_slow(i);
-    assert(allocated_used_bytes(i) == used_in_use_bytes,
-      err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
+    assert(used_bytes(i) == used_in_use_bytes,
+      err_msg("used_bytes(%u) " SIZE_FORMAT
               " used_bytes_slow(%u)" SIZE_FORMAT,
-              i, allocated_used_bytes(i), i, used_in_use_bytes));
+              i, used_bytes(i), i, used_in_use_bytes));
   }
 #endif
 }
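
The renames above also make the two synchronization regimes easier to see: _capacity_words is only changed under the expand lock, while _used_words is updated with atomic adds because (as the retained comment notes) CMS can deallocate metadata concurrently with allocation. A minimal standalone analogue of the negated-add decrement, using std::atomic in place of Atomic::add_ptr:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    // Standalone analogue of the _used_words bookkeeping; illustrative only.
    static std::atomic<long> used_words{0};

    static void inc_used(size_t words) {
      used_words.fetch_add((long)words);
    }

    static void dec_used(size_t words) {
      // Same trick as dec_used above: atomically add the negated value
      // instead of performing a separate non-atomic subtract.
      used_words.fetch_add(-(long)words);
    }

    int main() {
      inc_used(128);
      dec_used(32);
      printf("used words: %ld\n", used_words.load());  // prints 96
      return 0;
    }
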
--- a/hotspot/src/share/vm/memory/metaspace.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -280,11 +280,11 @@
   // allocated to a Metaspace.  This is used instead of
   // iterating over all the classloaders. One for each
   // type of Metadata
-  static size_t _allocated_capacity_words[Metaspace:: MetadataTypeCount];
-  // Running sum of space in all Metachunks that have
+  static size_t _capacity_words[Metaspace:: MetadataTypeCount];
+  // Running sum of space in all Metachunks that
   // are being used for metadata. One for each
   // type of Metadata.
-  static size_t _allocated_used_words[Metaspace:: MetadataTypeCount];
+  static size_t _used_words[Metaspace:: MetadataTypeCount];
 
  public:
   // Decrement and increment _allocated_capacity_words
@@ -308,32 +308,32 @@
   static size_t free_chunks_total_bytes();
   static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
 
-  static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
-    return _allocated_capacity_words[mdtype];
+  static size_t capacity_words(Metaspace::MetadataType mdtype) {
+    return _capacity_words[mdtype];
   }
-  static size_t allocated_capacity_words() {
-    return allocated_capacity_words(Metaspace::NonClassType) +
-           allocated_capacity_words(Metaspace::ClassType);
+  static size_t capacity_words() {
+    return capacity_words(Metaspace::NonClassType) +
+           capacity_words(Metaspace::ClassType);
   }
-  static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
-    return allocated_capacity_words(mdtype) * BytesPerWord;
+  static size_t capacity_bytes(Metaspace::MetadataType mdtype) {
+    return capacity_words(mdtype) * BytesPerWord;
   }
-  static size_t allocated_capacity_bytes() {
-    return allocated_capacity_words() * BytesPerWord;
+  static size_t capacity_bytes() {
+    return capacity_words() * BytesPerWord;
   }
 
-  static size_t allocated_used_words(Metaspace::MetadataType mdtype) {
-    return _allocated_used_words[mdtype];
+  static size_t used_words(Metaspace::MetadataType mdtype) {
+    return _used_words[mdtype];
   }
-  static size_t allocated_used_words() {
-    return allocated_used_words(Metaspace::NonClassType) +
-           allocated_used_words(Metaspace::ClassType);
+  static size_t used_words() {
+    return used_words(Metaspace::NonClassType) +
+           used_words(Metaspace::ClassType);
   }
-  static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
-    return allocated_used_words(mdtype) * BytesPerWord;
+  static size_t used_bytes(Metaspace::MetadataType mdtype) {
+    return used_words(mdtype) * BytesPerWord;
   }
-  static size_t allocated_used_bytes() {
-    return allocated_used_words() * BytesPerWord;
+  static size_t used_bytes() {
+    return used_words() * BytesPerWord;
   }
 
   static size_t free_bytes();
--- a/hotspot/src/share/vm/memory/metaspaceCounters.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -66,7 +66,7 @@
 MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
 
 size_t MetaspaceCounters::used() {
-  return MetaspaceAux::allocated_used_bytes();
+  return MetaspaceAux::used_bytes();
 }
 
 size_t MetaspaceCounters::capacity() {
@@ -98,7 +98,7 @@
 MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
 
 size_t CompressedClassSpaceCounters::used() {
-  return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+  return MetaspaceAux::used_bytes(Metaspace::ClassType);
 }
 
 size_t CompressedClassSpaceCounters::capacity() {
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -144,6 +144,10 @@
 // CDS support. Create a new resolved_references array.
 void ConstantPool::restore_unshareable_info(TRAPS) {
 
+  // Only create the new resolved references array and lock if it hasn't been
+  // attempted before
+  if (resolved_references() != NULL) return;
+
   // restore the C++ vtable from the shared archive
   restore_vtable();
 
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1289,17 +1289,18 @@
 }
 
 
-void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
+void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, Handle, TRAPS), Handle mirror, TRAPS) {
   instanceKlassHandle h_this(THREAD, this);
-  do_local_static_fields_impl(h_this, f, CHECK);
+  do_local_static_fields_impl(h_this, f, mirror, CHECK);
 }
 
 
-void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_k, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
+void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_k,
+                             void f(fieldDescriptor* fd, Handle, TRAPS), Handle mirror, TRAPS) {
   for (JavaFieldStream fs(this_k()); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
       fieldDescriptor& fd = fs.field_descriptor();
-      f(&fd, CHECK);
+      f(&fd, mirror, CHECK);
     }
   }
 }
@@ -2240,9 +2241,7 @@
   int num_methods = methods->length();
   for (int index2 = 0; index2 < num_methods; ++index2) {
     methodHandle m(THREAD, methods->at(index2));
-    m()->link_method(m, CHECK);
-    // restore method's vtable by calling a virtual function
-    m->restore_vtable();
+    m->restore_unshareable_info(CHECK);
   }
   if (JvmtiExport::has_redefined_a_class()) {
     // Reinitialize vtable because RedefineClasses may have changed some
@@ -3409,6 +3408,10 @@
               ("purge: %s(%s): prev method @%d in version @%d is alive",
               method->name()->as_C_string(),
               method->signature()->as_C_string(), j, i));
+            if (method->method_data() != NULL) {
+              // Clean out any weak method links
+              method->method_data()->clean_weak_method_links();
+            }
           }
         }
       }
@@ -3418,6 +3421,14 @@
       ("purge: previous version stats: live=%d, deleted=%d", live_count,
       deleted_count));
   }
+
+  Array<Method*>* methods = ik->methods();
+  int num_methods = methods->length();
+  for (int index2 = 0; index2 < num_methods; ++index2) {
+    if (methods->at(index2)->method_data() != NULL) {
+      methods->at(index2)->method_data()->clean_weak_method_links();
+    }
+  }
 }
 
 // External interface for use during class unloading.
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -802,7 +802,7 @@
   // Iterators
   void do_local_static_fields(FieldClosure* cl);
   void do_nonstatic_fields(FieldClosure* cl); // including inherited fields
-  void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS);
+  void do_local_static_fields(void f(fieldDescriptor*, Handle, TRAPS), Handle, TRAPS);
 
   void methods_do(void f(Method* method));
   void array_klasses_do(void f(Klass* k));
@@ -1010,7 +1010,7 @@
   static void set_initialization_state_and_notify_impl  (instanceKlassHandle this_k, ClassState state, TRAPS);
   static void call_class_initializer_impl               (instanceKlassHandle this_k, TRAPS);
   static Klass* array_klass_impl                        (instanceKlassHandle this_k, bool or_null, int n, TRAPS);
-  static void do_local_static_fields_impl               (instanceKlassHandle this_k, void f(fieldDescriptor* fd, TRAPS), TRAPS);
+  static void do_local_static_fields_impl               (instanceKlassHandle this_k, void f(fieldDescriptor* fd, Handle, TRAPS), Handle, TRAPS);
   /* jni_id_for_impl for jfieldID only */
   static JNIid* jni_id_for_impl                         (instanceKlassHandle this_k, int offset);
 
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -367,7 +367,12 @@
   // Query before forming handle.
   int size = instance_size(k);
   KlassHandle h_k(THREAD, this);
-  instanceOop i = (instanceOop) CollectedHeap::Class_obj_allocate(h_k, size, k, CHECK_NULL);
+  instanceOop i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
+
+  // Since mirrors can be variable sized because of the static fields, store
+  // the size in the mirror itself.
+  java_lang_Class::set_oop_size(i, size);
+
   return i;
 }
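
The comment added here is the heart of the change: mirrors embed their class's static fields, so two mirrors can have different sizes, and the size is now recorded in the mirror itself at allocation time. A toy illustration of why a variable-sized object must carry its own size (every name and value below is invented for the example):

    #include <cstdio>

    // If instances of one "class" vary in size, a heap walker cannot get the
    // size from the class and must read it from the object, as set_oop_size
    // arranges above. Layout and numbers are invented.
    struct MirrorLike {
      int oop_size_in_words;  // recorded at allocation time
    };

    static int instance_size(int static_field_words) {
      const int base_words = 8;  // fixed part of the layout (invented)
      return base_words + static_field_words;
    }

    int main() {
      MirrorLike a{instance_size(0)};   // class with no statics
      MirrorLike b{instance_size(12)};  // class with many statics
      printf("mirror sizes: %d and %d words\n",
             a.oop_size_in_words, b.oop_size_in_words);
      return 0;
    }
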
 
--- a/hotspot/src/share/vm/oops/klass.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/klass.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -475,12 +475,8 @@
 }
 
 void Klass::remove_unshareable_info() {
-  if (!DumpSharedSpaces) {
-    // Clean up after OOM during class loading
-    if (class_loader_data() != NULL) {
-      class_loader_data()->remove_class(this);
-    }
-  }
+  assert(DumpSharedSpaces, "only called for DumpSharedSpaces");
+
   set_subklass(NULL);
   set_next_sibling(NULL);
   // Clear the java mirror
@@ -492,17 +488,26 @@
 }
 
 void Klass::restore_unshareable_info(TRAPS) {
-  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
-  // Restore class_loader_data to the null class loader data
-  set_class_loader_data(loader_data);
+  // If an exception happened during CDS restore, some of these fields may already be
+  // set. We leave the class on the CLD list, even if incomplete, so that we don't
+  // modify the CLD list outside a safepoint.
+  if (class_loader_data() == NULL) {
+    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+    // Restore class_loader_data to the null class loader data
+    set_class_loader_data(loader_data);
 
-  // Add to null class loader list first before creating the mirror
-  // (same order as class file parsing)
-  loader_data->add_class(this);
+    // Add to null class loader list first before creating the mirror
+    // (same order as class file parsing)
+    loader_data->add_class(this);
+  }
 
   // Recreate the class mirror.  The protection_domain is always null for
   // boot loader, for now.
-  java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
+  // Only recreate it if not present. A previous attempt to restore may have
+  // hit an OOM later, but keep the mirror if it was created.
+  if (java_mirror() == NULL) {
+    java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
+  }
 }
 
 Klass* Klass::array_klass_or_null(int rank) {
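
The constantPool.cpp hunk above and the klass.cpp and method.cpp hunks around here all apply the same idea: restore_unshareable_info may run again after an earlier attempt failed with an OOM, so each step first checks whether it already completed. A standalone sketch of that idempotency-guard pattern (types and the simulated failure are invented):

    #include <cstdio>

    // Sketch of a restore sequence that is safe to re-run: steps that already
    // completed are skipped, so only the missing piece is retried after an
    // allocation failure. All names are invented.
    struct Restorable {
      bool on_loader_list = false;
      void* mirror = nullptr;

      bool restore(bool fail_mirror_alloc) {
        if (!on_loader_list) {
          on_loader_list = true;        // done at most once
        }
        if (mirror == nullptr) {        // only retry what is still missing
          if (fail_mirror_alloc) {
            return false;               // models an OOM partway through
          }
          static int storage;
          mirror = &storage;
        }
        return true;
      }
    };

    int main() {
      Restorable k;
      printf("first attempt:  %s\n", k.restore(true)  ? "ok" : "failed");
      printf("second attempt: %s\n", k.restore(false) ? "ok" : "failed");
      return 0;
    }
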
--- a/hotspot/src/share/vm/oops/method.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -903,6 +903,19 @@
   return adapter->get_c2i_entry();
 }
 
+void Method::restore_unshareable_info(TRAPS) {
+  // Since restore_unshareable_info can be called more than once for a method, don't
+  // redo any work. If this field has already been restored, there is nothing to do.
+  if (_from_compiled_entry == NULL) {
+    // restore method's vtable by calling a virtual function
+    restore_vtable();
+
+    methodHandle mh(THREAD, this);
+    link_method(mh, CHECK);
+  }
+}
+
 // The verified_code_entry() must be called when a invoke is resolved
 // on this method.
 
--- a/hotspot/src/share/vm/oops/method.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -123,6 +123,8 @@
   void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
   bool is_method() const volatile { return true; }
 
+  void restore_unshareable_info(TRAPS);
+
   // accessors for instance variables
 
   ConstMethod* constMethod() const             { return _constMethod; }
--- a/hotspot/src/share/vm/oops/methodData.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1531,9 +1531,35 @@
   }
 }
 
-// Remove SpeculativeTrapData entries that reference an unloaded
-// method
-void MethodData::clean_extra_data(BoolObjectClosure* is_alive) {
+class CleanExtraDataClosure : public StackObj {
+public:
+  virtual bool is_live(Method* m) = 0;
+};
+
+// Check for entries that reference an unloaded method
+class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
+private:
+  BoolObjectClosure* _is_alive;
+public:
+  CleanExtraDataKlassClosure(BoolObjectClosure* is_alive) : _is_alive(is_alive) {}
+  bool is_live(Method* m) {
+    return m->method_holder()->is_loader_alive(_is_alive);
+  }
+};
+
+// Check for entries that reference a redefined method
+class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
+public:
+  CleanExtraDataMethodClosure() {}
+  bool is_live(Method* m) {
+    return m->on_stack();
+  }
+};
+
+// Remove SpeculativeTrapData entries that reference an unloaded or
+// redefined method
+void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
   DataLayout* dp  = extra_data_base();
   DataLayout* end = extra_data_limit();
 
@@ -1544,7 +1570,7 @@
       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
       Method* m = data->method();
       assert(m != NULL, "should have a method");
-      if (!m->method_holder()->is_loader_alive(is_alive)) {
+      if (!cl->is_live(m)) {
         // "shift" accumulates the number of cells for dead
         // SpeculativeTrapData entries that have been seen so
         // far. Following entries must be shifted left by that many
@@ -1575,9 +1601,9 @@
   }
 }
 
-// Verify there's no unloaded method referenced by a
+// Verify there's no unloaded or redefined method referenced by a
 // SpeculativeTrapData entry
-void MethodData::verify_extra_data_clean(BoolObjectClosure* is_alive) {
+void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
 #ifdef ASSERT
   DataLayout* dp  = extra_data_base();
   DataLayout* end = extra_data_limit();
@@ -1587,7 +1613,7 @@
     case DataLayout::speculative_trap_data_tag: {
       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
       Method* m = data->method();
-      assert(m != NULL && m->method_holder()->is_loader_alive(is_alive), "Method should exist");
+      assert(m != NULL && cl->is_live(m), "Method should exist");
       break;
     }
     case DataLayout::bit_data_tag:
@@ -1613,6 +1639,19 @@
     parameters->clean_weak_klass_links(is_alive);
   }
 
-  clean_extra_data(is_alive);
-  verify_extra_data_clean(is_alive);
+  CleanExtraDataKlassClosure cl(is_alive);
+  clean_extra_data(&cl);
+  verify_extra_data_clean(&cl);
 }
+
+void MethodData::clean_weak_method_links() {
+  for (ProfileData* data = first_data();
+       is_valid(data);
+       data = next_data(data)) {
+    data->clean_weak_method_links();
+  }
+
+  CleanExtraDataMethodClosure cl;
+  clean_extra_data(&cl);
+  verify_extra_data_clean(&cl);
+}
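
The closure split above lets one generic cleaning pass serve two callers: class unloading asks whether the method holder's loader is alive, while redefinition asks whether the method is still on a stack. A standalone sketch of the same predicate-object pattern (the Method struct and the entry list are invented stand-ins for the SpeculativeTrapData entries):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Method { bool loader_alive; bool on_stack; };

    // One liveness interface, two predicates, mirroring the classes above.
    struct CleanExtraDataClosure {
      virtual bool is_live(const Method& m) = 0;
      virtual ~CleanExtraDataClosure() {}
    };
    struct KlassClosure : CleanExtraDataClosure {    // unloading case
      bool is_live(const Method& m) { return m.loader_alive; }
    };
    struct MethodClosure : CleanExtraDataClosure {   // redefinition case
      bool is_live(const Method& m) { return m.on_stack; }
    };

    // Generic pass: drop every entry whose method is no longer live.
    static void clean_extra_data(std::vector<Method>& entries,
                                 CleanExtraDataClosure* cl) {
      entries.erase(std::remove_if(entries.begin(), entries.end(),
                        [cl](const Method& m) { return !cl->is_live(m); }),
                    entries.end());
    }

    int main() {
      std::vector<Method> data = {{true, false}, {false, true}, {true, true}};
      KlassClosure unloading;
      clean_extra_data(data, &unloading);
      printf("entries left after unloading pass: %zu\n", data.size());  // 2
      return 0;
    }
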
--- a/hotspot/src/share/vm/oops/methodData.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/methodData.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -251,6 +251,9 @@
 
   // GC support
   void clean_weak_klass_links(BoolObjectClosure* cl);
+
+  // Redefinition support
+  void clean_weak_method_links();
 };
 
 
@@ -506,6 +509,9 @@
   // GC support
   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {}
 
+  // Redefinition support
+  virtual void clean_weak_method_links() {}
+
   // CI translation: ProfileData can represent both MethodDataOop data
   // as well as CIMethodData data. This function is provided for translating
   // an oop in a ProfileData to the ci equivalent. Generally speaking,
@@ -1989,6 +1995,7 @@
 //
 
 CC_INTERP_ONLY(class BytecodeInterpreter;)
+class CleanExtraDataClosure;
 
 class MethodData : public Metadata {
   friend class VMStructs;
@@ -2146,9 +2153,9 @@
   static bool profile_parameters_jsr292_only();
   static bool profile_all_parameters();
 
-  void clean_extra_data(BoolObjectClosure* is_alive);
+  void clean_extra_data(CleanExtraDataClosure* cl);
   void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
-  void verify_extra_data_clean(BoolObjectClosure* is_alive);
+  void verify_extra_data_clean(CleanExtraDataClosure* cl);
 
 public:
   static int header_size() {
@@ -2440,6 +2447,8 @@
   static bool profile_return_jsr292_only();
 
   void clean_method_data(BoolObjectClosure* is_alive);
+
+  void clean_weak_method_links();
 };
 
 #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -269,7 +269,7 @@
         if (element_is_null ||
             (new_val->klass())->is_subtype_of(bound)) {
           bs->write_ref_field_pre(p, new_val);
-          *p = *from;
+          *p = element;
         } else {
           // We must do a barrier to cover the partial copy.
           const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
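
The small fix above closes a check-then-use gap: element is the value that passed the subtype check, while re-reading *from could observe a concurrently updated element that was never checked. A minimal standalone illustration of reading once and storing the checked value (the check itself is invented):

    #include <atomic>
    #include <cassert>

    static bool passes_check(int v) { return v >= 0; }  // invented check

    static void copy_one(std::atomic<int>* from, int* to) {
      int element = from->load();  // read the source slot exactly once
      if (passes_check(element)) {
        *to = element;             // store the checked value, never re-read *from
      }
    }

    int main() {
      std::atomic<int> src{42};
      int dst = 0;
      copy_one(&src, &dst);
      assert(dst == 42);
      return 0;
    }
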
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -70,6 +70,7 @@
 
 JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   Compile* C = Compile::current();
+  C->print_inlining_update(this);
 
   if (is_osr()) {
     // The JVMS for a OSR has a single argument (see its TypeFunc).
@@ -126,6 +127,7 @@
 
 JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
+  kit.C->print_inlining_update(this);
   bool is_static = method()->is_static();
   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
@@ -178,6 +180,8 @@
   GraphKit kit(jvms);
   Node* receiver = kit.argument(0);
 
+  kit.C->print_inlining_update(this);
+
   if (kit.C->log() != NULL) {
     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
   }
@@ -271,14 +275,13 @@
   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
     DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
 
-  virtual bool      is_late_inline() const { return true; }
+  virtual bool is_late_inline() const { return true; }
 
   // Convert the CallStaticJava into an inline
   virtual void do_late_inline();
 
   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
-    C->print_inlining_skip(this);
 
     // Record that this call site should be revisited once the main
     // parse is finished.
@@ -296,10 +299,11 @@
   virtual void print_inlining_late(const char* msg) {
     CallNode* call = call_node();
     Compile* C = Compile::current();
-    C->print_inlining_insert(this);
+    C->print_inlining_assert_ready();
     C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
+    C->print_inlining_move_to(this);
+    C->print_inlining_update_delayed(this);
   }
-
 };
 
 void LateInlineCallGenerator::do_late_inline() {
@@ -360,6 +364,10 @@
     map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
   }
 
+  C->print_inlining_assert_ready();
+
+  C->print_inlining_move_to(this);
+
   // This check is done here because for_method_handle_inline() method
   // needs jvms for inlined state.
   if (!do_late_inline_check(jvms)) {
@@ -367,8 +375,6 @@
     return;
   }
 
-  C->print_inlining_insert(this);
-
   CompileLog* log = C->log();
   if (log != NULL) {
     log->head("late_inline method='%d'", log->identify(method()));
@@ -388,7 +394,7 @@
     C->set_default_node_notes(entry_nn);
   }
 
-  // Now perform the inling using the synthesized JVMState
+  // Now perform the inlining using the synthesized JVMState
   JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
   if (new_jvms == NULL)  return;  // no change
   if (C->failing())      return;
@@ -431,6 +437,7 @@
 
   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
+
     if (_input_not_const) {
       // inlining won't be possible so no need to enqueue right now.
       call_node()->set_generator(this);
@@ -439,17 +446,14 @@
     }
     return new_jvms;
   }
-
-  virtual void print_inlining_late(const char* msg) {
-    if (!_input_not_const) return;
-    LateInlineCallGenerator::print_inlining_late(msg);
-  }
 };
 
 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 
   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 
+  Compile::current()->print_inlining_update_delayed(this);
+
   if (!_input_not_const) {
     _attempt++;
   }
@@ -479,8 +483,6 @@
 
   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
-    C->print_inlining_skip(this);
-
     C->add_string_late_inline(this);
 
     JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
@@ -502,7 +504,6 @@
 
   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
-    C->print_inlining_skip(this);
 
     C->add_boxing_late_inline(this);
 
@@ -554,6 +555,8 @@
 
 JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   Compile* C = Compile::current();
+  C->print_inlining_update(this);
+
   if (C->log() != NULL) {
     C->log()->elem("warm_call bci='%d'", jvms->bci());
   }
@@ -632,6 +635,7 @@
 
 JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
+  kit.C->print_inlining_update(this);
   PhaseGVN& gvn = kit.gvn();
   // We need an explicit receiver null_check before checking its type.
   // We share a map with the caller, so his JVMS gets adjusted.
@@ -779,6 +783,9 @@
         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
+      } else {
+        const char* msg = "receiver not constant";
+        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
       }
     }
     break;
@@ -844,11 +851,13 @@
           // provide us with a type
           speculative_receiver_type = receiver_type->speculative_type();
         }
-
         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
+      } else {
+        const char* msg = "member_name not constant";
+        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
       }
     }
     break;
@@ -904,6 +913,7 @@
   if (kit.failing())
     return NULL;  // might happen because of NodeCountInliningCutoff
 
+  kit.C->print_inlining_update(this);
   SafePointNode* slow_map = NULL;
   JVMState* slow_jvms;
   if (slow_ctl != NULL) {
@@ -1017,6 +1027,7 @@
 
 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
+  kit.C->print_inlining_update(this);
   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
   int nargs = method()->arg_size();
   kit.inc_sp(nargs);
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -662,6 +662,7 @@
                   _inlining_progress(false),
                   _inlining_incrementally(false),
                   _print_inlining_list(NULL),
+                  _print_inlining_stream(NULL),
                   _print_inlining_idx(0),
                   _preserve_jvm_state(0) {
   C = this;
@@ -723,9 +724,7 @@
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);
 
-  if (print_inlining() || print_intrinsics()) {
-    _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
-  }
+  print_inlining_init();
   { // Scope for timing the parser
     TracePhase t3("parse", &_t_parser, true);
 
@@ -967,6 +966,7 @@
     _inlining_progress(false),
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
+    _print_inlining_stream(NULL),
     _print_inlining_idx(0),
     _preserve_jvm_state(0),
     _allowed_reasons(0) {
@@ -2023,6 +2023,8 @@
   ResourceMark rm;
   int          loop_opts_cnt;
 
+  print_inlining_reinit();
+
   NOT_PRODUCT( verify_graph_edges(); )
 
   print_method(PHASE_AFTER_PARSING);
@@ -3755,30 +3757,114 @@
   }
 }
 
-void Compile::dump_inlining() {
+// The message about the current inlining is accumulated in
+// _print_inlining_stream and transferred into the _print_inlining_list
+// once we know whether inlining succeeds or not. For regular
+// inlining, messages are appended to the buffer pointed to by
+// _print_inlining_idx in the _print_inlining_list. For late inlining,
+// a new buffer is added after _print_inlining_idx in the list. This
+// way we can update the inlining message for a late inlining call site
+// when the inlining is attempted again.
+void Compile::print_inlining_init() {
+  if (print_inlining() || print_intrinsics()) {
+    _print_inlining_stream = new stringStream();
+    _print_inlining_list = new (comp_arena()) GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
+  }
+}
+
+void Compile::print_inlining_reinit() {
+  if (print_inlining() || print_intrinsics()) {
+    // Reallocate the buffer when we change ResourceMark.
+    _print_inlining_stream = new stringStream();
+  }
+}
+
+void Compile::print_inlining_reset() {
+  _print_inlining_stream->reset();
+}
+
+void Compile::print_inlining_commit() {
+  assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
+  // Transfer the message from _print_inlining_stream to the current
+  // _print_inlining_list buffer and clear _print_inlining_stream.
+  _print_inlining_list->at(_print_inlining_idx).ss()->write(_print_inlining_stream->as_string(), _print_inlining_stream->size());
+  print_inlining_reset();
+}
+
+void Compile::print_inlining_push() {
+  // Add new buffer to the _print_inlining_list at current position
+  _print_inlining_idx++;
+  _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
+}
+
+Compile::PrintInliningBuffer& Compile::print_inlining_current() {
+  return _print_inlining_list->at(_print_inlining_idx);
+}
+
+void Compile::print_inlining_update(CallGenerator* cg) {
   if (print_inlining() || print_intrinsics()) {
+    if (!cg->is_late_inline()) {
+      if (print_inlining_current().cg() != NULL) {
+        print_inlining_push();
+      }
+      print_inlining_commit();
+    } else {
+      if (print_inlining_current().cg() != cg &&
+          (print_inlining_current().cg() != NULL ||
+           print_inlining_current().ss()->size() != 0)) {
+        print_inlining_push();
+      }
+      print_inlining_commit();
+      print_inlining_current().set_cg(cg);
+    }
+  }
+}
+
+void Compile::print_inlining_move_to(CallGenerator* cg) {
+  // We resume inlining at a late inlining call site. Locate the
+  // corresponding inlining buffer so that we can update it.
+  if (print_inlining()) {
+    for (int i = 0; i < _print_inlining_list->length(); i++) {
+      if (_print_inlining_list->adr_at(i)->cg() == cg) {
+        _print_inlining_idx = i;
+        return;
+      }
+    }
+    ShouldNotReachHere();
+  }
+}
+
+void Compile::print_inlining_update_delayed(CallGenerator* cg) {
+  if (print_inlining()) {
+    assert(_print_inlining_stream->size() > 0, "missing inlining msg");
+    assert(print_inlining_current().cg() == cg, "wrong entry");
+    // replace message with new message
+    _print_inlining_list->at_put(_print_inlining_idx, PrintInliningBuffer());
+    print_inlining_commit();
+    print_inlining_current().set_cg(cg);
+  }
+}
+
+void Compile::print_inlining_assert_ready() {
+  assert(!_print_inlining || _print_inlining_stream->size() == 0, "losing data");
+}
+
+void Compile::dump_inlining() {
+  bool do_print_inlining = print_inlining() || print_intrinsics();
+  if (do_print_inlining) {
     // Print inlining message for candidates that we couldn't inline
-    // for lack of space or non constant receiver
+    // for lack of space
     for (int i = 0; i < _late_inlines.length(); i++) {
       CallGenerator* cg = _late_inlines.at(i);
-      cg->print_inlining_late("live nodes > LiveNodeCountInliningCutoff");
-    }
-    Unique_Node_List useful;
-    useful.push(root());
-    for (uint next = 0; next < useful.size(); ++next) {
-      Node* n  = useful.at(next);
-      if (n->is_Call() && n->as_Call()->generator() != NULL && n->as_Call()->generator()->call_node() == n) {
-        CallNode* call = n->as_Call();
-        CallGenerator* cg = call->generator();
-        cg->print_inlining_late("receiver not constant");
-      }
-      uint max = n->len();
-      for ( uint i = 0; i < max; ++i ) {
-        Node *m = n->in(i);
-        if ( m == NULL ) continue;
-        useful.push(m);
+      if (!cg->is_mh_late_inline()) {
+        const char* msg = "live nodes > LiveNodeCountInliningCutoff";
+        if (do_print_inlining) {
+          cg->print_inlining_late(msg);
+        }
       }
     }
+  }
+  if (do_print_inlining) {
     for (int i = 0; i < _print_inlining_list->length(); i++) {
       tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
     }
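
The comment block introduced above describes a two-level buffer: messages accumulate in a scratch stream and are committed into a per-call-site list entry only once the inlining outcome is known, which lets a late-inline retry rewrite its own entry instead of appending a stale one. A rough standalone model of that protocol using std::string in place of stringStream (names are illustrative):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy model of the stream/list protocol described in compile.cpp above.
    struct InliningLog {
      std::string stream;              // _print_inlining_stream analogue
      std::vector<std::string> list;   // _print_inlining_list analogue
      size_t idx = 0;                  // _print_inlining_idx analogue

      InliningLog() : list(1) {}
      void append(const std::string& msg) { stream += msg; }
      void commit() { list[idx] += stream; stream.clear(); }  // print_inlining_commit
      void replace_current() {                                // update_delayed
        list[idx].clear();
        commit();
      }
    };

    int main() {
      InliningLog log;
      log.append("late inline candidate\n");
      log.commit();                      // first attempt recorded
      log.append("inlined after parse\n");
      log.replace_current();             // retry rewrites the same entry
      for (size_t i = 0; i < log.list.size(); i++) {
        printf("%s", log.list[i].c_str());
      }
      return 0;
    }
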
--- a/hotspot/src/share/vm/opto/compile.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -416,6 +416,7 @@
     void set_cg(CallGenerator* cg) { _cg = cg; }
   };
 
+  stringStream* _print_inlining_stream;
   GrowableArray<PrintInliningBuffer>* _print_inlining_list;
   int _print_inlining_idx;
 
@@ -433,33 +434,24 @@
 
   void* _replay_inline_data; // Pointer to data loaded from file
 
+  void print_inlining_init();
+  void print_inlining_reinit();
+  void print_inlining_commit();
+  void print_inlining_push();
+  PrintInliningBuffer& print_inlining_current();
+
  public:
 
   outputStream* print_inlining_stream() const {
-    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
-  }
-
-  void print_inlining_skip(CallGenerator* cg) {
-    if (_print_inlining) {
-      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
-      _print_inlining_idx++;
-      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
-    }
+    assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
+    return _print_inlining_stream;
   }
 
-  void print_inlining_insert(CallGenerator* cg) {
-    if (_print_inlining) {
-      for (int i = 0; i < _print_inlining_list->length(); i++) {
-        if (_print_inlining_list->adr_at(i)->cg() == cg) {
-          _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
-          _print_inlining_idx = i+1;
-          _print_inlining_list->adr_at(i)->set_cg(NULL);
-          return;
-        }
-      }
-      ShouldNotReachHere();
-    }
-  }
+  void print_inlining_update(CallGenerator* cg);
+  void print_inlining_update_delayed(CallGenerator* cg);
+  void print_inlining_move_to(CallGenerator* cg);
+  void print_inlining_assert_ready();
+  void print_inlining_reset();
 
   void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
     stringStream ss;
--- a/hotspot/src/share/vm/opto/doCall.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -294,6 +294,8 @@
   // There was no special inlining tactic, or it bailed out.
   // Use a more generic tactic, like a simple call.
   if (call_does_dispatch) {
+    const char* msg = "virtual call";
+    if (PrintInlining) print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
     return CallGenerator::for_virtual_call(callee, vtable_index);
   } else {
     // Class Hierarchy Analysis or Type Profile reveals a unique target,
@@ -396,6 +398,8 @@
   // our contribution to it is cleaned up right here.
   kill_dead_locals();
 
+  C->print_inlining_assert_ready();
+
   // Set frequently used booleans
   const bool is_virtual = bc() == Bytecodes::_invokevirtual;
   const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
@@ -531,7 +535,8 @@
     // intrinsic was expecting to optimize. Should always be possible to
     // get a normal java call that may inline in that case
     cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
-    if ((new_jvms = cg->generate(jvms, this)) == NULL) {
+    new_jvms = cg->generate(jvms, this);
+    if (new_jvms == NULL) {
       guarantee(failing(), "call failed to generate:  calls should work");
       return;
     }
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -620,6 +620,7 @@
     }
     // Push the result from the inlined method onto the stack.
     kit.push_result();
+    C->print_inlining_update(this);
     return kit.transfer_exceptions_into_jvms();
   }
 
@@ -637,6 +638,7 @@
     }
   }
   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
+  C->print_inlining_update(this);
   return NULL;
 }
 
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -3877,6 +3877,7 @@
 void TestMetachunk_test();
 void TestVirtualSpaceNode_test();
 void TestNewSize_test();
+void TestOldSize_test();
 void TestKlass_test();
 void TestBitMap_test();
 #if INCLUDE_ALL_GCS
@@ -3903,6 +3904,7 @@
     run_unit_test(AltHashing::test_alt_hash());
     run_unit_test(test_loggc_filename());
     run_unit_test(TestNewSize_test());
+    run_unit_test(TestOldSize_test());
     run_unit_test(TestKlass_test());
     run_unit_test(TestBitMap_test());
 #if INCLUDE_VM_STRUCTS
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -438,6 +438,30 @@
   return (mh->queued_for_compilation() || nm != NULL);
 WB_END
 
+class VM_WhiteBoxOperation : public VM_Operation {
+ public:
+  VM_WhiteBoxOperation()                         { }
+  VMOp_Type type()                  const        { return VMOp_WhiteBoxOperation; }
+  bool allow_nested_vm_operations() const        { return true; }
+};
+
+class AlwaysFalseClosure : public BoolObjectClosure {
+ public:
+  bool do_object_b(oop p) { return false; }
+};
+
+static AlwaysFalseClosure always_false;
+
+class VM_WhiteBoxCleanMethodData : public VM_WhiteBoxOperation {
+ public:
+  VM_WhiteBoxCleanMethodData(MethodData* mdo) : _mdo(mdo) { }
+  void doit() {
+    _mdo->clean_method_data(&always_false);
+  }
+ private:
+  MethodData* _mdo;
+};
+
 WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
   CHECK_JNI_EXCEPTION(env);
@@ -453,6 +477,8 @@
     for (int i = 0; i < arg_count; i++) {
       mdo->set_arg_modified(i, 0);
     }
+    VM_WhiteBoxCleanMethodData op(mdo);
+    VMThread::execute(&op);
   }
 
   mh->clear_not_c1_compilable();
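The VM_WhiteBoxOperation base above exists so whitebox hooks can hand work to the VM thread, with nested VM operations allowed. A minimal sketch of one more operation in the same pattern; VM_WhiteBoxClearCounters and clear_all_counters are hypothetical, only the base class and VMThread::execute come from this patch:

    class VM_WhiteBoxClearCounters : public VM_WhiteBoxOperation {
     public:
      VM_WhiteBoxClearCounters(Method* m) : _method(m) { }
      void doit() {
        // runs in the VM thread, like VM_WhiteBoxCleanMethodData::doit above
        _method->clear_all_counters();  // hypothetical action
      }
     private:
      Method* _method;
    };

    // usage mirrors WB_ClearMethodState:
    //   VM_WhiteBoxClearCounters op(method);
    //   VMThread::execute(&op);
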
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -307,6 +307,9 @@
                            JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
 #endif // PRODUCT
   { "UseVMInterruptibleIO",          JDK_Version::jdk(8), JDK_Version::jdk(9) },
+  { "UseBoundThreads",               JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "DefaultThreadPriority",         JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "NoYieldsInMicrolock",           JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
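
Each entry in this table names a flag, the JDK version in which it became obsolete, and the version from which it is no longer accepted. A hedged sketch of one more entry (MyOldFlag is hypothetical):

    // hypothetical: obsolete in JDK 9, no longer accepted as of JDK 10
    { "MyOldFlag", JDK_Version::jdk(9), JDK_Version::jdk(10) },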
 
@@ -2078,17 +2081,6 @@
   // Note: Needs platform-dependent factoring.
   bool status = true;
 
-  // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
-  // builds so the cost of stack banging can be measured.
-#if (defined(PRODUCT) && defined(SOLARIS))
-  if (!UseBoundThreads && !UseStackBanging) {
-    jio_fprintf(defaultStream::error_stream(),
-                "-UseStackBanging conflicts with -UseBoundThreads\n");
-
-     status = false;
-  }
-#endif
-
   if (TLABRefillWasteFraction == 0) {
     jio_fprintf(defaultStream::error_stream(),
                 "TLABRefillWasteFraction should be a denominator, "
--- a/hotspot/src/share/vm/runtime/os.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -929,6 +929,10 @@
 }
 
 void os::print_date_and_time(outputStream *st) {
+  const int secs_per_day  = 86400;
+  const int secs_per_hour = 3600;
+  const int secs_per_min  = 60;
+
   time_t tloc;
   (void)time(&tloc);
   st->print("time: %s", ctime(&tloc));  // ctime adds newline.
@@ -937,7 +941,17 @@
   // NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in
   //       Linux. Must be a bug in glibc ? Workaround is to round "t" to int
   //       before printf. We lost some precision, but who cares?
-  st->print_cr("elapsed time: %d seconds", (int)t);
+  int eltime = (int)t;  // elapsed time in seconds
+
+  // print elapsed time in a human-readable format:
+  int eldays = eltime / secs_per_day;
+  int day_secs = eldays * secs_per_day;
+  int elhours = (eltime - day_secs) / secs_per_hour;
+  int hour_secs = elhours * secs_per_hour;
+  int elmins = (eltime - day_secs - hour_secs) / secs_per_min;
+  int minute_secs = elmins * secs_per_min;
+  int elsecs = (eltime - day_secs - hour_secs - minute_secs);
+  st->print_cr("elapsed time: %d seconds (%dd %dh %dm %ds)", eltime, eldays, elhours, elmins, elsecs);
 }
 
 // moved from debug.cpp (used to be find()) but still called from there
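
The new code is plain integer division on the elapsed seconds; the subtraction form above is equivalent to taking remainders, since 86400 is a multiple of 3600 and 3600 a multiple of 60. A worked example with an illustrative value:

    int eltime  = 100000;                   // elapsed seconds (illustrative)
    int eldays  = eltime / 86400;           // 1
    int elhours = (eltime % 86400) / 3600;  // 3
    int elmins  = (eltime % 3600) / 60;     // 46
    int elsecs  = eltime % 60;              // 40
    // prints: elapsed time: 100000 seconds (1d 3h 46m 40s)
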
--- a/hotspot/src/share/vm/runtime/os.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/os.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -450,8 +450,8 @@
     // yield that can be used in lieu of blocking.
   } ;
   static YieldResult NakedYield () ;
-  static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
-  static void loop_breaker(int attempts);  // called from within tight loops to possibly influence time-sharing
+  static void yield_all(); // Yields to all other threads including lower priority
+                           // (for the default scheduling policy)
   static OSReturn set_priority(Thread* thread, ThreadPriority priority);
   static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
 
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -319,7 +319,7 @@
       if (steps < DeferThrSuspendLoopCount) {
         os::NakedYield() ;
       } else {
-        os::yield_all(steps) ;
+        os::yield_all() ;
         // Alternately, the VM thread could transiently depress its scheduling priority or
         // transiently increase the priority of the tardy mutator(s).
       }
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -924,12 +924,6 @@
 JRT_END
 #endif // !PRODUCT
 
-
-JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
-  os::yield_all(attempts);
-JRT_END
-
-
 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
   assert(obj->is_oop(), "must be a valid oop");
   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
@@ -1268,8 +1262,6 @@
       }
 #endif
       if (is_virtual) {
-        nmethod* nm = callee_nm;
-        if (nm == NULL) CodeCache::find_blob(caller_frame.pc());
         CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
         if (inline_cache->is_clean()) {
           inline_cache->set_to_monomorphic(virtual_call_info);
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -253,9 +253,6 @@
   // bytecode tracing is only used by the TraceBytecodes
   static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2) PRODUCT_RETURN0;
 
-  // Used to back off a spin lock that is under heavy contention
-  static void yield_all(JavaThread* thread, int attempts = 0);
-
   static oop retrieve_receiver( Symbol* sig, frame caller );
 
   static void register_finalizer(JavaThread* thread, oopDesc* obj);
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1394,8 +1394,8 @@
 void JavaThread::initialize() {
   // Initialize fields
 
-  // Set the claimed par_id to -1 (ie not claiming any par_ids)
-  set_claimed_par_id(-1);
+  // Set the claimed par_id to UINT_MAX (i.e. not claiming any par_ids)
+  set_claimed_par_id(UINT_MAX);
 
   set_saved_exception_pc(NULL);
   set_threadObj(NULL);
--- a/hotspot/src/share/vm/runtime/thread.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -1778,12 +1778,12 @@
   void set_done_attaching_via_jni() { _jni_attach_state = _attached_via_jni; OrderAccess::fence(); }
 private:
   // This field is used to determine if a thread has claimed
-  // a par_id: it is -1 if the thread has not claimed a par_id;
+  // a par_id: it is UINT_MAX if the thread has not claimed a par_id;
   // otherwise its value is the par_id that has been claimed.
-  int _claimed_par_id;
+  uint _claimed_par_id;
 public:
-  int get_claimed_par_id() { return _claimed_par_id; }
-  void set_claimed_par_id(int id) { _claimed_par_id = id;}
+  uint get_claimed_par_id() { return _claimed_par_id; }
+  void set_claimed_par_id(uint id) { _claimed_par_id = id;}
 };
 
 // Inline implementation of JavaThread::current
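
With the field now unsigned, UINT_MAX replaces -1 as the "unclaimed" sentinel; the stored bit pattern is identical, since (uint)-1 == UINT_MAX, and only the source-level intent becomes explicit. A minimal sketch:

    #include <climits>
    unsigned int claimed_par_id = UINT_MAX;       // "no par_id claimed"
    bool claimed = (claimed_par_id != UINT_MAX);  // false until a par_id is set
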
--- a/hotspot/src/share/vm/runtime/vframe.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vframe.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -321,24 +321,38 @@
   }
 }
 
-StackValueCollection*  interpretedVFrame::expressions() const {
-  int length = fr().interpreter_frame_expression_stack_size();
-  if (method()->is_native()) {
-    // If the method is native, there is no expression stack
-    length = 0;
+StackValueCollection* interpretedVFrame::expressions() const {
+
+  InterpreterOopMap oop_mask;
+
+  if (!method()->is_native()) {
+    // Get oopmap describing oops and ints for the current bci
+    if (TraceDeoptimization && Verbose) {
+      methodHandle m_h(method());
+      OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
+    } else {
+      method()->mask_for(bci(), &oop_mask);
+    }
+  }
+
+  // If the bci is a call instruction, i.e. any of the invoke* instructions,
+  // the InterpreterOopMap does not include expression/operand stack liveness
+  // info in the oop_mask/bit_mask. This can lead to a discrepancy between
+  // what is actually on the expression stack and what the oop_map reports.
+  // We need to use the length reported in the oop_map.
+  int length = oop_mask.expression_stack_size();
+
+  assert(fr().interpreter_frame_expression_stack_size() >= length,
+    "error in expression stack!");
+
+  StackValueCollection* result = new StackValueCollection(length);
+
+  if (0 == length) {
+    return result;
   }
 
   int nof_locals = method()->max_locals();
-  StackValueCollection* result = new StackValueCollection(length);
 
-  InterpreterOopMap oop_mask;
-  // Get oopmap describing oops and int for current bci
-  if (TraceDeoptimization && Verbose) {
-    methodHandle m_h(method());
-    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-  } else {
-    method()->mask_for(bci(), &oop_mask);
-  }
   // handle expressions
   for(int i=0; i < length; i++) {
     // Find stack location
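
The comment above is the heart of this change: at an invoke* bci the oop map omits expression-stack liveness, so the interpreter frame can legitimately hold more slots (the outgoing arguments) than the oop map reports, and the oop map length must win. A numeric illustration with made-up values:

    int frame_size   = 3;  // fr().interpreter_frame_expression_stack_size()
    int oop_map_size = 1;  // oop_mask.expression_stack_size() at an invoke bci
    int length = oop_map_size;     // trust the oop map, as the patch does
    assert(frame_size >= length);  // "error in expression stack!" otherwise
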
--- a/hotspot/src/share/vm/runtime/vmThread.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -305,6 +305,9 @@
     _terminate_lock->notify();
   }
 
+  // Thread destructor usually does this.
+  ThreadLocalStorage::set_thread(NULL);
+
   // Deletion must be done synchronously by the JNI DestroyJavaVM thread
   // so that the VMThread deletion completes before the main thread frees
   // up the CodeHeap.
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp	Wed Jul 05 19:37:19 2017 +0200
@@ -97,6 +97,7 @@
   template(Exit)                                  \
   template(LinuxDllLoad)                          \
   template(RotateGCLog)                           \
+  template(WhiteBoxOperation)                     \
 
 class VM_Operation: public CHeapObj<mtInternal> {
  public:
--- a/hotspot/src/share/vm/services/memoryPool.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/services/memoryPool.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -268,7 +268,7 @@
 }
 
 size_t MetaspacePool::used_in_bytes() {
-  return MetaspaceAux::allocated_used_bytes();
+  return MetaspaceAux::used_bytes();
 }
 
 size_t MetaspacePool::calculate_max_size() const {
@@ -280,7 +280,7 @@
   MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
 
 size_t CompressedKlassSpacePool::used_in_bytes() {
-  return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+  return MetaspaceAux::used_bytes(Metaspace::ClassType);
 }
 
 MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
--- a/hotspot/src/share/vm/trace/trace.xml	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/trace/trace.xml	Wed Jul 05 19:37:19 2017 +0200
@@ -185,7 +185,7 @@
     </event>
 
     <struct id="MetaspaceSizes">
-      <value type="BYTES64" field="capacity" label="Capacity" description="Total available memory to allocate in" />
+      <value type="BYTES64" field="committed" label="Committed" description="Committed memory for this space" />
       <value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
       <value type="BYTES64" field="reserved" label="Reserved" description="Reserved memory for this space" />
     </struct>
--- a/hotspot/src/share/vm/utilities/taskqueue.cpp	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/src/share/vm/utilities/taskqueue.cpp	Wed Jul 05 19:37:19 2017 +0200
@@ -239,8 +239,8 @@
 
 #ifdef TRACESPINNING
 void ParallelTaskTerminator::print_termination_counts() {
-  gclog_or_tty->print_cr("ParallelTaskTerminator Total yields: " UINT32_FORMAT
-    " Total spins: " UINT32_FORMAT " Total peeks: " UINT32_FORMAT,
+  gclog_or_tty->print_cr("ParallelTaskTerminator Total yields: %u"
+    " Total spins: %u Total peeks: %u",
     total_yields(),
     total_spins(),
     total_peeks());
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/profiling/spectrapredefineclass/Agent.java	Wed Jul 05 19:37:19 2017 +0200
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.security.*;
+import java.lang.instrument.*;
+import java.lang.reflect.*;
+import java.lang.management.ManagementFactory;
+import com.sun.tools.attach.VirtualMachine;
+
+class A {
+    void m() {
+    }
+}
+
+class B extends A {
+    void m() {
+    }
+}
+
+class C extends A {
+    void m() {
+    }
+}
+
+class Test {
+
+    static public void m() throws Exception {
+        for (int i = 0; i < 20000; i++) {
+            m1(a);
+        }
+        for (int i = 0; i < 4; i++) {
+            m1(b);
+        }
+    }
+
+    static boolean m1(A a) {
+        boolean res = Agent.m2(a);
+        return res;
+    }
+
+    static public A a = new A();
+    static public B b = new B();
+    static public C c = new C();
+}
+
+public class Agent implements ClassFileTransformer {
+
+    static class MemoryChunk {
+        MemoryChunk other;
+        long[] array;
+        MemoryChunk(MemoryChunk other) {
+            this.other = other;
+            array = new long[1024 * 1024 * 1024];
+        }
+    }
+
+    static public boolean m2(A a) {
+        boolean res = false;
+        if (a.getClass() == B.class) {
+            a.m();
+        } else {
+            res = true;
+        }
+        return res;
+    }
+
+    static public void main(String[] args) throws Exception {
+        // Create speculative trap entries
+        Test.m();
+
+        String nameOfRunningVM = ManagementFactory.getRuntimeMXBean().getName();
+        int p = nameOfRunningVM.indexOf('@');
+        String pid = nameOfRunningVM.substring(0, p);
+
+        // Make the nmethod go away
+        for (int i = 0; i < 10; i++) {
+            System.gc();
+        }
+
+        // Redefine class
+        try {
+            VirtualMachine vm = VirtualMachine.attach(pid);
+            vm.loadAgent(System.getProperty("test.classes",".") + "/agent.jar", "");
+            vm.detach();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+
+        Test.m();
+        // GC will hit dead method pointer
+        for (int i = 0; i < 10; i++) {
+            System.gc();
+        }
+    }
+
+    public synchronized byte[] transform(final ClassLoader classLoader,
+                                         final String className,
+                                         Class<?> classBeingRedefined,
+                                         ProtectionDomain protectionDomain,
+                                         byte[] classfileBuffer) {
+        System.out.println("Transforming class " + className);
+        return classfileBuffer;
+    }
+
+    public static void redefine(String agentArgs, Instrumentation instrumentation, Class to_redefine) {
+
+        try {
+            instrumentation.retransformClasses(to_redefine);
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+
+    }
+
+    public static void agentmain(String agentArgs, Instrumentation instrumentation) throws Exception {
+        Agent transformer = new Agent();
+        instrumentation.addTransformer(transformer, true);
+
+        redefine(agentArgs, instrumentation, Test.class);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/profiling/spectrapredefineclass/Launcher.java	Wed Jul 05 19:37:19 2017 +0200
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.io.PrintWriter;
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test
+ * @bug 8038636
+ * @library /testlibrary
+ * @build Agent
+ * @run main ClassFileInstaller Agent
+ * @run main Launcher
+ * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=222 -Xmx1M -XX:ReservedCodeCacheSize=3M Agent
+ */
+public class Launcher {
+    public static void main(String[] args) throws Exception {
+
+      PrintWriter pw = new PrintWriter("MANIFEST.MF");
+      pw.println("Agent-Class: Agent");
+      pw.println("Can-Retransform-Classes: true");
+      pw.close();
+
+      ProcessBuilder pb = new ProcessBuilder();
+      pb.command(new String[] { JDKToolFinder.getJDKTool("jar"), "cmf", "MANIFEST.MF", System.getProperty("test.classes",".") + "/agent.jar", "Agent.class"});
+      pb.start().waitFor();
+    }
+}
--- a/hotspot/test/gc/g1/TestStringDeduplicationTools.java	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/test/gc/g1/TestStringDeduplicationTools.java	Wed Jul 05 19:37:19 2017 +0200
@@ -310,7 +310,9 @@
             }
 
             System.gc();
+
             System.out.println("Heap Memory Usage: " + ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed());
+            System.out.println("Array Header Size: " + unsafe.ARRAY_CHAR_BASE_OFFSET);
 
             System.out.println("End: MemoryUsageTest");
         }
@@ -482,31 +484,40 @@
     public static void testMemoryUsage() throws Exception {
         // Test that memory usage is reduced after deduplication
         OutputAnalyzer output;
-        final String usagePattern = "Heap Memory Usage: (\\d+)";
+        final String heapMemoryUsagePattern = "Heap Memory Usage: (\\d+)";
+        final String arrayHeaderSizePattern = "Array Header Size: (\\d+)";
 
         // Run without deduplication
         output = MemoryUsageTest.run(false);
         output.shouldHaveExitValue(0);
-        final long memoryUsageWithoutDedup = Long.parseLong(output.firstMatch(usagePattern, 1));
+        final long heapMemoryUsageWithoutDedup = Long.parseLong(output.firstMatch(heapMemoryUsagePattern, 1));
+        final long arrayHeaderSizeWithoutDedup = Long.parseLong(output.firstMatch(arrayHeaderSizePattern, 1));
 
         // Run with deduplication
         output = MemoryUsageTest.run(true);
         output.shouldHaveExitValue(0);
-        final long memoryUsageWithDedup = Long.parseLong(output.firstMatch(usagePattern, 1));
+        final long heapMemoryUsageWithDedup = Long.parseLong(output.firstMatch(heapMemoryUsagePattern, 1));
+        final long arrayHeaderSizeWithDedup = Long.parseLong(output.firstMatch(arrayHeaderSizePattern, 1));
+
+        // Sanity check to make sure one instance isn't using compressed class pointers while the other is
+        if (arrayHeaderSizeWithoutDedup != arrayHeaderSizeWithDedup) {
+            throw new Exception("Unexpected difference between array header sizes");
+        }
 
         // Calculate expected memory usage with deduplication enabled. This calculation does
         // not take alignment and padding into account, so it's a conservative estimate.
-        final long sizeOfChar = 2; // bytes
-        final long bytesSaved = (LargeNumberOfStrings - 1) * (StringLength * sizeOfChar + unsafe.ARRAY_CHAR_BASE_OFFSET);
-        final long memoryUsageWithDedupExpected = memoryUsageWithoutDedup - bytesSaved;
+        final long sizeOfChar = unsafe.ARRAY_CHAR_INDEX_SCALE;
+        final long sizeOfCharArray = StringLength * sizeOfChar + arrayHeaderSizeWithoutDedup;
+        final long bytesSaved = (LargeNumberOfStrings - 1) * sizeOfCharArray;
+        final long heapMemoryUsageWithDedupExpected = heapMemoryUsageWithoutDedup - bytesSaved;
 
         System.out.println("Memory usage summary:");
-        System.out.println("   memoryUsageWithoutDedup:      " + memoryUsageWithoutDedup);
-        System.out.println("   memoryUsageWithDedup:         " + memoryUsageWithDedup);
-        System.out.println("   memoryUsageWithDedupExpected: " + memoryUsageWithDedupExpected);
+        System.out.println("   heapMemoryUsageWithoutDedup:      " + heapMemoryUsageWithoutDedup);
+        System.out.println("   heapMemoryUsageWithDedup:         " + heapMemoryUsageWithDedup);
+        System.out.println("   heapMemoryUsageWithDedupExpected: " + heapMemoryUsageWithDedupExpected);
 
-        if (memoryUsageWithDedup > memoryUsageWithDedupExpected) {
-            throw new Exception("Unexpected memory usage, memoryUsageWithDedup should less or equal to memoryUsageWithDedupExpected");
+        if (heapMemoryUsageWithDedup > heapMemoryUsageWithDedupExpected) {
+            throw new Exception("Unexpected memory usage, heapMemoryUsageWithDedup should be less or equal to heapMemoryUsageWithDedupExpected");
         }
     }
 }
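
The savings estimate is now derived entirely from values reported by the running VM instead of a hardcoded char size. A worked example with hypothetical constants (LargeNumberOfStrings and StringLength are defined elsewhere in this test):

    long largeNumberOfStrings = 10000;  // hypothetical
    long stringLength         = 16;     // hypothetical
    long sizeOfChar           = 2;      // unsafe.ARRAY_CHAR_INDEX_SCALE
    long arrayHeaderSize      = 16;     // unsafe.ARRAY_CHAR_BASE_OFFSET (typical)
    long sizeOfCharArray = stringLength * sizeOfChar + arrayHeaderSize;  // 48
    long bytesSaved = (largeNumberOfStrings - 1) * sizeOfCharArray;      // 479952
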
--- a/hotspot/test/runtime/6626217/Test6626217.sh	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/test/runtime/6626217/Test6626217.sh	Wed Jul 05 19:37:19 2017 +0200
@@ -22,7 +22,6 @@
 # 
 
 
-# @ignore 8028733
 # @test @(#)Test6626217.sh
 # @bug 6626217
 # @summary Loader-constraint table allows arrays instead of only the base-classes
--- a/hotspot/test/runtime/6888954/vmerrors.sh	Thu Apr 17 15:20:35 2014 -0700
+++ b/hotspot/test/runtime/6888954/vmerrors.sh	Wed Jul 05 19:37:19 2017 +0200
@@ -85,7 +85,7 @@
     [ $i -lt 10 ] && i2=0$i
 
     "$TESTJAVA/bin/java" $TESTVMOPTS -XX:+IgnoreUnrecognizedVMOptions \
-        -XX:-TransmitErrorReport \
+        -XX:-TransmitErrorReport -XX:-CreateMinidumpOnCrash \
         -XX:ErrorHandlerTest=${i} -version > ${i2}.out 2>&1
 
     # If ErrorHandlerTest is ignored (product build), stop.
--- a/make/Javadoc.gmk	Thu Apr 17 15:20:35 2014 -0700
+++ b/make/Javadoc.gmk	Wed Jul 05 19:37:19 2017 +0200
@@ -72,6 +72,7 @@
 TREEAPI_FIRST_COPYRIGHT_YEAR = 2005
 JNLP_FIRST_COPYRIGHT_YEAR = 1998
 PLUGIN2_FIRST_COPYRIGHT_YEAR = 2007
+JDKNET_FIRST_COPYRIGHT_YEAR = 2014
 
 # Oracle name
 FULL_COMPANY_NAME = Oracle and/or its affiliates
@@ -1160,6 +1161,57 @@
 
 #############################################################
 #
+# jdk.net docs
+#
+
+ALL_OTHER_TARGETS += jdknetdocs
+
+JDKNET_DOCDIR := $(JRE_API_DOCSDIR)/net/socketoptions/spec
+JDKNET2COREAPI := ../../../$(JDKJRE2COREAPI)
+JDKNET_DOCTITLE := jdk.net API
+JDKNET_WINDOWTITLE := jdk.net API
+JDKNET_HEADER := <strong>jdk.net API</strong>
+JDKNET_BOTTOM := $(call CommonBottom,$(JDKNET_FIRST_COPYRIGHT_YEAR))
+JDKNET_PKGS := jdk.net
+
+JDKNET_INDEX_HTML = $(JDKNET_DOCDIR)/index.html
+JDKNET_OPTIONS_FILE = $(DOCSTMPDIR)/jdknet.options
+JDKNET_PACKAGES_FILE = $(DOCSTMPDIR)/jdknet.packages
+
+jdknetdocs: $(JDKNET_INDEX_HTML)
+
+# Set relative location to core api document root
+$(JDKNET_INDEX_HTML): GET2DOCSDIR=$(JDKNET2COREAPI)/..
+
+# Run javadoc if the index file is out of date or missing
+$(JDKNET_INDEX_HTML): $(JDKNET_OPTIONS_FILE) $(JDKNET_PACKAGES_FILE) coredocs
+	$(prep-javadoc)
+	$(call JavadocSummary,$(JDKNET_OPTIONS_FILE),$(JDKNET_PACKAGES_FILE))
+	$(JAVADOC_CMD) -d $(@D) \
+	    @$(JDKNET_OPTIONS_FILE) @$(JDKNET_PACKAGES_FILE)
+
+# Create file with javadoc options in it
+$(JDKNET_OPTIONS_FILE):
+	$(prep-target)
+	@($(call OptionOnly,$(COMMON_JAVADOCFLAGS)) ; \
+	  $(call OptionOnly,-Xdoclint:none) ; \
+	  $(call OptionPair,-sourcepath,$(RELEASEDOCS_SOURCEPATH)) ; \
+	  $(call OptionPair,-encoding,ascii) ; \
+	  $(call OptionOnly,-nodeprecatedlist) ; \
+	  $(call OptionPair,-doctitle,$(JDKNET_DOCTITLE)) ; \
+	  $(call OptionPair,-windowtitle,$(JDKNET_WINDOWTITLE) $(DRAFT_WINTITLE)); \
+	  $(call OptionPair,-header,$(JDKNET_HEADER)$(DRAFT_HEADER)); \
+	  $(call OptionPair,-bottom,$(JDKNET_BOTTOM)$(DRAFT_BOTTOM)); \
+	  $(call OptionTrip,-linkoffline,$(JDKNET2COREAPI),$(COREAPI_DOCSDIR)/); \
+	) >> $@
+
+# Create a file with the package names in it
+$(JDKNET_PACKAGES_FILE): $(DIRECTORY_CACHE) $(call PackageDependencies,$(JDKNET_PKGS))
+	$(prep-target)
+	$(call PackageFilter,$(JDKNET_PKGS))
+
+#############################################################
+#
 # Get a cache of all the directories
 
 $(DIRECTORY_CACHE): $(ALL_EXISTING_SOURCE_DIRS)
--- a/make/common/NON_CORE_PKGS.gmk	Thu Apr 17 15:20:35 2014 -0700
+++ b/make/common/NON_CORE_PKGS.gmk	Wed Jul 05 19:37:19 2017 +0200
@@ -94,7 +94,8 @@
       com.apple.eio
 endif
 
-JDK_PKGS = jdk
+JDK_PKGS = jdk \
+      jdk.net
 
 # non-core packages in rt.jar
 NON_CORE_PKGS = $(DOMAPI_PKGS) \