Merge
author duke
Wed, 05 Jul 2017 17:18:57 +0200
changeset 6089 05094f7246b9
parent 6088 fef4187d7622 (current diff)
parent 6087 d4d925450e84 (diff)
child 6090 10bc903a228d
Merge
--- a/.hgtags-top-repo	Thu Jul 29 13:33:32 2010 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 17:18:57 2017 +0200
@@ -77,3 +77,4 @@
 b218a53ec7d3d42be61d31d6917a6c5c037b6f56 jdk7-b100
 4193eaf5f1b82794c6a0fb1a8d11af43d1b1d611 jdk7-b101
 a136a51f5113da4dad3853b74a8536ab583ab112 jdk7-b102
+be2aedc4e3b1751c1310f334242ba69e90867f38 jdk7-b103
--- a/corba/.hgtags	Thu Jul 29 13:33:32 2010 -0700
+++ b/corba/.hgtags	Wed Jul 05 17:18:57 2017 +0200
@@ -77,3 +77,4 @@
 a56d734a1e970e1a21a8f4feb13053e9a33674c7 jdk7-b100
 86a239832646a74811695428984b6947c0bd6dc8 jdk7-b101
 78561a95779090b5106c8d0f1a75360a027ef087 jdk7-b102
+11e7678c3eb169b77d9a9892fe5e3dfa1d1a0d51 jdk7-b103
--- a/hotspot/.hgtags	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 17:18:57 2017 +0200
@@ -107,3 +107,5 @@
 6c3a919105b68c15b7db923ec9a00006e9560910 jdk7-b101
 ad1977f08c4d69162a0775fe3f9576b9fd521d10 hs19-b03
 c5cadf1a07717955cf60dbaec16e35b529fd2cb0 jdk7-b102
+cb4250ef73b21de6c487ea14e2b0b99eed67b4b6 jdk7-b103
+e55900b5c1b865cac17e18abc639c7dc50de7fd8 hs19-b04
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed Jul 05 17:18:57 2017 +0200
@@ -297,6 +297,7 @@
     case JVM_CONSTANT_NameAndType:        return "JVM_CONSTANT_NameAndType";
     case JVM_CONSTANT_MethodHandle:       return "JVM_CONSTANT_MethodHandle";
     case JVM_CONSTANT_MethodType:         return "JVM_CONSTANT_MethodType";
+    case JVM_CONSTANT_InvokeDynamic:      return "JVM_CONSTANT_InvokeDynamic";
     case JVM_CONSTANT_Invalid:            return "JVM_CONSTANT_Invalid";
     case JVM_CONSTANT_UnresolvedClass:    return "JVM_CONSTANT_UnresolvedClass";
     case JVM_CONSTANT_UnresolvedClassInError:    return "JVM_CONSTANT_UnresolvedClassInError";
@@ -355,6 +356,7 @@
         case JVM_CONSTANT_NameAndType:
         case JVM_CONSTANT_MethodHandle:
         case JVM_CONSTANT_MethodType:
+        case JVM_CONSTANT_InvokeDynamic:
           visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true);
           break;
         }
@@ -517,6 +519,18 @@
                                           + ", type = " + signatureIndex);
                   break;
               }
+
+              case JVM_CONSTANT_InvokeDynamic: {
+                  dos.writeByte(cpConstType);
+                  int value = getIntAt(ci);
+                  short bootstrapMethodIndex = (short) extractLowShortFromInt(value);
+                  short nameAndTypeIndex = (short) extractHighShortFromInt(value);
+                  dos.writeShort(bootstrapMethodIndex);
+                  dos.writeShort(nameAndTypeIndex);
+                  if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " + bootstrapMethodIndex
+                                          + ", N&T = " + nameAndTypeIndex);
+                  break;
+              }
               default:
                   throw new InternalError("unknown tag: " + cpConstType);
           } // switch
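
In HotSpot's constant pool, the two operands of a CONSTANT_InvokeDynamic entry share one 32-bit slot: the bootstrap-method index in the low 16 bits and the name-and-type index in the high 16 bits, which the new case unpacks and writes out as tag 17 followed by two u2 values. A minimal Java sketch of that round trip, assuming the same packing; the class and helper names are illustrative, not part of the SA:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class IndyCpEntrySketch {
        static final int JVM_CONSTANT_InvokeDynamic = 17;

        // Same extraction as the SA's extractLowShortFromInt / extractHighShortFromInt.
        static int lowShort(int value)  { return value & 0xFFFF; }
        static int highShort(int value) { return (value >>> 16) & 0xFFFF; }

        // Serialize one packed entry the way the new writeBytes case does:
        // tag (u1), bootstrap_method_attr_index (u2), name_and_type_index (u2).
        static byte[] write(int packed) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream dos = new DataOutputStream(bytes);
            dos.writeByte(JVM_CONSTANT_InvokeDynamic);
            dos.writeShort(lowShort(packed));   // bootstrap method index
            dos.writeShort(highShort(packed));  // name-and-type index
            return bytes.toByteArray();
        }

        public static void main(String[] args) throws IOException {
            int packed = (42 << 16) | 7;  // N&T index 42, BSM index 7
            System.out.println(write(packed).length);  // 5 bytes: 1 tag + 2 + 2
        }
    }
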
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Wed Jul 05 17:18:57 2017 +0200
@@ -42,6 +42,7 @@
     public static final int JVM_CONSTANT_NameAndType        = 12;
     public static final int JVM_CONSTANT_MethodHandle       = 15;
     public static final int JVM_CONSTANT_MethodType         = 16;
+    public static final int JVM_CONSTANT_InvokeDynamic      = 17;
 
     // JVM_CONSTANT_MethodHandle subtypes
     public static final int JVM_REF_getField                = 1;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Wed Jul 05 17:18:57 2017 +0200
@@ -303,12 +303,12 @@
                 case JVM_CONSTANT_MethodHandle: {
                      dos.writeByte(cpConstType);
                      int value = cpool.getIntAt(ci);
-                     short refIndex = (short) extractHighShortFromInt(value);
-                     byte  refKind  = (byte)  extractLowShortFromInt(value);
-                     dos.writeByte(refKind);
-                     dos.writeShort(refIndex);
-                     if (DEBUG) debugMessage("CP[" + ci + "] = MH index = " + refIndex
-                                        + ", kind = " + refKind);
+                     byte  refKind  = (byte)  extractLowShortFromInt(value);
+                     short refIndex = (short) extractHighShortFromInt(value);
+                     dos.writeByte(refKind);
+                     dos.writeShort(refIndex);
+                     if (DEBUG) debugMessage("CP[" + ci + "] = MH kind = " + refKind
+                                        + ", index = " + refIndex);
                      break;
                 }
 
@@ -321,6 +321,18 @@
                      break;
                 }
 
+                case JVM_CONSTANT_InvokeDynamic: {
+                     dos.writeByte(cpConstType);
+                     int value = cpool.getIntAt(ci);
+                     short bootstrapMethodIndex = (short) extractLowShortFromInt(value);
+                     short nameAndTypeIndex = (short) extractHighShortFromInt(value);
+                     dos.writeShort(bootstrapMethodIndex);
+                     dos.writeShort(nameAndTypeIndex);
+                     if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " +
+                           bootstrapMethodIndex + ", N&T = " + nameAndTypeIndex);
+                     break;
+                }
+
                 default:
                   throw new InternalError("Unknown tag: " + cpConstType);
             } // switch
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Wed Jul 05 17:18:57 2017 +0200
@@ -582,6 +582,11 @@
                buf.cell(Integer.toString(cpool.getIntAt(index)));
                break;
 
+            case JVM_CONSTANT_InvokeDynamic:
+               buf.cell("JVM_CONSTANT_InvokeDynamic");
+               buf.cell(genLowHighShort(cpool.getIntAt(index)));
+               break;
+
             default:
                throw new InternalError("unknown tag: " + ctag);
          }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Wed Jul 05 17:18:57 2017 +0200
@@ -40,6 +40,7 @@
   private static int JVM_CONSTANT_NameAndType             = 12;
   private static int JVM_CONSTANT_MethodHandle            = 15;  // JSR 292
   private static int JVM_CONSTANT_MethodType              = 16;  // JSR 292
+  private static int JVM_CONSTANT_InvokeDynamic           = 17;  // JSR 292
   private static int JVM_CONSTANT_Invalid                 = 0;   // For bad value initialization
   private static int JVM_CONSTANT_UnresolvedClass         = 100; // Temporary tag until actual use
   private static int JVM_CONSTANT_ClassIndex              = 101; // Temporary tag while constructing constant pool
@@ -78,6 +79,7 @@
   public boolean isUtf8()             { return tag == JVM_CONSTANT_Utf8; }
   public boolean isMethodHandle()     { return tag == JVM_CONSTANT_MethodHandle; }
   public boolean isMethodType()       { return tag == JVM_CONSTANT_MethodType; }
+  public boolean isInvokeDynamic()    { return tag == JVM_CONSTANT_InvokeDynamic; }
 
   public boolean isInvalid()          { return tag == JVM_CONSTANT_Invalid; }
 
--- a/hotspot/make/hotspot_version	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/make/hotspot_version	Wed Jul 05 17:18:57 2017 +0200
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=19
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=04
+HS_BUILD_NUMBER=05
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -1007,9 +1007,9 @@
         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
       __ delayed()->cmp(to_from, byte_count);
       if (NOLp == NULL)
-        __ brx(Assembler::greaterEqual, false, Assembler::pt, no_overlap_target);
+        __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
       else
-        __ brx(Assembler::greaterEqual, false, Assembler::pt, (*NOLp));
+        __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
       __ delayed()->nop();
   }
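
The switch from greaterEqual to greaterEqualUnsigned matters because to_from holds a pointer difference: when that difference has its high bit set, a signed compare against byte_count takes the wrong arm and the stub treats clearly disjoint ranges as overlapping. A small Java sketch of the same pitfall, with Long.compareUnsigned standing in for the unsigned branch (values made up for illustration):

    public class UnsignedBranchSketch {
        public static void main(String[] args) {
            long toFrom = 0xFFFF_FFFF_0000_0000L;  // dst - src, high bit set
            long byteCount = 64;                   // size of the copy

            // Signed comparison (the old greaterEqual): wrongly reports overlap.
            System.out.println(toFrom >= byteCount);  // false

            // Unsigned comparison (greaterEqualUnsigned): correctly disjoint.
            System.out.println(Long.compareUnsigned(toFrom, byteCount) >= 0);  // true
        }
    }
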
 
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -728,8 +728,8 @@
   }
 
   // Get the invoker methodOop from the constant pool.
-  intptr_t f2_value = cpool->cache()->main_entry_at(index)->f2();
-  methodOop signature_invoker = methodOop(f2_value);
+  oop f1_value = cpool->cache()->main_entry_at(index)->f1();
+  methodOop signature_invoker = methodOop(f1_value);
   assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
          "correct result from LinkResolver::resolve_invokedynamic");
 
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -694,30 +694,21 @@
 // ------------------------------------------------------------------
 // ciMethod::is_method_handle_invoke
 //
-// Return true if the method is a MethodHandle target.
+// Return true if the method is an instance of one of the two
+// signature-polymorphic MethodHandle methods, invokeExact or invokeGeneric.
 bool ciMethod::is_method_handle_invoke() const {
-  bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
-               methodOopDesc::is_method_handle_invoke_name(name()->sid()));
-#ifdef ASSERT
-  if (is_loaded()) {
-    bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
-    {
-      VM_ENTRY_MARK;
-      bool flag3 = get_methodOop()->is_method_handle_invoke();
-      assert(flag2 == flag3, "consistent");
-      assert(flag  == flag3, "consistent");
-    }
-  }
-#endif //ASSERT
-  return flag;
+  if (!is_loaded())  return false;
+  VM_ENTRY_MARK;
+  return get_methodOop()->is_method_handle_invoke();
 }
 
 // ------------------------------------------------------------------
 // ciMethod::is_method_handle_adapter
 //
 // Return true if the method is a generated MethodHandle adapter.
+// These are built by MethodHandleCompiler.
 bool ciMethod::is_method_handle_adapter() const {
-  check_is_loaded();
+  if (!is_loaded())  return false;
   VM_ENTRY_MARK;
   return get_methodOop()->is_method_handle_adapter();
 }
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -122,7 +122,7 @@
         if (!EnableMethodHandles ||
             _major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
           classfile_parse_error(
-            (!EnableInvokeDynamic ?
+            (!EnableMethodHandles ?
              "This JVM does not support constant tag %u in class file %s" :
              "Class file version does not support constant tag %u in class file %s"),
             tag, CHECK);
@@ -140,6 +140,22 @@
           ShouldNotReachHere();
         }
         break;
+      case JVM_CONSTANT_InvokeDynamic :
+        {
+          if (!EnableInvokeDynamic ||
+              _major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
+            classfile_parse_error(
+              (!EnableInvokeDynamic ?
+               "This JVM does not support constant tag %u in class file %s" :
+               "Class file version does not support constant tag %u in class file %s"),
+              tag, CHECK);
+          }
+          cfs->guarantee_more(5, CHECK);  // bsm_index, name_and_type_index, tag/access_flags
+          u2 bootstrap_method_index = cfs->get_u2_fast();
+          u2 name_and_type_index = cfs->get_u2_fast();
+          cp->invoke_dynamic_at_put(index, bootstrap_method_index, name_and_type_index);
+        }
+        break;
       case JVM_CONSTANT_Integer :
         {
           cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
@@ -414,6 +430,24 @@
               ref_index, CHECK_(nullHandle));
         }
         break;
+      case JVM_CONSTANT_InvokeDynamic :
+        {
+          int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
+          int name_and_type_ref_index = cp->invoke_dynamic_name_and_type_ref_index_at(index);
+          check_property((bootstrap_method_ref_index == 0 && AllowTransitionalJSR292)
+                         ||
+                         (valid_cp_range(bootstrap_method_ref_index, length) &&
+                          cp->tag_at(bootstrap_method_ref_index).is_method_handle()),
+                         "Invalid constant pool index %u in class file %s",
+                         bootstrap_method_ref_index,
+                         CHECK_(nullHandle));
+          check_property(valid_cp_range(name_and_type_ref_index, length) &&
+                         cp->tag_at(name_and_type_ref_index).is_name_and_type(),
+                         "Invalid constant pool index %u in class file %s",
+                         name_and_type_ref_index,
+                         CHECK_(nullHandle));
+          break;
+        }
       default:
         fatal(err_msg("bad constant pool tag value %u",
                       cp->tag_at(index).value()));
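
The parser handles the new tag in its usual two passes: the first pass only reads the operands (guarantee_more(5, ...) covers the two u2 operands plus the following entry's tag byte), and the later validation pass checks that the referenced slots carry the expected tags. A hedged Java sketch of both steps; ConstantPoolSketch and its arrays are hypothetical stand-ins for constantPoolOop, and the transitional bsm_index == 0 escape hatch guarded by AllowTransitionalJSR292, along with the valid_cp_range checks, is omitted:

    import java.io.DataInputStream;
    import java.io.IOException;

    public class ConstantPoolSketch {
        static final int JVM_CONSTANT_NameAndType   = 12;
        static final int JVM_CONSTANT_MethodHandle  = 15;
        static final int JVM_CONSTANT_InvokeDynamic = 17;

        final int[] tags;
        final int[] bsmIndex;
        final int[] nameAndTypeIndex;

        ConstantPoolSketch(int length) {
            tags = new int[length];
            bsmIndex = new int[length];
            nameAndTypeIndex = new int[length];
        }

        // Pass 1: read the two u2 operands, like the get_u2_fast calls above.
        void parseInvokeDynamicAt(int index, DataInputStream in) throws IOException {
            tags[index] = JVM_CONSTANT_InvokeDynamic;
            bsmIndex[index] = in.readUnsignedShort();
            nameAndTypeIndex[index] = in.readUnsignedShort();
        }

        // Pass 2: the cross-references must land on entries with the right tags.
        void validateInvokeDynamicAt(int index) {
            check(tags[bsmIndex[index]] == JVM_CONSTANT_MethodHandle,
                  "bootstrap method ref must be a CONSTANT_MethodHandle");
            check(tags[nameAndTypeIndex[index]] == JVM_CONSTANT_NameAndType,
                  "name and type ref must be a CONSTANT_NameAndType");
        }

        static void check(boolean ok, String message) {
            if (!ok) throw new ClassFormatError(message);
        }
    }
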
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -2507,6 +2507,10 @@
                                                 int caller_bci,
                                                 TRAPS) {
   Handle empty;
+  guarantee(bootstrap_method.not_null() &&
+            java_dyn_MethodHandle::is_instance(bootstrap_method()),
+            "caller must supply a valid BSM");
+
   Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty));
   MethodHandles::init_MemberName(caller_mname(), caller_method());
 
@@ -2537,20 +2541,61 @@
   return call_site_oop;
 }
 
-Handle SystemDictionary::find_bootstrap_method(KlassHandle caller, TRAPS) {
+Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int caller_bci,
+                                               int cache_index, TRAPS) {
   Handle empty;
-  if (!caller->oop_is_instance())  return empty;
-
-  instanceKlassHandle ik(THREAD, caller());
-
-  oop boot_method_oop = ik->bootstrap_method();
-  if (boot_method_oop != NULL) {
+
+  constantPoolHandle pool;
+  {
+    klassOop caller = caller_method->method_holder();
+    if (!Klass::cast(caller)->oop_is_instance())  return empty;
+    pool = constantPoolHandle(THREAD, instanceKlass::cast(caller)->constants());
+  }
+
+  int constant_pool_index = pool->cache()->entry_at(cache_index)->constant_pool_index();
+  constantTag tag = pool->tag_at(constant_pool_index);
+
+  if (tag.is_invoke_dynamic()) {
+    // JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type]
+    // The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
+    int bsm_index = pool->invoke_dynamic_bootstrap_method_ref_index_at(constant_pool_index);
+    if (bsm_index != 0) {
+      int bsm_index_in_cache = pool->cache()->entry_at(cache_index)->bootstrap_method_index_in_cache();
+      DEBUG_ONLY(int bsm_index_2 = pool->cache()->entry_at(bsm_index_in_cache)->constant_pool_index());
+      assert(bsm_index == bsm_index_2, "BSM constant lifted to cache");
+      if (TraceMethodHandles) {
+        tty->print_cr("resolving bootstrap method for "PTR_FORMAT" at %d at cache[%d]CP[%d]...",
+                      (intptr_t) caller_method(), caller_bci, cache_index, constant_pool_index);
+      }
+      oop bsm_oop = pool->resolve_cached_constant_at(bsm_index_in_cache, CHECK_(empty));
+      if (TraceMethodHandles) {
+        tty->print_cr("bootstrap method for "PTR_FORMAT" at %d retrieved as "PTR_FORMAT":",
+                      (intptr_t) caller_method(), caller_bci, (intptr_t) bsm_oop);
+      }
+      assert(bsm_oop->is_oop()
+             && java_dyn_MethodHandle::is_instance(bsm_oop), "must be sane");
+      return Handle(THREAD, bsm_oop);
+    }
+    // else null BSM; fall through
+  } else if (tag.is_name_and_type()) {
+    // JSR 292 EDR does not have JVM_CONSTANT_InvokeDynamic
+    // a bare name&type defaults its BSM to null, so fall through...
+  } else {
+    ShouldNotReachHere();  // verifier does not allow this
+  }
+
+  // Fall through to pick up the per-class bootstrap method.
+  // This mechanism may go away in the PFD.
+  assert(AllowTransitionalJSR292, "else the verifier should have stopped us already");
+  oop bsm_oop = instanceKlass::cast(caller_method->method_holder())->bootstrap_method();
+  if (bsm_oop != NULL) {
     if (TraceMethodHandles) {
-      tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop);
+      tty->print_cr("bootstrap method for "PTR_FORMAT" registered as "PTR_FORMAT":",
+                    (intptr_t) caller_method(), (intptr_t) bsm_oop);
     }
-    assert(boot_method_oop->is_oop()
-           && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
-    return Handle(THREAD, boot_method_oop);
+    assert(bsm_oop->is_oop()
+           && java_dyn_MethodHandle::is_instance(bsm_oop), "must be sane");
+    return Handle(THREAD, bsm_oop);
   }
 
   return empty;
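
The rewritten find_bootstrap_method prefers the per-call-site BSM referenced from the CONSTANT_InvokeDynamic entry and only falls back to the per-class bootstrap method, the EDR-era mechanism the comment says may go away in the PFD. A compact Java sketch of just that ordering; all names here are invented for illustration:

    import java.util.Optional;

    public class BsmLookupSketch {
        // Per-call-site BSM recorded in the CONSTANT_InvokeDynamic entry;
        // index 0 means the entry carries none (the transitional case).
        static Optional<Object> perCallSiteBsm(int bsmRefIndex, Object resolvedHandle) {
            return bsmRefIndex != 0 ? Optional.of(resolvedHandle) : Optional.empty();
        }

        // CP entry first, then the per-class bootstrap_method() default.
        static Optional<Object> findBootstrapMethod(int bsmRefIndex,
                                                    Object resolvedHandle,
                                                    Object perClassBsm) {
            return perCallSiteBsm(bsmRefIndex, resolvedHandle)
                    .or(() -> Optional.ofNullable(perClassBsm));
        }
    }
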
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -492,7 +492,10 @@
                                           TRAPS);
 
   // coordinate with Java about bootstrap methods
-  static Handle    find_bootstrap_method(KlassHandle caller, TRAPS);
+  static Handle    find_bootstrap_method(methodHandle caller_method,
+                                         int caller_bci,  // N.B. must be an invokedynamic
+                                         int cache_index, // must be corresponding main_entry
+                                         TRAPS);
 
   // Utility for printing loader "name" as part of tracing constraints
   static const char* loader_name(oop loader) {
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -1913,7 +1913,8 @@
   unsigned int types = (opcode == Bytecodes::_invokeinterface
                                 ? 1 << JVM_CONSTANT_InterfaceMethodref
                       : opcode == Bytecodes::_invokedynamic
-                                ? 1 << JVM_CONSTANT_NameAndType
+                                ? (1 << JVM_CONSTANT_NameAndType
+                                  |1 << JVM_CONSTANT_InvokeDynamic)
                                 : 1 << JVM_CONSTANT_Methodref);
   verify_cp_type(index, cp, types, CHECK_VERIFY(this));
 
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -202,6 +202,11 @@
 //----------------------------------------------------------------------------------------------------
 // Implementation of AdapterBlob
 
+AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
+  BufferBlob("I2C/C2I adapters", size, cb) {
+  CodeCache::commit(this);
+}
+
 AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
 
@@ -210,7 +215,6 @@
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     blob = new (size) AdapterBlob(size, cb);
-    CodeCache::commit(blob);
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -219,8 +219,7 @@
 
 class AdapterBlob: public BufferBlob {
 private:
-  AdapterBlob(int size)                 : BufferBlob("I2C/C2I adapters", size) {}
-  AdapterBlob(int size, CodeBuffer* cb) : BufferBlob("I2C/C2I adapters", size, cb) {}
+  AdapterBlob(int size, CodeBuffer* cb);
 
 public:
   // Creation
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -664,19 +664,14 @@
         return;
       }
 
-      // XXX use a global constant instead of 64!
-      typedef struct OopTaskQueuePadded {
-        OopTaskQueue work_queue;
-        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
-      } OopTaskQueuePadded;
-
+      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
       for (i = 0; i < num_queues; i++) {
-        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
-        if (q_padded == NULL) {
+        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
+        if (q == NULL) {
           warning("work_queue allocation failure.");
           return;
         }
-        _task_queues->register_queue(i, &q_padded->work_queue);
+        _task_queues->register_queue(i, q);
       }
       for (i = 0; i < num_queues; i++) {
         _task_queues->queue(i)->initialize();
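
Both the removed hand-rolled OopTaskQueuePadded struct and the Padded<OopTaskQueue> template that replaces it serve the same purpose: giving each worker's queue its own cache line so one thread's pushes and pops don't invalidate a neighbor's line. A Java rendering of the intent, assuming 64-byte lines (the hard-coded constant the removed XXX comment complained about); the JVM does not guarantee field layout, so this is a sketch of the idea rather than a reliable Java technique:

    // One counter per GC worker thread; without the padding, adjacent
    // counters can share a cache line and updates cause false sharing.
    public class PaddedCounterSketch {
        static class PaddedCounter {
            volatile long value;
            // Pad out the rest of an assumed 64-byte cache line.
            long p1, p2, p3, p4, p5, p6, p7;
        }

        public static void main(String[] args) {
            int numQueues = Runtime.getRuntime().availableProcessors();
            PaddedCounter[] counters = new PaddedCounter[numQueues];
            for (int i = 0; i < numQueues; i++) {
                counters[i] = new PaddedCounter();  // analogous to new PaddedOopTaskQueue()
            }
            counters[0].value++;  // touches only counter 0's line
        }
    }
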
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -234,6 +234,11 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (_gc_cause != GCCause::_gc_locker &&
       gch->total_full_collections_completed() <= _full_gc_count_before) {
+    // maybe we should change the condition to test _gc_cause ==
+    // GCCause::_java_lang_system_gc, instead of
+    // _gc_cause != GCCause::_gc_locker
+    assert(_gc_cause == GCCause::_java_lang_system_gc,
+           "the only way to get here if this was a System.gc()-induced GC");
     assert(ExplicitGCInvokesConcurrent, "Error");
     // Now, wait for witnessing concurrent gc cycle to complete,
     // but do so in native mode, because we want to lock the
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -271,21 +271,16 @@
     if (cas_res == prev_epoch_entry) {
       // We successfully updated the card num value in the epoch entry
       count_ptr->_count = 0; // initialize counter for new card num
+      jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
 
       // Even though the region containing the card at old_card_num was not
       // in the young list when old_card_num was recorded in the epoch
       // cache it could have been added to the free list and subsequently
-      // added to the young list in the intervening time. If the evicted
-      // card is in a young region just return the card_ptr and the evicted
-      // card will not be cleaned. See CR 6817995.
-
-      jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
-      if (is_young_card(old_card_ptr)) {
-        *count = 0;
-        // We can defer the processing of card_ptr
-        *defer = true;
-        return card_ptr;
-      }
+      // added to the young list in the intervening time. See CR 6817995.
+      // We do not deal with this case here - it will be handled in
+      // HeapRegion::oops_on_card_seq_iterate_careful after it has been
+      // determined that the region containing the card has been allocated
+      // to, and it's safe to check the young type of the region.
 
       // We do not want to defer processing of card_ptr in this case
       // (we need to refine old_card_ptr and card_ptr)
@@ -301,22 +296,22 @@
   jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
   assert(cached_ptr != NULL, "bad cached card ptr");
 
-  if (is_young_card(cached_ptr)) {
-    // The region containing cached_ptr has been freed during a clean up
-    // pause, reallocated, and tagged as young.
-    assert(cached_ptr != card_ptr, "shouldn't be");
+  // We've just inserted a card pointer into the card count cache
+  // and got back the card that we just inserted or (evicted) the
+  // previous contents of that count slot.
 
-    // We've just inserted a new old-gen card pointer into the card count
-    // cache and evicted the previous contents of that count slot.
-    // The evicted card pointer has been determined to be in a young region
-    // and so cannot be the newly inserted card pointer (that will be
-    // in an old region).
-    // The count for newly inserted card will be set to zero during the
-    // insertion, so we don't want to defer the cleaning of the newly
-    // inserted card pointer.
-    assert(*defer == false, "deferring non-hot card");
-    return NULL;
-  }
+  // The card we got back could be in a young region. When the
+  // returned card (if evicted) was originally inserted, we had
+  // determined that its containing region was not young. However
+  // it is possible for the region to be freed during a cleanup
+  // pause, then reallocated and tagged as young which will result
+  // in the returned card residing in a young region.
+  //
+  // We do not deal with this case here - the change from non-young
+  // to young could be observed at any time - it will be handled in
+  // HeapRegion::oops_on_card_seq_iterate_careful after it has been
+  // determined that the region containing the card has been allocated
+  // to.
 
   // The card pointer we obtained from card count cache is not hot
   // so do not store it in the cache; return it for immediate
@@ -325,7 +320,7 @@
     return cached_ptr;
   }
 
-  // Otherwise, the pointer we got from the _card_counts is hot.
+  // Otherwise, the pointer we got from the _card_counts cache is hot.
   jbyte* res = NULL;
   MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
   if (_n_hot == _hot_cache_size) {
@@ -338,17 +333,8 @@
   if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
   _n_hot++;
 
-  if (res != NULL) {
-    // Even though the region containg res was not in the young list
-    // when it was recorded in the hot cache it could have been added
-    // to the free list and subsequently added to the young list in
-    // the intervening time. If res is in a young region, return NULL
-    // so that res is not cleaned. See CR 6817995.
-
-    if (is_young_card(res)) {
-      res = NULL;
-    }
-  }
+  // The card obtained from the hot card cache could be in a young
+  // region. See above on how this can happen.
 
   return res;
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -266,6 +266,12 @@
       _cm->clearNextBitmap();
       _sts.leave();
     }
+
+    // Update the number of full collections that have been
+    // completed. This will also notify the FullGCCount_lock in case a
+    // Java thread is waiting for a full GC to happen (e.g., it
+    // called System.gc() with +ExplicitGCInvokesConcurrent).
+    g1->increment_full_collections_completed(true /* outer */);
   }
   assert(_should_terminate, "just checking");
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -638,6 +638,11 @@
 
     // Now retry the allocation.
     if (_cur_alloc_region != NULL) {
+      if (allocated_young_region != NULL) {
+        // We need to ensure that the store to top does not
+        // float above the setting of the young type.
+        OrderAccess::storestore();
+      }
       res = _cur_alloc_region->allocate(word_size);
     }
   }
@@ -809,7 +814,8 @@
   }
 };
 
-void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
+void G1CollectedHeap::do_collection(bool explicit_gc,
+                                    bool clear_all_soft_refs,
                                     size_t word_size) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
@@ -821,10 +827,6 @@
     Universe::print_heap_before_gc();
   }
 
-  if (full && DisableExplicitGC) {
-    return;
-  }
-
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
 
@@ -837,9 +839,11 @@
     IsGCActiveMark x;
 
     // Timing
+    bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
+    assert(!system_gc || explicit_gc, "invariant");
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                 PrintGC, true, gclog_or_tty);
 
     TraceMemoryManagerStats tms(true /* fullGC */);
@@ -944,7 +948,7 @@
     heap_region_iterate(&rs_clear);
 
     // Resize the heap if necessary.
-    resize_if_necessary_after_full_collection(full ? 0 : word_size);
+    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
@@ -1009,13 +1013,18 @@
             "young list should be empty at this point");
   }
 
+  // Update the number of full collections that have been completed.
+  increment_full_collections_completed(false /* outer */);
+
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
 }
 
 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
-  do_collection(true, clear_all_soft_refs, 0);
+  do_collection(true,                /* explicit_gc */
+                clear_all_soft_refs,
+                0                    /* word_size */);
 }
 
 // This code is mostly copied from TenuredGeneration.
@@ -1331,6 +1340,7 @@
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
+  _full_collections_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL) {
@@ -1689,6 +1699,51 @@
   return car->free();
 }
 
+bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
+  return
+    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+}
+
+void G1CollectedHeap::increment_full_collections_completed(bool outer) {
+  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+
+  // We have already incremented _total_full_collections at the start
+  // of the GC, so total_full_collections() represents how many full
+  // collections have been started.
+  unsigned int full_collections_started = total_full_collections();
+
+  // Given that this method is called at the end of a Full GC or of a
+  // concurrent cycle, and those can be nested (i.e., a Full GC can
+  // interrupt a concurrent cycle), the number of full collections
+  // completed should be either one (in the case where there was no
+  // nesting) or two (when a Full GC interrupted a concurrent cycle)
+  // behind the number of full collections started.
+
+  // This is the case for the inner caller, i.e. a Full GC.
+  assert(outer ||
+         (full_collections_started == _full_collections_completed + 1) ||
+         (full_collections_started == _full_collections_completed + 2),
+         err_msg("for inner caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  // This is the case for the outer caller, i.e. the concurrent cycle.
+  assert(!outer ||
+         (full_collections_started == _full_collections_completed + 1),
+         err_msg("for outer caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  _full_collections_completed += 1;
+
+  // This notify_all() will ensure that a thread that called
+  // System.gc() (with ExplicitGCInvokesConcurrent set or not)
+  // and is waiting for a full GC to finish will be woken up. It is
+  // waiting in VM_G1IncCollectionPause::doit_epilogue().
+  FullGCCount_lock->notify_all();
+}
+
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1709,25 +1764,41 @@
   // The caller doesn't have the Heap_lock
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
-  int gc_count_before;
+  unsigned int gc_count_before;
+  unsigned int full_gc_count_before;
   {
     MutexLocker ml(Heap_lock);
     // Read the GC count while holding the Heap_lock
     gc_count_before = SharedHeap::heap()->total_collections();
+    full_gc_count_before = SharedHeap::heap()->total_full_collections();
 
     // Don't want to do a GC until cleanup is completed.
     wait_for_cleanup_complete();
-  } // We give up heap lock; VMThread::execute gets it back below
-  switch (cause) {
-    case GCCause::_scavenge_alot: {
-      // Do an incremental pause, which might sometimes be abandoned.
-      VM_G1IncCollectionPause op(gc_count_before, cause);
+
+    // We give up heap lock; VMThread::execute gets it back below
+  }
+
+  if (should_do_concurrent_full_gc(cause)) {
+    // Schedule an initial-mark evacuation pause that will start a
+    // concurrent cycle.
+    VM_G1IncCollectionPause op(gc_count_before,
+                               true, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               cause);
+    VMThread::execute(&op);
+  } else {
+    if (cause == GCCause::_gc_locker
+        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+      // Schedule a standard evacuation pause.
+      VM_G1IncCollectionPause op(gc_count_before,
+                                 false, /* should_initiate_conc_mark */
+                                 g1_policy()->max_pause_time_ms(),
+                                 cause);
       VMThread::execute(&op);
-      break;
-    }
-    default: {
-      // In all other cases, we currently do a full gc.
-      VM_G1CollectFull op(gc_count_before, cause);
+    } else {
+      // Schedule a Full GC.
+      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
       VMThread::execute(&op);
     }
   }
@@ -1989,6 +2060,11 @@
 
 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                   HeapRegionClosure *cl) {
+  if (r == NULL) {
+    // The CSet is empty so there's nothing to do.
+    return;
+  }
+
   assert(r->in_collection_set(),
          "Start region must be a member of the collection set.");
   HeapRegion* cur = r;
@@ -2481,11 +2557,13 @@
 }
 
 void G1CollectedHeap::do_collection_pause() {
+  assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");
+
   // Read the GC count while holding the Heap_lock
   // we need to do this _before_ wait_for_cleanup_complete(), to
   // ensure that we do not give up the heap lock and potentially
   // pick up the wrong count
-  int gc_count_before = SharedHeap::heap()->total_collections();
+  unsigned int gc_count_before = SharedHeap::heap()->total_collections();
 
   // Don't want to do a GC pause while cleanup is being completed!
   wait_for_cleanup_complete();
@@ -2493,7 +2571,10 @@
   g1_policy()->record_stop_world_start();
   {
     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1IncCollectionPause op(gc_count_before);
+    VM_G1IncCollectionPause op(gc_count_before,
+                               false, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               GCCause::_g1_inc_collection_pause);
     VMThread::execute(&op);
   }
 }
@@ -2612,7 +2693,7 @@
 };
 
 void
-G1CollectedHeap::do_collection_pause_at_safepoint() {
+G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
@@ -2637,8 +2718,12 @@
       else
         strcat(verbose_str, "(partial)");
     }
-    if (g1_policy()->during_initial_mark_pause())
+    if (g1_policy()->during_initial_mark_pause()) {
       strcat(verbose_str, " (initial-mark)");
+      // We are about to start a marking cycle, so we increment the
+      // full collection counter.
+      increment_total_full_collections();
+    }
 
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
@@ -2661,7 +2746,6 @@
              "young list should be well formed");
     }
 
-    bool abandoned = false;
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
 
@@ -2743,7 +2827,7 @@
 
       // Now choose the CS. We may abandon a pause if we find no
       // region that will fit in the MMU pause.
-      bool abandoned = g1_policy()->choose_collection_set();
+      bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms);
 
       // Nothing to do if we were unable to choose a collection set.
       if (!abandoned) {
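
The OrderAccess::storestore() added to the allocation path pairs with the young-type filtering introduced in HeapRegion::oops_on_card_seq_iterate_careful below: any thread that observes the new top must also be able to observe the region already tagged as young. A Java sketch of that publication order using VarHandle.storeStoreFence(); RegionSketch is illustrative only, and the reader's load ordering (which the VM gets from its card-processing protocol) is elided:

    import java.lang.invoke.VarHandle;

    public class RegionSketch {
        boolean young;  // the region's young type, set before any allocation
        long top;       // the published allocation top

        // Writer, i.e. the allocation path patched above.
        void allocateAsYoung(long newTop) {
            young = true;                 // 1. tag the region as young
            VarHandle.storeStoreFence();  // 2. the store to top must not float above the tag
            top = newTop;                 // 3. publish the allocation
        }

        // Reader, i.e. a concurrent refinement thread: once it has
        // established that top was written (load ordering elided here),
        // it can rely on young already being visible as true.
        boolean youngAfterAllocationSeen() {
            return top != 0 && young;
        }
    }
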
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -277,6 +277,18 @@
   void update_surviving_young_words(size_t* surv_young_words);
   void cleanup_surviving_young_words();
 
+  // It decides whether an explicit GC should start a concurrent cycle
+  // instead of doing a STW GC. Currently, a concurrent cycle is
+  // explicitly started if:
+  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
+  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  // Keeps track of how many "full collections" (i.e., Full GCs or
+  // concurrent cycles) we have completed. The number of them we have
+  // started is maintained in _total_full_collections in CollectedHeap.
+  volatile unsigned int _full_collections_completed;
+
 protected:
 
   // Returns "true" iff none of the gc alloc regions have any allocations
@@ -356,13 +368,14 @@
   // GC pause.
   void  retire_alloc_region(HeapRegion* alloc_region, bool par);
 
-  // Helper function for two callbacks below.
-  // "full", if true, indicates that the GC is for a System.gc() request,
-  // and should collect the entire heap.  If "clear_all_soft_refs" is true,
-  // all soft references are cleared during the GC.  If "full" is false,
-  // "word_size" describes the allocation that the GC should
-  // attempt (at least) to satisfy.
-  void do_collection(bool full, bool clear_all_soft_refs,
+  // - if explicit_gc is true, the GC is for a System.gc() or a heap
+  // inspection request and should collect the entire heap
+  // - if clear_all_soft_refs is true, all soft references are cleared
+  // during the GC
+  // - if explicit_gc is false, word_size describes the allocation that
+  // the GC should attempt (at least) to satisfy
+  void do_collection(bool explicit_gc,
+                     bool clear_all_soft_refs,
                      size_t word_size);
 
   // Callback from VM_G1CollectFull operation.
@@ -431,6 +444,26 @@
         _in_cset_fast_test_length * sizeof(bool));
   }
 
+  // This is called at the end of either a concurrent cycle or a Full
+  // GC to update the number of full collections completed. Those two
+  // can happen in a nested fashion, i.e., we start a concurrent
+  // cycle, a Full GC happens half-way through it which ends first,
+  // and then the cycle notices that a Full GC happened and ends
+  // too. The outer parameter is a boolean to help us do a bit tighter
+  // consistency checking in the method. If outer is false, the caller
+  // is the inner caller in the nesting (i.e., the Full GC). If outer
+  // is true, the caller is the outer caller in this nesting (i.e.,
+  // the concurrent cycle). Further nesting is not currently
+  // supported. The end of the this call also notifies the
+  // FullGCCount_lock in case a Java thread is waiting for a full GC
+  // to happen (e.g., it called System.gc() with
+  // +ExplicitGCInvokesConcurrent).
+  void increment_full_collections_completed(bool outer);
+
+  unsigned int full_collections_completed() {
+    return _full_collections_completed;
+  }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -444,7 +477,7 @@
 
   // The guts of the incremental collection pause, executed by the vm
   // thread.
-  virtual void do_collection_pause_at_safepoint();
+  virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
   virtual void evacuate_collection_set();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -154,7 +154,6 @@
   _known_garbage_bytes(0),
 
   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _target_pause_time_ms(-1.0),
 
    _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
@@ -1635,8 +1634,6 @@
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   // </NEW PREDICTION>
-
-  _target_pause_time_ms = -1.0;
 }
 
 // <NEW PREDICTION>
@@ -2366,7 +2363,6 @@
     if (reached_target_length) {
       assert( young_list_length > 0 && _g1->young_list()->length() > 0,
               "invariant" );
-      _target_pause_time_ms = max_pause_time_ms;
       return true;
     }
   } else {
@@ -2398,6 +2394,17 @@
 }
 #endif
 
+bool
+G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
+  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+  if (!during_cycle) {
+    set_initiate_conc_mark_if_possible();
+    return true;
+  } else {
+    return false;
+  }
+}
+
 void
 G1CollectorPolicy::decide_on_conc_mark_initiation() {
   // We are about to decide on whether this pause will be an
@@ -2864,7 +2871,8 @@
 #endif // !PRODUCT
 
 bool
-G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
+G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
+                                                  double target_pause_time_ms) {
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
 
@@ -2877,26 +2885,19 @@
 
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0
-            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
-            "_target_pause_time_ms should have been set!");
-#ifndef PRODUCT
-  if (_target_pause_time_ms <= -1.0) {
-    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
-    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-  }
-#endif
-  assert(_collection_set == NULL, "Precondition");
+  guarantee(target_pause_time_ms > 0.0,
+            err_msg("target_pause_time_ms = %1.6lf should be positive",
+                    target_pause_time_ms));
+  guarantee(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double predicted_pause_time_ms = base_time_ms;
 
-  double target_time_ms = _target_pause_time_ms;
-  double time_remaining_ms = target_time_ms - base_time_ms;
+  double time_remaining_ms = target_pause_time_ms - base_time_ms;
 
   // the 10% and 50% values are arbitrary...
-  if (time_remaining_ms < 0.10*target_time_ms) {
-    time_remaining_ms = 0.50 * target_time_ms;
+  if (time_remaining_ms < 0.10 * target_pause_time_ms) {
+    time_remaining_ms = 0.50 * target_pause_time_ms;
     _within_target = false;
   } else {
     _within_target = true;
@@ -3059,7 +3060,18 @@
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
 
-  return abandon_collection;
+  // Here we are supposed to return whether the pause should be
+  // abandoned or not (i.e., whether the collection set is empty or
+  // not). However, this introduces a subtle issue when a pause is
+  // initiated explicitly with System.gc() and
+  // +ExplicitGCInvokesConcurrent (see Comment #2 in CR 6944166): it's
+  // supposed to start a marking cycle, but it gets abandoned. So, by
+  // returning false here we are telling the caller never to consider
+  // a pause to be abandoned. We'll actually remove all the code
+  // associated with abandoned pauses as part of CR 6963209, but we are
+  // just disabling them this way for the moment to avoid increasing
+  // further the amount of changes for CR 6944166.
+  return false;
 }
 
 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
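
choose_collection_set now receives the pause budget as an argument instead of the old handoff through the _target_pause_time_ms field, but the selection is still a greedy fill against that budget, including the self-described arbitrary 10%/50% floor. A hedged Java sketch of the budgeting loop; the per-region predictions are a made-up input standing in for the policy's per-region time estimates:

    import java.util.ArrayList;
    import java.util.List;

    public class ChooseCsetSketch {
        // Greedily pick regions while their predicted cost fits in the pause budget.
        static List<Integer> choose(double targetPauseMs, double baseTimeMs,
                                    double[] predictedRegionMs) {
            double remainingMs = targetPauseMs - baseTimeMs;
            if (remainingMs < 0.10 * targetPauseMs) {
                remainingMs = 0.50 * targetPauseMs;  // the "arbitrary" 10%/50% adjustment
            }
            List<Integer> cset = new ArrayList<>();
            for (int i = 0; i < predictedRegionMs.length; i++) {
                if (predictedRegionMs[i] > remainingMs) break;
                remainingMs -= predictedRegionMs[i];
                cset.add(i);
            }
            return cset;
        }

        public static void main(String[] args) {
            // 200 ms budget, 30 ms fixed overhead, per-region predictions in ms.
            System.out.println(choose(200.0, 30.0, new double[] {50, 60, 40, 80}));  // [0, 1, 2]
        }
    }
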
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -199,8 +199,6 @@
   size_t _young_cset_length;
   bool   _last_young_gc_full;
 
-  double _target_pause_time_ms;
-
   unsigned              _full_young_pause_num;
   unsigned              _partial_young_pause_num;
 
@@ -526,6 +524,10 @@
     return _mmu_tracker;
   }
 
+  double max_pause_time_ms() {
+    return _mmu_tracker->max_gc_time() * 1000.0;
+  }
+
   double predict_init_time_ms() {
     return get_new_prediction(_concurrent_mark_init_times_ms);
   }
@@ -1008,7 +1010,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  virtual bool choose_collection_set() = 0;
+  virtual bool choose_collection_set(double target_pause_time_ms) = 0;
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
@@ -1077,6 +1079,12 @@
   void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
   void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
 
+  // This sets the initiate_conc_mark_if_possible() flag to start a
+  // new cycle, as long as we are not already in one. It's best if it
+  // is called during a safepoint when the test whether a cycle is in
+  // progress or not is stable.
+  bool force_initial_mark_if_outside_cycle();
+
   // This is called at the very beginning of an evacuation pause (it
   // has to be the first thing that the pause does). If
   // initiate_conc_mark_if_possible() is true, and the concurrent
@@ -1259,7 +1267,7 @@
   // If the estimated is less then desirable, resize if possible.
   void expand_if_possible(size_t numRegions);
 
-  virtual bool choose_collection_set();
+  virtual bool choose_collection_set(double target_pause_time_ms);
   virtual void record_collection_pause_start(double start_time_sec,
                                              size_t start_used);
   virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -676,9 +676,27 @@
   // We must complete this write before we do any of the reads below.
   OrderAccess::storeload();
   // And process it, being careful of unallocated portions of TLAB's.
+
+  // The region for the current card may be a young region. The
+  // current card may have been a card that was evicted from the
+  // card cache. When the card was inserted into the cache, we had
+  // determined that its region was non-young. While in the cache,
+  // the region may have been freed during a cleanup pause, reallocated
+  // and tagged as young.
+  //
+  // We wish to filter out cards for such a region but the current
+  // thread, if we're running concurrently, may "see" the young type
+  // change at any time (so an earlier "is_young" check may pass or
+  // fail arbitrarily). We tell the iteration code to perform this
+  // filtering when it has been determined that there has been an actual
+  // allocation in this region, making it safe to check the young type.
+  bool filter_young = true;
+
   HeapWord* stop_point =
     r->oops_on_card_seq_iterate_careful(dirtyRegion,
-                                        &filter_then_update_rs_oop_cl);
+                                        &filter_then_update_rs_oop_cl,
+                                        filter_young);
+
   // If stop_point is non-null, then we encountered an unallocated region
   // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
   // card and re-enqueue: if we put off the card until a GC pause, then the
@@ -789,8 +807,14 @@
       if (r == NULL) {
         assert(_g1->is_in_permanent(start), "Or else where?");
       } else {
-        guarantee(!r->is_young(), "It was evicted in the current minor cycle.");
-        // Process card pointer we get back from the hot card cache
+        // Checking whether the region we got back from the cache
+        // is young here is inappropriate. The region could have been
+        // freed, reallocated and tagged as young while in the cache.
+        // Hence we could see its young type change at any time.
+        //
+        // Process card pointer we get back from the hot card cache. This
+        // will check whether the region containing the card is young
+        // _after_ checking that the region has been allocated from.
         concurrentRefineOneCard_impl(res, worker_i);
       }
     }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -658,7 +658,8 @@
 HeapWord*
 HeapRegion::
 oops_on_card_seq_iterate_careful(MemRegion mr,
-                                     FilterOutOfRegionClosure* cl) {
+                                 FilterOutOfRegionClosure* cl,
+                                 bool filter_young) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If we're within a stop-world GC, then we might look at a card in a
@@ -672,6 +673,16 @@
   if (mr.is_empty()) return NULL;
   // Otherwise, find the obj that extends onto mr.start().
 
+  // The intersection of the incoming mr (for the card) and the
+  // allocated part of the region is non-empty. This implies that
+  // we have actually allocated into this region. The code in
+  // G1CollectedHeap.cpp that allocates a new region sets the
+  // is_young tag on the region before allocating. Thus we
+  // safely know if this region is young.
+  if (is_young() && filter_young) {
+    return NULL;
+  }
+
   // We used to use "block_start_careful" here.  But we're actually happy
   // to update the BOT while we do this...
   HeapWord* cur = block_start(mr.start());
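
The filter_young flag makes the young check valid only at a point where the region is known to have been allocated into (the non-empty intersection the comment above describes), since the allocator tags a region young before publishing any allocation. A one-method Java sketch of that guard ordering, with hypothetical parameters:

    public class CardScanGuardSketch {
        // Decide whether a card's region should be scanned at all.
        static boolean shouldScan(boolean intersectsAllocatedSpace,
                                  boolean regionIsYoung,
                                  boolean filterYoung) {
            if (!intersectsAllocatedSpace) return false;  // the mr.is_empty() early-out
            // Only now is the young test meaningful: the allocator sets the
            // is_young tag before publishing any allocation into the region.
            if (filterYoung && regionIsYoung) return false;
            return true;
        }
    }
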
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -252,7 +252,7 @@
                                 // survivor
   };
 
-  YoungType _young_type;
+  volatile YoungType _young_type;
   int  _young_index_in_cset;
   SurvRateGroup* _surv_rate_group;
   int  _age_index;
@@ -726,9 +726,12 @@
   HeapWord*
   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 
+  // In this version - if filter_young is true and the region
+  // is a young region then we skip the iteration.
   HeapWord*
   oops_on_card_seq_iterate_careful(MemRegion mr,
-                                   FilterOutOfRegionClosure* cl);
+                                   FilterOutOfRegionClosure* cl,
+                                   bool filter_young);
 
   // The region "mr" is entirely in "this", and starts and ends at block
   // boundaries. The caller declares that all the contained blocks are
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -42,8 +42,65 @@
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(!_should_initiate_conc_mark ||
+  ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
+   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
+         "only a GC locker or a System.gc() induced GC should start a cycle");
+
   GCCauseSetter x(g1h, _gc_cause);
-  g1h->do_collection_pause_at_safepoint();
+  if (_should_initiate_conc_mark) {
+    // It's safer to read full_collections_completed() here, given
+    // that no one else will be updating it concurrently. Since we'll
+    // only need it if we're initiating a marking cycle, no point in
+    // setting it earlier.
+    _full_collections_completed_before = g1h->full_collections_completed();
+
+    // At this point we are supposed to start a concurrent cycle. We
+    // will do so if one is not already in progress.
+    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
+  }
+  g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+}
+
+void VM_G1IncCollectionPause::doit_epilogue() {
+  VM_GC_Operation::doit_epilogue();
+
+  // If the pause was initiated by a System.gc() and
+  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
+  // that just started (or maybe one that was already in progress) to
+  // finish.
+  if (_gc_cause == GCCause::_java_lang_system_gc &&
+      _should_initiate_conc_mark) {
+    assert(ExplicitGCInvokesConcurrent,
+           "the only way to be here is if ExplicitGCInvokesConcurrent is set");
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    // In the doit() method we saved g1h->full_collections_completed()
+    // in the _full_collections_completed_before field. We have to
+    // wait until we observe that g1h->full_collections_completed()
+    // has increased by at least one. This can happen if a) we started
+    // a cycle and it completes, b) a cycle already in progress
+    // completes, or c) a Full GC happens.
+
+    // If the condition has already been reached, there's no point in
+    // actually taking the lock and doing the wait.
+    if (g1h->full_collections_completed() <=
+                                          _full_collections_completed_before) {
+      // The following is largely copied from CMS
+
+      Thread* thr = Thread::current();
+      assert(thr->is_Java_thread(), "invariant");
+      JavaThread* jt = (JavaThread*)thr;
+      ThreadToNativeFromVM native(jt);
+
+      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+      while (g1h->full_collections_completed() <=
+                                          _full_collections_completed_before) {
+        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
+      }
+    }
+  }
 }
 
 void VM_CGC_Operation::doit() {
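
The doit_epilogue() added above blocks until the completed-collections counter moves past the snapshot taken in doit(). A portable sketch of that snapshot-then-wait pattern, using the standard library rather than HotSpot's FullGCCount_lock and no-safepoint-check waits (CycleTracker and its members are illustrative):

    #include <condition_variable>
    #include <mutex>

    class CycleTracker {
      std::mutex _lock;
      std::condition_variable _cv;
      unsigned int _full_collections_completed = 0;

    public:
      // Snapshot the counter before starting (or joining) a cycle.
      unsigned int completed() {
        std::lock_guard<std::mutex> g(_lock);
        return _full_collections_completed;
      }

      // Called when a concurrent cycle or a Full GC finishes.
      void signal_cycle_complete() {
        { std::lock_guard<std::mutex> g(_lock); ++_full_collections_completed; }
        _cv.notify_all();
      }

      // Wait until at least one collection completes after the snapshot: the one
      // we started, one already in progress, or a Full GC all satisfy this.
      void wait_past(unsigned int snapshot) {
        std::unique_lock<std::mutex> g(_lock);
        _cv.wait(g, [&] { return _full_collections_completed > snapshot; });
      }
    };
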
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -31,13 +31,12 @@
 //   - VM_G1PopRegionCollectionPause
 
 class VM_G1CollectFull: public VM_GC_Operation {
- private:
  public:
-  VM_G1CollectFull(int gc_count_before,
-                   GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before)
-  {
-    _gc_cause = gc_cause;
+  VM_G1CollectFull(unsigned int gc_count_before,
+                   unsigned int full_gc_count_before,
+                   GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before, full_gc_count_before) {
+    _gc_cause = cause;
   }
   ~VM_G1CollectFull() {}
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
@@ -67,12 +66,28 @@
 };
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
- public:
-  VM_G1IncCollectionPause(int gc_count_before,
-                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
-    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
+private:
+  bool _should_initiate_conc_mark;
+  double _target_pause_time_ms;
+  unsigned int _full_collections_completed_before;
+public:
+  VM_G1IncCollectionPause(unsigned int   gc_count_before,
+                          bool           should_initiate_conc_mark,
+                          double         target_pause_time_ms,
+                          GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before),
+      _full_collections_completed_before(0),
+      _should_initiate_conc_mark(should_initiate_conc_mark),
+      _target_pause_time_ms(target_pause_time_ms) {
+    guarantee(target_pause_time_ms > 0.0,
+              err_msg("target_pause_time_ms = %1.6lf should be positive",
+                      target_pause_time_ms));
+
+    _gc_cause = cause;
+  }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
+  virtual void doit_epilogue();
   virtual const char* name() const {
     return "garbage-first incremental collection pause";
   }
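
The new constructor above rejects a non-positive pause target at construction time with guarantee(), which fires in all build types. A standalone sketch of the same validate-in-the-constructor idea (PauseRequest is illustrative; a thrown exception stands in for guarantee()):

    #include <stdexcept>

    class PauseRequest {
      bool   _initiate_conc_mark;
      double _target_pause_time_ms;
    public:
      PauseRequest(bool initiate_conc_mark, double target_pause_time_ms)
        : _initiate_conc_mark(initiate_conc_mark),
          _target_pause_time_ms(target_pause_time_ms) {
        // guarantee() aborts unconditionally in HotSpot; throwing is the
        // closest standalone analogue.
        if (!(target_pause_time_ms > 0.0)) {
          throw std::invalid_argument("target_pause_time_ms should be positive");
        }
      }
      bool   initiate_conc_mark() const   { return _initiate_conc_mark; }
      double target_pause_time_ms() const { return _target_pause_time_ms; }
    };
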
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Jul 05 17:18:57 2017 +0200
@@ -367,4 +367,6 @@
 
 vm_operations_g1.cpp			vm_operations_g1.hpp
 vm_operations_g1.cpp                    g1CollectedHeap.inline.hpp
+vm_operations_g1.cpp                    g1CollectorPolicy.hpp
+vm_operations_g1.cpp                    interfaceSupport.hpp
 vm_operations_g1.cpp                    isGCActiveMark.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -539,10 +539,9 @@
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 
   for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
-    ObjToScanQueuePadded *q_padded = new ObjToScanQueuePadded();
-    guarantee(q_padded != NULL, "work_queue Allocation failure.");
-
-    _task_queues->register_queue(i1, &q_padded->work_queue);
+    ObjToScanQueue *q = new ObjToScanQueue();
+    guarantee(q != NULL, "work_queue Allocation failure.");
+    _task_queues->register_queue(i1, q);
   }
 
   for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,8 @@
 // but they must be here to allow ParScanClosure::do_oop_work to be defined
 // in genOopClosures.inline.hpp.
 
-typedef OopTaskQueue       ObjToScanQueue;
-typedef OopTaskQueueSet    ObjToScanQueueSet;
+typedef Padded<OopTaskQueue> ObjToScanQueue;
+typedef GenericTaskQueueSet<ObjToScanQueue> ObjToScanQueueSet;
 
 // Enable this to get push/pop/steal stats.
 const int PAR_STATS_ENABLED = 0;
@@ -304,12 +304,6 @@
   friend class ParEvacuateFollowersClosure;
 
  private:
-  // XXX use a global constant instead of 64!
-  struct ObjToScanQueuePadded {
-        ObjToScanQueue work_queue;
-        char pad[64 - sizeof(ObjToScanQueue)];  // prevent false sharing
-  };
-
   // The per-worker-thread work queues
   ObjToScanQueueSet* _task_queues;
 
--- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,8 @@
 
 class ParScanThreadState;
 class ParNewGeneration;
-typedef OopTaskQueueSet ObjToScanQueueSet;
+typedef Padded<OopTaskQueue> ObjToScanQueue;
+typedef GenericTaskQueueSet<ObjToScanQueue> ObjToScanQueueSet;
 class ParallelTaskTerminator;
 
 class ParScanClosure: public OopsInGenClosure {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -90,10 +90,7 @@
 }
 
 void PSPromotionManager::post_scavenge() {
-#if PS_PM_STATS
-  print_stats();
-#endif // PS_PM_STATS
-
+  TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     PSPromotionManager* manager = manager_array(i);
     if (UseDepthFirstScavengeOrder) {
@@ -105,37 +102,58 @@
   }
 }
 
-#if PS_PM_STATS
-
+#if TASKQUEUE_STATS
 void
-PSPromotionManager::print_stats(uint i) {
-  tty->print_cr("---- GC Worker %2d Stats", i);
-  tty->print_cr("    total pushes            %8d", _total_pushes);
-  tty->print_cr("    masked pushes           %8d", _masked_pushes);
-  tty->print_cr("    overflow pushes         %8d", _overflow_pushes);
-  tty->print_cr("    max overflow length     %8d", _max_overflow_length);
-  tty->print_cr("");
-  tty->print_cr("    arrays chunked          %8d", _arrays_chunked);
-  tty->print_cr("    array chunks processed  %8d", _array_chunks_processed);
-  tty->print_cr("");
-  tty->print_cr("    total steals            %8d", _total_steals);
-  tty->print_cr("    masked steals           %8d", _masked_steals);
-  tty->print_cr("");
+PSPromotionManager::print_taskqueue_stats(uint i) const {
+  const TaskQueueStats& stats = depth_first() ?
+    _claimed_stack_depth.stats : _claimed_stack_breadth.stats;
+  tty->print("%3u ", i);
+  stats.print();
+  tty->cr();
 }
 
 void
+PSPromotionManager::print_local_stats(uint i) const {
+  #define FMT " " SIZE_FORMAT_W(10)
+  tty->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
+                _arrays_chunked, _array_chunks_processed);
+  #undef FMT
+}
+
+static const char* const pm_stats_hdr[] = {
+  "    --------masked-------     arrays      array",
+  "thr       push      steal    chunked     chunks",
+  "--- ---------- ---------- ---------- ----------"
+};
+
+void
 PSPromotionManager::print_stats() {
-  tty->print_cr("== GC Tasks Stats (%s), GC %3d",
-                (UseDepthFirstScavengeOrder) ? "Depth-First" : "Breadth-First",
+  const bool df = UseDepthFirstScavengeOrder;
+  tty->print_cr("== GC Task Stats (%s-First), GC %3d", df ? "Depth" : "Breadth",
                 Universe::heap()->total_collections());
 
-  for (uint i = 0; i < ParallelGCThreads+1; ++i) {
-    PSPromotionManager* manager = manager_array(i);
-    manager->print_stats(i);
+  tty->print("thr "); TaskQueueStats::print_header(1); tty->cr();
+  tty->print("--- "); TaskQueueStats::print_header(2); tty->cr();
+  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
+    manager_array(i)->print_taskqueue_stats(i);
+  }
+
+  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
+  for (uint i = 0; i < hlines; ++i) tty->print_cr(pm_stats_hdr[i]);
+  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
+    manager_array(i)->print_local_stats(i);
   }
 }
 
-#endif // PS_PM_STATS
+void
+PSPromotionManager::reset_stats() {
+  TaskQueueStats& stats = depth_first() ?
+    claimed_stack_depth()->stats : claimed_stack_breadth()->stats;
+  stats.reset();
+  _masked_pushes = _masked_steals = 0;
+  _arrays_chunked = _array_chunks_processed = 0;
+}
+#endif // TASKQUEUE_STATS
 
 PSPromotionManager::PSPromotionManager() {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -189,16 +207,7 @@
 
   _prefetch_queue.clear();
 
-#if PS_PM_STATS
-  _total_pushes = 0;
-  _masked_pushes = 0;
-  _overflow_pushes = 0;
-  _max_overflow_length = 0;
-  _arrays_chunked = 0;
-  _array_chunks_processed = 0;
-  _total_steals = 0;
-  _masked_steals = 0;
-#endif // PS_PM_STATS
+  TASKQUEUE_STATS_ONLY(reset_stats());
 }
 
 
@@ -423,14 +432,9 @@
             new_obj->is_objArray() &&
             PSChunkLargeArrays) {
           // we'll chunk it
-#if PS_PM_STATS
-          ++_arrays_chunked;
-#endif // PS_PM_STATS
           oop* const masked_o = mask_chunked_array_oop(o);
           push_depth(masked_o);
-#if PS_PM_STATS
-          ++_masked_pushes;
-#endif // PS_PM_STATS
+          TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
         } else {
           // we'll just push its contents
           new_obj->push_contents(this);
@@ -494,9 +498,7 @@
   assert(old->is_objArray(), "invariant");
   assert(old->is_forwarded(), "invariant");
 
-#if PS_PM_STATS
-  ++_array_chunks_processed;
-#endif // PS_PM_STATS
+  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);
 
   oop const obj = old->forwardee();
 
@@ -508,9 +510,7 @@
     assert(start > 0, "invariant");
     arrayOop(old)->set_length(start);
     push_depth(mask_chunked_array_oop(old));
-#if PS_PM_STATS
-    ++_masked_pushes;
-#endif // PS_PM_STATS
+    TASKQUEUE_STATS_ONLY(++_masked_pushes);
   } else {
     // this is the final chunk for this array
     start = 0;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -42,8 +42,6 @@
 class PSOldGen;
 class ParCompactionManager;
 
-#define PS_PM_STATS         0
-
 class PSPromotionManager : public CHeapObj {
   friend class PSScavenge;
   friend class PSRefProcTaskExecutor;
@@ -54,22 +52,18 @@
   static PSOldGen*                    _old_gen;
   static MutableSpace*                _young_space;
 
-#if PS_PM_STATS
-  uint                                _total_pushes;
-  uint                                _masked_pushes;
-
-  uint                                _overflow_pushes;
-  uint                                _max_overflow_length;
+#if TASKQUEUE_STATS
+  size_t                              _masked_pushes;
+  size_t                              _masked_steals;
+  size_t                              _arrays_chunked;
+  size_t                              _array_chunks_processed;
 
-  uint                                _arrays_chunked;
-  uint                                _array_chunks_processed;
+  void print_taskqueue_stats(uint i) const;
+  void print_local_stats(uint i) const;
+  static void print_stats();
 
-  uint                                _total_steals;
-  uint                                _masked_steals;
-
-  void print_stats(uint i);
-  static void print_stats();
-#endif // PS_PM_STATS
+  void reset_stats();
+#endif // TASKQUEUE_STATS
 
   PSYoungPromotionLAB                 _young_lab;
   PSOldPromotionLAB                   _old_lab;
@@ -143,42 +137,12 @@
 
   template <class T> void push_depth(T* p) {
     assert(depth_first(), "pre-condition");
-
-#if PS_PM_STATS
-    ++_total_pushes;
-    int stack_length = claimed_stack_depth()->overflow_stack()->length();
-#endif // PS_PM_STATS
-
     claimed_stack_depth()->push(p);
-
-#if PS_PM_STATS
-    if (claimed_stack_depth()->overflow_stack()->length() != stack_length) {
-      ++_overflow_pushes;
-      if ((uint)stack_length + 1 > _max_overflow_length) {
-        _max_overflow_length = (uint)stack_length + 1;
-      }
-    }
-#endif // PS_PM_STATS
   }
 
   void push_breadth(oop o) {
     assert(!depth_first(), "pre-condition");
-
-#if PS_PM_STATS
-    ++_total_pushes;
-    int stack_length = claimed_stack_breadth()->overflow_stack()->length();
-#endif // PS_PM_STATS
-
     claimed_stack_breadth()->push(o);
-
-#if PS_PM_STATS
-    if (claimed_stack_breadth()->overflow_stack()->length() != stack_length) {
-      ++_overflow_pushes;
-      if ((uint)stack_length + 1 > _max_overflow_length) {
-        _max_overflow_length = (uint)stack_length + 1;
-      }
-    }
-#endif // PS_PM_STATS
   }
 
  protected:
@@ -256,12 +220,5 @@
   template <class T> inline void claim_or_forward_depth(T* p);
   template <class T> inline void claim_or_forward_breadth(T* p);
 
-#if PS_PM_STATS
-  void increment_steals(oop* p = NULL) {
-    _total_steals += 1;
-    if (p != NULL && is_oop_masked(p)) {
-      _masked_steals += 1;
-    }
-  }
-#endif // PS_PM_STATS
+  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)
 };
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,3 +124,11 @@
     }
   }
 }
+
+#if TASKQUEUE_STATS
+void PSPromotionManager::record_steal(StarTask& p) {
+  if (is_oop_masked(p)) {
+    ++_masked_steals;
+  }
+}
+#endif // TASKQUEUE_STATS
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -148,9 +148,7 @@
     while(true) {
       StarTask p;
       if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
-#if PS_PM_STATS
-        pm->increment_steals(p);
-#endif // PS_PM_STATS
+        TASKQUEUE_STATS_ONLY(pm->record_steal(p));
         pm->process_popped_location_depth(p);
         pm->drain_stacks_depth(true);
       } else {
@@ -163,9 +161,6 @@
     while(true) {
       oop obj;
       if (PSPromotionManager::steal_breadth(which, &random_seed, obj)) {
-#if PS_PM_STATS
-        pm->increment_steals();
-#endif // PS_PM_STATS
         obj->copy_contents(pm);
         pm->drain_stacks_breadth(true);
       } else {
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -86,9 +86,7 @@
 
     _gc_locked = false;
 
-    if (full) {
-      _full_gc_count_before = full_gc_count_before;
-    }
+    _full_gc_count_before = full_gc_count_before;
     // In ParallelScavengeHeap::mem_allocate() collections can be
     // executed within a loop and _all_soft_refs_clear can be set
     // true after they have been cleared by a collection and another
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -78,6 +78,9 @@
     case _old_generation_too_full_to_scavenge:
       return "Old Generation Too Full To Scavenge";
 
+    case _g1_inc_collection_pause:
+      return "G1 Evacuation Pause";
+
     case _last_ditch_collection:
       return "Last ditch collection";
 
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -328,24 +328,35 @@
   constantPoolOop constants = method()->constants();
   constantTag tag = constants->tag_at(i);
 
-  int nt_index = -1;
+  bool has_klass = true;
 
   switch (tag.value()) {
   case JVM_CONSTANT_InterfaceMethodref:
   case JVM_CONSTANT_Methodref:
   case JVM_CONSTANT_Fieldref:
+    break;
   case JVM_CONSTANT_NameAndType:
+  case JVM_CONSTANT_InvokeDynamic:
+    has_klass = false;
     break;
   default:
     st->print_cr(" bad tag=%d at %d", tag.value(), i);
     return;
   }
 
-  symbolOop klass = constants->klass_name_at(constants->uncached_klass_ref_index_at(i));
   symbolOop name = constants->uncached_name_ref_at(i);
   symbolOop signature = constants->uncached_signature_ref_at(i);
   const char* sep = (tag.is_field() ? "/" : "");
-  st->print_cr(" %d <%s.%s%s%s> ", i, klass->as_C_string(), name->as_C_string(), sep, signature->as_C_string());
+  if (has_klass) {
+    symbolOop klass = constants->klass_name_at(constants->uncached_klass_ref_index_at(i));
+    st->print_cr(" %d <%s.%s%s%s> ", i, klass->as_C_string(), name->as_C_string(), sep, signature->as_C_string());
+  } else {
+    if (tag.is_invoke_dynamic()) {
+      int bsm = constants->invoke_dynamic_bootstrap_method_ref_index_at(i);
+      st->print(" bsm=%d", bsm);
+    }
+    st->print_cr(" %d <%s%s%s>", i, name->as_C_string(), sep, signature->as_C_string());
+  }
 }
 
 
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -702,10 +702,6 @@
 
   methodHandle caller_method(thread, method(thread));
 
-  // first find the bootstrap method
-  KlassHandle caller_klass(thread, caller_method->method_holder());
-  Handle bootm = SystemDictionary::find_bootstrap_method(caller_klass, CHECK);
-
   constantPoolHandle pool(thread, caller_method->constants());
   pool->set_invokedynamic();    // mark header to flag active call sites
 
@@ -726,7 +722,7 @@
     CallInfo info;
     LinkResolver::resolve_invoke(info, Handle(), pool,
                                  site_index, bytecode, CHECK);
-    // The main entry corresponds to a JVM_CONSTANT_NameAndType, and serves
+    // The main entry corresponds to a JVM_CONSTANT_InvokeDynamic, and serves
     // as a common reference point for all invokedynamic call sites with
     // that exact call descriptor.  We will link it in the CP cache exactly
     // as if it were an invokevirtual of MethodHandle.invoke.
@@ -734,23 +730,30 @@
       bytecode,
       info.resolved_method(),
       info.vtable_index());
-    assert(pool->cache()->entry_at(main_index)->is_vfinal(), "f2 must be a methodOop");
   }
 
   // The method (f2 entry) of the main entry is the MH.invoke for the
   // invokedynamic target call signature.
-  intptr_t f2_value = pool->cache()->entry_at(main_index)->f2();
-  methodHandle signature_invoker(THREAD, (methodOop) f2_value);
+  oop f1_value = pool->cache()->entry_at(main_index)->f1();
+  methodHandle signature_invoker(THREAD, (methodOop) f1_value);
   assert(signature_invoker.not_null() && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
          "correct result from LinkResolver::resolve_invokedynamic");
 
+  Handle bootm = SystemDictionary::find_bootstrap_method(caller_method, caller_bci,
+                                                         main_index, CHECK);
+  if (bootm.is_null()) {
+    THROW_MSG(vmSymbols::java_lang_IllegalStateException(),
+              "no bootstrap method found for invokedynamic");
+  }
+
+  // Short circuit if CallSite has been bound already:
+  if (!pool->cache()->secondary_entry_at(site_index)->is_f1_null())
+    return;
+
   symbolHandle call_site_name(THREAD, pool->name_ref_at(site_index));
 
   Handle info;  // NYI: Other metadata from a new kind of CP entry.  (Annotations?)
 
-  // this is the index which gets stored on the CallSite object (as "callerPosition"):
-  int call_site_position = constantPoolCacheOopDesc::decode_secondary_index(site_index);
-
   Handle call_site
     = SystemDictionary::make_dynamic_call_site(bootm,
                                                // Callee information:
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -67,6 +67,15 @@
   set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
 }
 
+void CallInfo::set_dynamic(methodHandle resolved_method, TRAPS) {
+  assert(resolved_method->is_method_handle_invoke(), "");
+  KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
+  assert(resolved_klass == resolved_method->method_holder(), "");
+  int vtable_index = methodOopDesc::nonvirtual_vtable_index;
+  assert(resolved_method->vtable_index() == vtable_index, "");
+  set_common(resolved_klass, KlassHandle(), resolved_method, resolved_method, vtable_index, CHECK);
+}
+
 void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
   assert(resolved_method->signature() == selected_method->signature(), "signatures must correspond");
   _resolved_klass  = resolved_klass;
@@ -176,9 +185,20 @@
                                           KlassHandle klass, symbolHandle name, symbolHandle signature,
                                           KlassHandle current_klass,
                                           TRAPS) {
-  if (EnableMethodHandles && MethodHandles::enabled() &&
+  if (EnableMethodHandles &&
       klass() == SystemDictionary::MethodHandle_klass() &&
       methodOopDesc::is_method_handle_invoke_name(name())) {
+    if (!MethodHandles::enabled()) {
+      // Make sure the Java part of the runtime has been booted up.
+      klassOop natives = SystemDictionary::MethodHandleNatives_klass();
+      if (natives == NULL || instanceKlass::cast(natives)->is_not_initialized()) {
+        SystemDictionary::resolve_or_fail(vmSymbolHandles::sun_dyn_MethodHandleNatives(),
+                                          Handle(),
+                                          Handle(),
+                                          true,
+                                          CHECK);
+      }
+    }
     methodOop result_oop = SystemDictionary::find_method_handle_invoke(name,
                                                                        signature,
                                                                        current_klass,
@@ -1065,7 +1085,7 @@
   if (resolved_method.is_null()) {
     THROW(vmSymbols::java_lang_InternalError());
   }
-  result.set_virtual(resolved_klass, KlassHandle(), resolved_method, resolved_method, resolved_method->vtable_index(), CHECK);
+  result.set_dynamic(resolved_method, CHECK);
 }
 
 //------------------------------------------------------------------------------------------------------------------------
--- a/hotspot/src/share/vm/interpreter/linkResolver.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -73,6 +73,7 @@
   void         set_static(   KlassHandle resolved_klass,                             methodHandle resolved_method                                                , TRAPS);
   void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method                  , TRAPS);
   void         set_virtual(  KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS);
+  void         set_dynamic(                                                          methodHandle resolved_method,                                                 TRAPS);
   void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS);
 
   friend class LinkResolver;
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -32,14 +32,17 @@
 void Rewriter::compute_index_maps() {
   const int length  = _pool->length();
   init_cp_map(length);
+  jint tag_mask = 0;
   for (int i = 0; i < length; i++) {
     int tag = _pool->tag_at(i).value();
+    tag_mask |= (1 << tag);
     switch (tag) {
       case JVM_CONSTANT_InterfaceMethodref:
       case JVM_CONSTANT_Fieldref          : // fall through
       case JVM_CONSTANT_Methodref         : // fall through
       case JVM_CONSTANT_MethodHandle      : // fall through
       case JVM_CONSTANT_MethodType        : // fall through
+      case JVM_CONSTANT_InvokeDynamic     : // fall through
         add_cp_cache_entry(i);
         break;
     }
@@ -47,6 +50,8 @@
 
   guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1),
             "all cp cache indexes fit in a u2");
+
+  _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
 }
 
 
@@ -59,6 +64,28 @@
   constantPoolCacheOop cache =
       oopFactory::new_constantPoolCache(length, methodOopDesc::IsUnsafeConc, CHECK);
   cache->initialize(_cp_cache_map);
+
+  // Don't bother with the next pass if there is no JVM_CONSTANT_InvokeDynamic.
+  if (_have_invoke_dynamic) {
+    for (int i = 0; i < length; i++) {
+      int pool_index = cp_cache_entry_pool_index(i);
+      if (pool_index >= 0 &&
+          _pool->tag_at(pool_index).is_invoke_dynamic()) {
+        int bsm_index = _pool->invoke_dynamic_bootstrap_method_ref_index_at(pool_index);
+        if (bsm_index != 0) {
+          assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant");
+          // There is a CP cache entry holding the BSM for these calls.
+          int bsm_cache_index = cp_entry_to_cp_cache(bsm_index);
+          cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index);
+        } else {
+          // There is no CP cache entry holding the BSM for these calls.
+          // We will need to look for a class-global BSM, later.
+          guarantee(AllowTransitionalJSR292, "");
+        }
+      }
+    }
+  }
+
   _pool->set_cache(cache);
   cache->set_constant_pool(_pool());
 }
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -32,6 +32,7 @@
   objArrayHandle      _methods;
   intArray            _cp_map;
   intStack            _cp_cache_map;
+  bool                _have_invoke_dynamic;
 
   void init_cp_map(int length) {
     _cp_map.initialize(length, -1);
@@ -56,6 +57,22 @@
     return cache_index;
   }
 
+  // Access the contents of _cp_cache_map to determine CP cache layout.
+  int cp_cache_entry_pool_index(int cache_index) {
+    int cp_index = _cp_cache_map[cache_index];
+    if ((cp_index & _secondary_entry_tag) != 0)
+      return -1;
+    else
+      return cp_index;
+  }
+  int cp_cache_secondary_entry_main_index(int cache_index) {
+    int cp_index = _cp_cache_map[cache_index];
+    if ((cp_index & _secondary_entry_tag) == 0)
+      return -1;
+    else
+      return (cp_index - _secondary_entry_tag);
+  }
+
   // All the work goes in here:
   Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS);
 
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -379,6 +379,10 @@
       case JVM_CONSTANT_MethodType :
         st->print("signature_index=%d", cp->method_type_index_at(index));
         break;
+      case JVM_CONSTANT_InvokeDynamic :
+        st->print("bootstrap_method_index=%d", cp->invoke_dynamic_bootstrap_method_ref_index_at(index));
+        st->print(" name_and_type_index=%d", cp->invoke_dynamic_name_and_type_ref_index_at(index));
+        break;
       default:
         ShouldNotReachHere();
         break;
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -264,10 +264,15 @@
 int constantPoolOopDesc::impl_name_and_type_ref_index_at(int which, bool uncached) {
   int i = which;
   if (!uncached && cache() != NULL) {
-    if (constantPoolCacheOopDesc::is_secondary_index(which))
+    if (constantPoolCacheOopDesc::is_secondary_index(which)) {
       // Invokedynamic indexes are always processed in native order
       // so there is no question of reading a native u2 in Java order here.
-      return cache()->main_entry_at(which)->constant_pool_index();
+      int pool_index = cache()->main_entry_at(which)->constant_pool_index();
+      if (tag_at(pool_index).is_invoke_dynamic())
+        pool_index = invoke_dynamic_name_and_type_ref_index_at(pool_index);
+      assert(tag_at(pool_index).is_name_and_type(), "");
+      return pool_index;
+    }
     // change byte-ordering and go via cache
     i = remap_instruction_operand_from_cache(which);
   } else {
@@ -830,6 +835,19 @@
     }
   } break;
 
+  case JVM_CONSTANT_InvokeDynamic:
+  {
+    int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1);
+    int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2);
+    if (k1 == k2) {
+      int i1 = invoke_dynamic_name_and_type_ref_index_at(index1);
+      int i2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
+      if (i1 == i2) {
+        return true;
+      }
+    }
+  } break;
+
   case JVM_CONSTANT_UnresolvedString:
   {
     symbolOop s1 = unresolved_string_at(index1);
@@ -1016,6 +1034,13 @@
     to_cp->method_handle_index_at_put(to_i, k1, k2);
   } break;
 
+  case JVM_CONSTANT_InvokeDynamic:
+  {
+    int k1 = invoke_dynamic_bootstrap_method_ref_index_at(from_i);
+    int k2 = invoke_dynamic_name_and_type_ref_index_at(from_i);
+    to_cp->invoke_dynamic_at_put(to_i, k1, k2);
+  } break;
+
   // Invalid is used as the tag for the second constant pool entry
   // occupied by JVM_CONSTANT_Double or JVM_CONSTANT_Long. It should
   // not be seen by itself.
@@ -1231,6 +1256,7 @@
     case JVM_CONSTANT_Methodref:
     case JVM_CONSTANT_InterfaceMethodref:
     case JVM_CONSTANT_NameAndType:
+    case JVM_CONSTANT_InvokeDynamic:
       return 5;
 
     case JVM_CONSTANT_Long:
@@ -1444,6 +1470,15 @@
         DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
         break;
       }
+      case JVM_CONSTANT_InvokeDynamic: {
+        *bytes = JVM_CONSTANT_InvokeDynamic;
+        idx1 = invoke_dynamic_bootstrap_method_ref_index_at(idx);
+        idx2 = invoke_dynamic_name_and_type_ref_index_at(idx);
+        Bytes::put_Java_u2((address) (bytes+1), idx1);
+        Bytes::put_Java_u2((address) (bytes+3), idx2);
+        DBG(printf("JVM_CONSTANT_InvokeDynamic: %hd %hd", idx1, idx2));
+        break;
+      }
     }
     DBG(printf("\n"));
     bytes += ent_size;
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -156,6 +156,11 @@
     *int_at_addr(which) = ref_index;
   }
 
+  void invoke_dynamic_at_put(int which, int bootstrap_method_index, int name_and_type_index) {
+    tag_at_put(which, JVM_CONSTANT_InvokeDynamic);
+    *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_method_index;
+  }
+
   // Temporary until actual use
   void unresolved_string_at_put(int which, symbolOop s) {
     *obj_at_addr(which) = NULL;
@@ -396,6 +401,16 @@
     int sym = method_type_index_at(which);
     return symbol_at(sym);
   }
+  int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
+    assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
+    jint ref_index = *int_at_addr(which);
+    return extract_low_short_from_int(ref_index);
+  }
+  int invoke_dynamic_name_and_type_ref_index_at(int which) {
+    assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
+    jint ref_index = *int_at_addr(which);
+    return extract_high_short_from_int(ref_index);
+  }
 
   // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
   // name_and_type_ref_index_at) all expect to be passed indices obtained
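
invoke_dynamic_at_put() above packs two 16-bit indices into a single 32-bit pool slot: the bootstrap method index in the low half and the name-and-type index in the high half, recovered by the two accessors. A standalone round-trip sketch (the helpers mimic extract_low/high_short_from_int inline):

    #include <cassert>
    #include <cstdint>

    static int32_t pack(int bsm_index, int name_and_type_index) {
      return ((int32_t) name_and_type_index << 16) | bsm_index;
    }
    static int low_short(int32_t v)  { return (int) (v & 0xFFFF); }          // BSM index
    static int high_short(int32_t v) { return (int) ((v >> 16) & 0xFFFF); }  // N&T index

    int main() {
      int32_t slot = pack(3, 25);
      assert(low_short(slot) == 3 && high_short(slot) == 25);
      return 0;
    }
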
--- a/hotspot/src/share/vm/oops/cpCacheOop.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/cpCacheOop.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -134,7 +134,7 @@
 void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                         methodHandle method,
                                         int vtable_index) {
-
+  assert(!is_secondary_entry(), "");
   assert(method->interpreter_entry() != NULL, "should have been set at this point");
   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
   bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);
@@ -142,7 +142,6 @@
   int byte_no = -1;
   bool needs_vfinal_flag = false;
   switch (invoke_code) {
-    case Bytecodes::_invokedynamic:
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokeinterface: {
         if (method->can_be_statically_bound()) {
@@ -155,6 +154,23 @@
         byte_no = 2;
         break;
     }
+
+    case Bytecodes::_invokedynamic:  // similar to _invokevirtual
+      if (TraceInvokeDynamic) {
+        tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
+                      (is_secondary_entry() ? " secondary" : ""),
+                      (intptr_t)method(), vtable_index);
+        method->print();
+        this->print(tty, 0);
+      }
+      assert(method->can_be_statically_bound(), "must be a MH invoker method");
+      assert(AllowTransitionalJSR292 || _f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
+      set_f1(method());
+      needs_vfinal_flag = false;  // _f2 is not an oop
+      assert(!is_vfinal(), "f2 not an oop");
+      byte_no = 1;  // coordinate this with bytecode_number & is_resolved
+      break;
+
     case Bytecodes::_invokespecial:
       // Preserve the value of the vfinal flag on invokevirtual bytecode
       // which may be shared with this constant pool cache entry.
@@ -209,6 +225,7 @@
 
 
 void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
+  assert(!is_secondary_entry(), "");
   klassOop interf = method->method_holder();
   assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
   set_f1(interf);
@@ -218,8 +235,23 @@
 }
 
 
+void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
+  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
+  assert(_f2 == 0, "initialize once");
+  assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");
+  set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
+}
+
+int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
+  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
+  intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
+  assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");
+  return (int) bsm_cache_index;
+}
+
 void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
                                               methodHandle signature_invoker) {
+  assert(is_secondary_entry(), "");
   int param_size = signature_invoker->size_of_parameters();
   assert(param_size >= 1, "method argument size must include MH.this");
   param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
@@ -227,7 +259,6 @@
     // racing threads might be trying to install their own favorites
     set_f1(call_site());
   }
-  //set_f2(0);
   bool is_final = true;
   assert(signature_invoker->is_final_method(), "is_final");
   set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
@@ -417,14 +448,14 @@
   // print separator
   if (index == 0) tty->print_cr("                 -------------");
   // print entry
-  tty->print_cr("%3d  (%08x)  ", index, this);
+  tty->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
   if (is_secondary_entry())
     tty->print_cr("[%5d|secondary]", main_entry_index());
   else
     tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
-  tty->print_cr("                 [   %08x]", (address)(oop)_f1);
-  tty->print_cr("                 [   %08x]", _f2);
-  tty->print_cr("                 [   %08x]", _flags);
+  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)(oop)_f1);
+  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
+  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
   tty->print_cr("                 -------------");
 }
 
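
initialize_bootstrap_method_index_in_cache() above stores the BSM cache index biased by CPCACHE_INDEX_TAG, so a raw zero in _f2 still reads as "never initialized". A standalone sketch of that biased-sentinel encoding (Entry and the TAG value are illustrative, not HotSpot's):

    #include <cassert>
    #include <cstdint>

    const intptr_t CPCACHE_INDEX_TAG = 0x10000;  // illustrative bias

    struct Entry {
      intptr_t f2;                     // 0 means: no BSM index installed yet
      Entry() : f2(0) {}

      void set_bsm_cache_index(int idx) {
        assert(f2 == 0 && "initialize once");
        f2 = idx + CPCACHE_INDEX_TAG;  // biased, so even idx == 0 reads as set
      }
      bool has_bsm_cache_index() const { return f2 != 0; }
      int  bsm_cache_index() const     { return (int) (f2 - CPCACHE_INDEX_TAG); }
    };

    int main() {
      Entry e;
      assert(!e.has_bsm_cache_index());
      e.set_bsm_cache_index(0);
      assert(e.has_bsm_cache_index() && e.bsm_cache_index() == 0);
      return 0;
    }
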
--- a/hotspot/src/share/vm/oops/cpCacheOop.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/cpCacheOop.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -185,6 +185,10 @@
     methodHandle signature_invoker               // determines signature information
   );
 
+  // For JVM_CONSTANT_InvokeDynamic cache entries:
+  void initialize_bootstrap_method_index_in_cache(int bsm_cache_index);
+  int  bootstrap_method_index_in_cache();
+
   void set_parameter_size(int value) {
     assert(parameter_size() == 0 || parameter_size() == value,
            "size must not change");
@@ -207,6 +211,7 @@
       case Bytecodes::_getfield        :    // fall through
       case Bytecodes::_invokespecial   :    // fall through
       case Bytecodes::_invokestatic    :    // fall through
+      case Bytecodes::_invokedynamic   :    // fall through
       case Bytecodes::_invokeinterface : return 1;
       case Bytecodes::_putstatic       :    // fall through
       case Bytecodes::_putfield        :    // fall through
@@ -234,6 +239,7 @@
   Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((_indices >> 16) & 0xFF); }
   Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((_indices >> 24) & 0xFF); }
   volatile oop  f1() const                       { return _f1; }
+  bool is_f1_null() const                        { return (oop)_f1 == NULL; }  // classifies a CPC entry as unbound
   intx f2() const                                { return _f2; }
   int  field_index() const;
   int  parameter_size() const                    { return _flags & 0xFF; }
--- a/hotspot/src/share/vm/oops/methodOop.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -851,9 +851,15 @@
 // MethodHandleCompiler.
 // Must be consistent with MethodHandleCompiler::get_method_oop().
 bool methodOopDesc::is_method_handle_adapter() const {
-  return (is_method_handle_invoke_name(name()) &&
-          is_synthetic() &&
-          MethodHandleCompiler::klass_is_method_handle_adapter_holder(method_holder()));
+  if (is_synthetic() &&
+      !is_native() &&   // has code from MethodHandleCompiler
+      is_method_handle_invoke_name(name()) &&
+      MethodHandleCompiler::klass_is_method_handle_adapter_holder(method_holder())) {
+    assert(!is_method_handle_invoke(), "disjoint");
+    return true;
+  } else {
+    return false;
+  }
 }
 
 methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
--- a/hotspot/src/share/vm/prims/jvm.h	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/prims/jvm.h	Wed Jul 05 17:18:57 2017 +0200
@@ -1046,7 +1046,8 @@
     JVM_CONSTANT_InterfaceMethodref,
     JVM_CONSTANT_NameAndType,
     JVM_CONSTANT_MethodHandle           = 15,  // JSR 292
-    JVM_CONSTANT_MethodType             = 16   // JSR 292
+    JVM_CONSTANT_MethodType             = 16,  // JSR 292
+    JVM_CONSTANT_InvokeDynamic          = 17  // JSR 292
 };
 
 /* JVM_CONSTANT_MethodHandle subtypes */
--- a/hotspot/src/share/vm/prims/methodHandleWalk.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/prims/methodHandleWalk.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -738,6 +738,12 @@
 
   // bi
   case Bytecodes::_ldc:
+    assert(Bytecodes::format_bits(op, false) == (Bytecodes::_fmt_b|Bytecodes::_fmt_has_k), "wrong bytecode format");
+    assert((char) index == index, "index does not fit in 8 bits");
+    _bytecode.push(op);
+    _bytecode.push(index);
+    break;
+
   case Bytecodes::_iload:
   case Bytecodes::_lload:
   case Bytecodes::_fload:
@@ -754,7 +760,8 @@
     _bytecode.push(index);
     break;
 
-  // bii
+  // bkk
+  case Bytecodes::_ldc_w:
   case Bytecodes::_ldc2_w:
   case Bytecodes::_checkcast:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format");
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -2475,6 +2475,10 @@
 
 JVM_ENTRY(void, MHI_registerBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh, jobject bsm_jh)) {
   instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD);
+  if (!AllowTransitionalJSR292) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "registerBootstrapMethod is only supported in JSR 292 EDR");
+  }
   ik->link_class(CHECK);
   if (!java_dyn_MethodHandle::is_instance(JNIHandles::resolve(bsm_jh))) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "method handle");
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -3517,6 +3517,9 @@
   experimental(bool, EnableInvokeDynamic, false,                            \
           "recognize the invokedynamic instruction")                        \
                                                                             \
+  experimental(bool, AllowTransitionalJSR292, true,                         \
+          "recognize pre-PFD formats of invokedynamic")                     \
+                                                                            \
   develop(bool, TraceInvokeDynamic, false,                                  \
           "trace internal invoke dynamic operations")                       \
                                                                             \
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -159,6 +159,8 @@
   def(STS_init_lock              , Mutex,   leaf,        true );
   if (UseConcMarkSweepGC) {
     def(iCMS_lock                  , Monitor, special,     true ); // CMS incremental mode start/stop notification
+  }
+  if (UseConcMarkSweepGC || UseG1GC) {
     def(FullGCCount_lock           , Monitor, leaf,        true ); // in support of ExplicitGCInvokesConcurrent
   }
   if (UseG1GC) {
--- a/hotspot/src/share/vm/utilities/constantTag.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/utilities/constantTag.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -91,6 +91,8 @@
       return "MethodHandle";
     case JVM_CONSTANT_MethodType :
       return "MethodType";
+    case JVM_CONSTANT_InvokeDynamic :
+      return "InvokeDynamic";
     case JVM_CONSTANT_Object :
       return "Object";
     case JVM_CONSTANT_Utf8 :
--- a/hotspot/src/share/vm/utilities/constantTag.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/utilities/constantTag.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -80,13 +80,14 @@
 
   bool is_method_type() const              { return _tag == JVM_CONSTANT_MethodType; }
   bool is_method_handle() const            { return _tag == JVM_CONSTANT_MethodHandle; }
+  bool is_invoke_dynamic() const           { return _tag == JVM_CONSTANT_InvokeDynamic; }
 
   constantTag() {
     _tag = JVM_CONSTANT_Invalid;
   }
   constantTag(jbyte tag) {
     assert((tag >= 0 && tag <= JVM_CONSTANT_NameAndType) ||
-           (tag >= JVM_CONSTANT_MethodHandle && tag <= JVM_CONSTANT_MethodType) ||
+           (tag >= JVM_CONSTANT_MethodHandle && tag <= JVM_CONSTANT_InvokeDynamic) ||
            (tag >= JVM_CONSTANT_InternalMin && tag <= JVM_CONSTANT_InternalMax), "Invalid constant tag");
     _tag = tag;
   }
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -345,6 +345,35 @@
   return align_size_up(offset, HeapWordsPerLong);
 }
 
+// The expected size in bytes of a cache line, used to pad data structures.
+#define DEFAULT_CACHE_LINE_SIZE 64
+
+// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
+// expected cache line size (a power of two).  The first addend avoids sharing
+// when the start address is not a multiple of alignment; the second maintains
+// alignment of starting addresses that happen to be a multiple.
+#define PADDING_SIZE(type, alignment)                           \
+  ((alignment) + align_size_up_(sizeof(type), alignment))
+
+// Templates to create a subclass padded to avoid cache line sharing.  These are
+// effective only when applied to derived-most (leaf) classes.
+
+// When no args are passed to the base ctor.
+template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded: public T {
+private:
+  char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
+
+// When either 0 or 1 args may be passed to the base ctor.
+template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded01: public T {
+public:
+  Padded01(): T() { }
+  Padded01(Arg1T arg1): T(arg1) { }
+private:
+  char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
 
 //----------------------------------------------------------------------------------------------------
 // Utility macros for compilers
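
The Padded template above appends enough bytes after the base object that two adjacent instances never share a cache line. A standalone sketch with a compile-time size check (Counter and per_worker are illustrative; HotSpot applies this as Padded<OopTaskQueue>):

    #include <cstddef>

    #define CACHE_LINE 64
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define PAD_SIZE(type, a) ((a) + ALIGN_UP(sizeof(type), (a)))

    template <class T, std::size_t alignment = CACHE_LINE>
    class Padded : public T {
    private:
      char _pad_buf_[PAD_SIZE(T, alignment)];  // defeats false sharing with neighbors
    };

    struct Counter { volatile long n; };

    // One padded counter per worker; neighbors land on different cache lines.
    static Padded<Counter> per_worker[8];

    static_assert(sizeof(Padded<Counter>) >= sizeof(Counter) + CACHE_LINE,
                  "padding spans at least one full cache line");
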
--- a/hotspot/src/share/vm/utilities/taskqueue.cpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/utilities/taskqueue.cpp	Wed Jul 05 17:18:57 2017 +0200
@@ -31,6 +31,48 @@
 uint ParallelTaskTerminator::_total_peeks = 0;
 #endif
 
+#if TASKQUEUE_STATS
+const char * const TaskQueueStats::_names[last_stat_id] = {
+  "qpush", "qpop", "qpop-s", "qattempt", "qsteal", "opush", "omax"
+};
+
+void TaskQueueStats::print_header(unsigned int line, outputStream* const stream,
+                                  unsigned int width)
+{
+  // Use a width w: 1 <= w <= max_width
+  const unsigned int max_width = 40;
+  const unsigned int w = MAX2(MIN2(width, max_width), 1U);
+
+  if (line == 0) { // spaces equal in width to the header
+    const unsigned int hdr_width = w * last_stat_id + last_stat_id - 1;
+    stream->print("%*s", hdr_width, " ");
+  } else if (line == 1) { // labels
+    stream->print("%*s", w, _names[0]);
+    for (unsigned int i = 1; i < last_stat_id; ++i) {
+      stream->print(" %*s", w, _names[i]);
+    }
+  } else if (line == 2) { // dashed lines
+    char dashes[max_width + 1];
+    memset(dashes, '-', w);
+    dashes[w] = '\0';
+    stream->print("%s", dashes);
+    for (unsigned int i = 1; i < last_stat_id; ++i) {
+      stream->print(" %s", dashes);
+    }
+  }
+}
+
+void TaskQueueStats::print(outputStream* stream, unsigned int width) const
+{
+  #define FMT SIZE_FORMAT_W(*)
+  stream->print(FMT, width, _stats[0]);
+  for (unsigned int i = 1; i < last_stat_id; ++i) {
+    stream->print(" " FMT, width, _stats[i]);
+  }
+  #undef FMT
+}
+#endif // TASKQUEUE_STATS
+
 int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
   const int a =      16807;
   const int m = 2147483647;
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp	Thu Jul 29 13:33:32 2010 -0700
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp	Wed Jul 05 17:18:57 2017 +0200
@@ -22,6 +22,72 @@
  *
  */
 
+// Simple TaskQueue stats that are collected by default in debug builds.
+
+#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
+#define TASKQUEUE_STATS 1
+#elif !defined(TASKQUEUE_STATS)
+#define TASKQUEUE_STATS 0
+#endif
+
+#if TASKQUEUE_STATS
+#define TASKQUEUE_STATS_ONLY(code) code
+#else
+#define TASKQUEUE_STATS_ONLY(code)
+#endif // TASKQUEUE_STATS
+
+#if TASKQUEUE_STATS
+class TaskQueueStats {
+public:
+  enum StatId {
+    push,             // number of taskqueue pushes
+    pop,              // number of taskqueue pops
+    pop_slow,         // subset of taskqueue pops that were done slow-path
+    steal_attempt,    // number of taskqueue steal attempts
+    steal,            // number of taskqueue steals
+    overflow,         // number of overflow pushes
+    overflow_max_len, // max length of overflow stack
+    last_stat_id
+  };
+
+public:
+  inline TaskQueueStats()       { reset(); }
+
+  inline void record_push()     { ++_stats[push]; }
+  inline void record_pop()      { ++_stats[pop]; }
+  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
+  inline void record_steal(bool success);
+  inline void record_overflow(size_t new_length);
+
+  inline size_t get(StatId id) const { return _stats[id]; }
+  inline const size_t* get() const   { return _stats; }
+
+  inline void reset();
+
+  static void print_header(unsigned int line, outputStream* const stream = tty,
+                           unsigned int width = 10);
+  void print(outputStream* const stream = tty, unsigned int width = 10) const;
+
+private:
+  size_t                    _stats[last_stat_id];
+  static const char * const _names[last_stat_id];
+};
+
+void TaskQueueStats::record_steal(bool success) {
+  ++_stats[steal_attempt];
+  if (success) ++_stats[steal];
+}
+
+void TaskQueueStats::record_overflow(size_t new_len) {
+  ++_stats[overflow];
+  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
+}
+
+void TaskQueueStats::reset() {
+  memset(_stats, 0, sizeof(_stats));
+}
+#endif // TASKQUEUE_STATS
+
 template <unsigned int N>
 class TaskQueueSuper: public CHeapObj {
 protected:
@@ -135,6 +201,8 @@
 
   // Total size of queue.
   static const uint total_size() { return N; }
+
+  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
 };
 
 template<class E, unsigned int N = TASKQUEUE_SIZE>
@@ -152,6 +220,7 @@
 public:
   using TaskQueueSuper<N>::max_elems;
   using TaskQueueSuper<N>::size;
+  TASKQUEUE_STATS_ONLY(using TaskQueueSuper<N>::stats;)
 
 private:
   // Slow paths for push, pop_local.  (pop_global has no fast path.)
@@ -224,14 +293,14 @@
     // g++ complains if the volatile result of the assignment is unused.
     const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
+    TASKQUEUE_STATS_ONLY(stats.record_push());
     return true;
   }
   return false;
 }
 
 template<class E, unsigned int N>
-bool GenericTaskQueue<E, N>::
-pop_local_slow(uint localBot, Age oldAge) {
+bool GenericTaskQueue<E, N>::pop_local_slow(uint localBot, Age oldAge) {
   // This queue was observed to contain exactly one element; either this
   // thread will claim it, or a competing "pop_global".  In either case,
   // the queue will be logically empty afterwards.  Create a new Age value
@@ -251,6 +320,7 @@
     if (tempAge == oldAge) {
       // We win.
       assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
+      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
       return true;
     }
   }
@@ -306,6 +376,8 @@
   typedef GrowableArray<E>       overflow_t;
   typedef GenericTaskQueue<E, N> taskqueue_t;
 
+  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)
+
   OverflowTaskQueue();
   ~OverflowTaskQueue();
   void initialize();
@@ -356,6 +428,7 @@
 {
   if (!taskqueue_t::push(t)) {
     overflow_stack()->push(t);
+    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->length()));
   }
   return true;
 }
@@ -424,9 +497,13 @@
 
 template<class T> bool
 GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
-  for (uint i = 0; i < 2 * _n; i++)
-    if (steal_best_of_2(queue_num, seed, t))
+  for (uint i = 0; i < 2 * _n; i++) {
+    if (steal_best_of_2(queue_num, seed, t)) {
+      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
       return true;
+    }
+  }
+  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
   return false;
 }
 
@@ -574,6 +651,7 @@
     // g++ complains if the volatile result of the assignment is unused.
     const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
+    TASKQUEUE_STATS_ONLY(stats.record_push());
     return true;
   } else {
     return push_slow(t, dirty_n_elems);
@@ -603,6 +681,7 @@
   idx_t tp = _age.top();    // XXX
   if (size(localBot, tp) > 0) {
     assert(dirty_size(localBot, tp) != N - 1, "sanity");
+    TASKQUEUE_STATS_ONLY(stats.record_pop());
     return true;
   } else {
     // Otherwise, the queue contained exactly one element; we take the slow
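
The TASKQUEUE_STATS_ONLY(code) wrapper used throughout this change compiles its argument only when TASKQUEUE_STATS is nonzero, so product builds carry neither the counters nor the bookkeeping. A standalone sketch of that compile-time toggle (STATS and Queue are illustrative stand-ins):

    #include <cstdio>

    #ifndef STATS
    #define STATS 1                       // set to 0 to compile the counters away
    #endif

    #if STATS
    #define STATS_ONLY(code) code
    #else
    #define STATS_ONLY(code)
    #endif

    struct Queue {
      STATS_ONLY(unsigned long pushes;)   // field exists only in stats builds
      Queue() { STATS_ONLY(pushes = 0;) }

      void push(int /*task*/) {
        // ... enqueue the task ...
        STATS_ONLY(++pushes;)             // expands to nothing when STATS == 0
      }
    };

    int main() {
      Queue q;
      q.push(1);
      STATS_ONLY(std::printf("pushes = %lu\n", q.pushes);)
      return 0;
    }
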
--- a/jaxp/.hgtags	Thu Jul 29 13:33:32 2010 -0700
+++ b/jaxp/.hgtags	Wed Jul 05 17:18:57 2017 +0200
@@ -77,3 +77,4 @@
 d524be5ef62e8b8cb890c59a5d2c19ef0ab50d45 jdk7-b100
 17f62a566a2020fd908e77106ed885e0c4e7c14d jdk7-b101
 15573625af97d01c4e24549041cba7584da7fe88 jdk7-b102
+b7722e8788644507c10bb69a137de422d0300b24 jdk7-b103
--- a/jaxws/.hgtags	Thu Jul 29 13:33:32 2010 -0700
+++ b/jaxws/.hgtags	Wed Jul 05 17:18:57 2017 +0200
@@ -77,3 +77,4 @@
 bd26d0ce0c3cb43e58a8e2770cc03f26d96ffe5c jdk7-b100
 b55ce274490082712f5e002b38d2eed505ca863d jdk7-b101
 d8580443d1815d68e0035a0560634e50fa899288 jdk7-b102
+267386d6b923f724309cab855a555e2d86a15c8f jdk7-b103
--- a/jdk/.hgtags	Thu Jul 29 13:33:32 2010 -0700
+++ b/jdk/.hgtags	Wed Jul 05 17:18:57 2017 +0200
@@ -77,3 +77,4 @@
 820b4e843d5168370a3bf166d19751a3271d8575 jdk7-b100
 d58354a69011f3d3354765fa3167567c4c4a9612 jdk7-b101
 13029a61b16bec06535d4f0aa98229b358684128 jdk7-b102
+6488b70a23cc6dc4b7e00809bc503c2884bafb28 jdk7-b103
--- a/jdk/make/common/shared/Defs-versions.gmk	Thu Jul 29 13:33:32 2010 -0700
+++ b/jdk/make/common/shared/Defs-versions.gmk	Wed Jul 05 17:18:57 2017 +0200
@@ -191,7 +191,7 @@
 
 # Generic
 REQUIRED_ANT_VER          = 1.6.3
-REQUIRED_BOOT_VER         = 1.5
+REQUIRED_BOOT_VER         = 1.6
 REQUIRED_FREETYPE_VERSION = 2.3.0
 REQUIRED_MAKE_VER         = 3.78
 REQUIRED_UNZIP_VER        = 5.12
--- a/jdk/make/docs/Makefile	Thu Jul 29 13:33:32 2010 -0700
+++ b/jdk/make/docs/Makefile	Wed Jul 05 17:18:57 2017 +0200
@@ -47,9 +47,9 @@
 
 # Url to devdocs page
 #   Was: http://java.sun.com/javase/6/webnotes/devdocs-vs-specs.html
-DEV_DOCS_URL-5 = http://java.sun.com/j2se/1.5.0/docs
-DEV_DOCS_URL-6 = http://download.oracle.com/docs/cd/E17409_01/javase/6/docs
-DEV_DOCS_URL-7 = http://download.oracle.com/docs/cd/E17409_01/javase/7/docs
+DEV_DOCS_URL-5 = http://java.sun.com/j2se/1.5.0/docs/index.html
+DEV_DOCS_URL-6 = http://download.oracle.com/javase/6/docs/index.html
+DEV_DOCS_URL-7 = http://download.oracle.com/javase/7/docs/index.html
 DEV_DOCS_URL = $(DEV_DOCS_URL-$(JDK_MINOR_VERSION))
 
 # Url to Java Language Spec
@@ -84,6 +84,11 @@
                   $(SHARE_SRC)/../solaris/classes \
 	          $(SHARE_SRC)/../windows/classes \
 		  $(SHARE_SRC)/doc/stub
+
+# List of directories that actually exist
+ALL_EXISTING_SOURCE_DIRS := $(wildcard $(ALL_SOURCE_DIRS))
+
+# List with classpath separator between them
 EMPTY:=
 SPACE:= $(EMPTY) $(EMPTY)
 RELEASEDOCS_SOURCEPATH = \
@@ -240,7 +245,8 @@
 # Default target is same as docs target, create core api and all others it can
 #
 
-all docs: coredocs otherdocs
+all: docs
+docs: coredocs otherdocs
 
 #################################################################
 # Production Targets -- USE THESE TARGETS WHEN:
@@ -1178,9 +1184,9 @@
 #
 # Get a cache of all the directories
 
-$(DIRECTORY_CACHE): $(ALL_SOURCE_DIRS)
+$(DIRECTORY_CACHE): $(ALL_EXISTING_SOURCE_DIRS)
 	$(prep-target)
-	@for cp in $(ALL_SOURCE_DIRS) ; do 		\
+	@for cp in $(ALL_EXISTING_SOURCE_DIRS) ; do 	\
 	  $(ECHO) "$(FIND) $${cp} -type f >> $@"; 	\
 	  $(FIND) $${cp} -type f >> $@; 		\
 	done