Merge
author amurillo
Fri, 06 Mar 2015 04:58:52 -0800
changeset 29362 4188dc7f05a8
parent 29298 82cd31c5d6ca (current diff)
parent 29361 ae2aef8d8ab9 (diff)
child 29363 18da5010f46a
child 29454 e5e9478e2ddb
child 29459 d6252edb4fca
child 29467 e6a180c1fbf8
child 29474 81a5c5330d08
Merge
hotspot/src/share/vm/c1/c1_LIR.cpp
hotspot/src/share/vm/compiler/disassembler.cpp
hotspot/src/share/vm/opto/graphKit.cpp
hotspot/src/share/vm/runtime/thread.hpp
hotspot/src/share/vm/runtime/vmStructs.cpp
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -2204,7 +2204,8 @@
 
 // Write the card table byte if needed.
 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
-  CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
+  CardTableModRefBS* bs =
+    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
   assert(bs->kind() == BarrierSet::CardTableModRef ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
 #ifdef ASSERT
@@ -2310,9 +2311,8 @@
   Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
   assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
 
-  G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::G1SATBCT ||
-         bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
+  G1SATBCardTableLoggingModRefBS* bs =
+    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
 
   // Does store cross heap regions?
   if (G1RSBarrierRegionFilter) {
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -694,7 +694,7 @@
             __ release();
           }
 
-          CardTableModRefBS* const ct = (CardTableModRefBS*)bs;
+          CardTableModRefBS* const ct = barrier_set_cast<CardTableModRefBS>(bs);
           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
           assert_different_registers(addr, count, tmp);
 
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -916,7 +916,7 @@
         Register cardtable = G5;
         Register tmp  = G1_scratch;
         Register tmp2 = G3_scratch;
-        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
+        jbyte* byte_map_base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
 
         Label not_already_dirty, restart, refill, young_card;
 
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -3858,9 +3858,8 @@
 
   if (new_val == G0) return;
 
-  G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::G1SATBCT ||
-         bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
+  G1SATBCardTableLoggingModRefBS* bs =
+    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
 
   if (G1RSBarrierRegionFilter) {
     xor3(store_addr, new_val, tmp);
@@ -3904,7 +3903,8 @@
 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
   // If we're writing constant NULL, we can skip the write barrier.
   if (new_val == G0) return;
-  CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
+  CardTableModRefBS* bs =
+    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
   assert(bs->kind() == BarrierSet::CardTableModRef ||
          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
   card_table_write(bs->byte_map_base, tmp, store_addr);
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1019,7 +1019,7 @@
       case BarrierSet::CardTableModRef:
       case BarrierSet::CardTableExtension:
         {
-          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
           assert_different_registers(addr, count, tmp);
 
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1718,8 +1718,8 @@
         // arg0: store_address
         Address store_addr(rbp, 2*BytesPerWord);
 
-        BarrierSet* bs = Universe::heap()->barrier_set();
-        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+        CardTableModRefBS* ct =
+          barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
         assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 
         Label done;
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -4204,8 +4204,8 @@
   Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));
 
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+  CardTableModRefBS* ct =
+    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 
   Label done;
@@ -4305,7 +4305,7 @@
 void MacroAssembler::store_check_part_2(Register obj) {
   BarrierSet* bs = Universe::heap()->barrier_set();
   assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
-  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 
   // The calculation for byte_map_base is as follows:
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -752,7 +752,7 @@
       case BarrierSet::CardTableModRef:
       case BarrierSet::CardTableExtension:
         {
-          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 
           Label L_loop;
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1272,7 +1272,7 @@
       case BarrierSet::CardTableModRef:
       case BarrierSet::CardTableExtension:
         {
-          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 
           Label L_loop;
--- a/hotspot/src/os/windows/vm/attachListener_windows.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/os/windows/vm/attachListener_windows.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -62,7 +62,7 @@
 class Win32AttachListener: AllStatic {
  private:
   enum {
-    preallocate_count = 4                   // number of preallocated operations
+    max_enqueued_operations = 4
   };
 
   // protects the preallocated list and the operation list
@@ -83,9 +83,12 @@
   static void set_tail(Win32AttachOperation* tail)          { _tail = tail; }
 
 
-  // used to wakeup the listener
-  static HANDLE _wakeup;
-  static HANDLE wakeup()                                    { return _wakeup; }
+  // A semaphore is used for communication about enqueued operations.
+  // The maximum count for the semaphore object will be set to "max_enqueued_operations".
+  // The state of a semaphore is signaled when its count is greater than
+  // zero (there are operations enqueued), and nonsignaled when it is zero.
+  static HANDLE _enqueued_ops_semaphore;
+  static HANDLE enqueued_ops_semaphore() { return _enqueued_ops_semaphore; }
 
  public:
   enum {
@@ -110,7 +113,7 @@
 
 // statics
 HANDLE Win32AttachListener::_mutex;
-HANDLE Win32AttachListener::_wakeup;
+HANDLE Win32AttachListener::_enqueued_ops_semaphore;
 Win32AttachOperation* Win32AttachListener::_avail;
 Win32AttachOperation* Win32AttachListener::_head;
 Win32AttachOperation* Win32AttachListener::_tail;
@@ -155,20 +158,19 @@
 };
 
 
-// preallocate the required number of operations
+// Preallocate the maximum number of operations that can be enqueued.
 int Win32AttachListener::init() {
   _mutex = (void*)::CreateMutex(NULL, FALSE, NULL);
   guarantee(_mutex != (HANDLE)NULL, "mutex creation failed");
 
-  _wakeup = ::CreateSemaphore(NULL, 0, 1, NULL);
-  guarantee(_wakeup != (HANDLE)NULL, "semaphore creation failed");
+  _enqueued_ops_semaphore = ::CreateSemaphore(NULL, 0, max_enqueued_operations, NULL);
+  guarantee(_enqueued_ops_semaphore != (HANDLE)NULL, "semaphore creation failed");
 
   set_head(NULL);
   set_tail(NULL);
+  set_available(NULL);
 
-  // preallocate a few operations
-  set_available(NULL);
-  for (int i=0; i<preallocate_count; i++) {
+  for (int i=0; i<max_enqueued_operations; i++) {
     Win32AttachOperation* op = new Win32AttachOperation();
     op->set_next(available());
     set_available(op);
@@ -221,8 +223,12 @@
     op->set_arg(2, arg2);
     op->set_pipe(pipename);
 
-    // wakeup the thread waiting for operations
-    ::ReleaseSemaphore(wakeup(), 1, NULL);
+    // Increment number of enqueued operations.
+    // Side effect: Semaphore will be signaled and will release
+    // any blocking waiters (i.e. the AttachListener thread).
+    BOOL not_exceeding_semaphore_maximum_count =
+      ::ReleaseSemaphore(enqueued_ops_semaphore(), 1, NULL);
+    guarantee(not_exceeding_semaphore_maximum_count, "invariant");
   }
   ::ReleaseMutex(mutex());
 
@@ -230,10 +236,12 @@
 }
 
 
-// dequeue the operation from the head of the operation list. If
+// dequeue the operation from the head of the operation list.
 Win32AttachOperation* Win32AttachListener::dequeue() {
   for (;;) {
-    DWORD res = ::WaitForSingleObject(wakeup(), INFINITE);
+    DWORD res = ::WaitForSingleObject(enqueued_ops_semaphore(), INFINITE);
+    // returning from WaitForSingleObject will have decreased
+    // the current count of the semaphore by 1.
     guarantee(res == WAIT_OBJECT_0, "wait failed");
 
     res = ::WaitForSingleObject(mutex(), INFINITE);
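
The change above replaces a binary wakeup semaphore (maximum count 1) with one whose maximum count equals the number of preallocated operations, so the semaphore's count tracks the queue length exactly. A minimal standalone sketch of the same bounded handoff with the Win32 semaphore API follows; the names and the MAX_OPS constant are illustrative, not HotSpot's.

    // Sketch: a Win32 semaphore whose count mirrors the number of
    // enqueued operations (illustrative names, not HotSpot code).
    #include <windows.h>
    #include <cassert>

    static const LONG MAX_OPS = 4;   // matches the preallocated pool size
    static HANDLE ops_sem;

    void init() {
      // Initial count 0 (nothing enqueued), maximum count MAX_OPS.
      ops_sem = ::CreateSemaphore(NULL, 0, MAX_OPS, NULL);
      assert(ops_sem != NULL);
    }

    void after_enqueue() {
      // Signals one waiter; returns FALSE if the count would exceed
      // MAX_OPS -- the invariant the guarantee() above checks.
      BOOL ok = ::ReleaseSemaphore(ops_sem, 1, NULL);
      assert(ok);
    }

    void before_dequeue() {
      // A successful wait atomically decrements the count by one.
      DWORD res = ::WaitForSingleObject(ops_sem, INFINITE);
      assert(res == WAIT_OBJECT_0);
    }
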
--- a/hotspot/src/share/vm/c1/c1_LIR.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIR.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2105,8 +2105,14 @@
 
 // LIR_OpProfileType
 void LIR_OpProfileType::print_instr(outputStream* out) const {
-  out->print("exact = "); exact_klass()->print_name_on(out);
-  out->print("current = "); ciTypeEntries::print_ciklass(out, current_klass());
+  out->print("exact = ");
+  if  (exact_klass() == NULL) {
+    out->print("unknown");
+  } else {
+    exact_klass()->print_name_on(out);
+  }
+  out->print(" current = "); ciTypeEntries::print_ciklass(out, current_klass());
+  out->print(" ");
   mdp()->print(out);          out->print(" ");
   obj()->print(out);          out->print(" ");
   tmp()->print(out);          out->print(" ");
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1582,9 +1582,9 @@
 ////////////////////////////////////////////////////////////////////////
 
 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
-
-  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
-  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
+  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs);
+  assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code");
+  LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base);
   if (addr->is_address()) {
     LIR_Address* address = addr->as_address_ptr();
     // ptr cannot be an object because we use this barrier for array card marks
@@ -1609,7 +1609,6 @@
     __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
   }
 
-  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
   LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
   if(((int)ct->byte_map_base & 0xff) == 0) {
     __ move(tmp, card_addr);
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -43,7 +43,7 @@
   #define TRACE_BCEA(level, code)
 #endif
 
-// Maintain a map of which aguments a local variable or
+// Maintain a map of which arguments a local variable or
 // stack slot may contain.  In addition to tracking
 // arguments, it tracks two special values, "allocated"
 // which represents any object allocated in the current
@@ -319,14 +319,16 @@
     bool must_record_dependencies = false;
     for (i = arg_size - 1; i >= 0; i--) {
       ArgumentMap arg = state.raw_pop();
-      if (!is_argument(arg))
+      // Check if callee arg is a caller arg or an allocated object
+      bool allocated = arg.contains_allocated();
+      if (!(is_argument(arg) || allocated))
         continue;
       for (int j = 0; j < _arg_size; j++) {
         if (arg.contains(j)) {
           _arg_modified[j] |= analyzer._arg_modified[i];
         }
       }
-      if (!is_arg_stack(arg)) {
+      if (!(is_arg_stack(arg) || allocated)) {
         // arguments have already been recognized as escaping
       } else if (analyzer.is_arg_stack(i) && !analyzer.is_arg_returned(i)) {
         set_method_escape(arg);
@@ -1392,12 +1394,12 @@
   method()->print_short_name();
   tty->print_cr(has_dependencies() ? " (not stored)" : "");
   tty->print("     non-escaping args:      ");
-  _arg_local.print_on(tty);
+  _arg_local.print();
   tty->print("     stack-allocatable args: ");
-  _arg_stack.print_on(tty);
+  _arg_stack.print();
   if (_return_local) {
     tty->print("     returned args:          ");
-    _arg_returned.print_on(tty);
+    _arg_returned.print();
   } else if (is_return_allocated()) {
     tty->print_cr("     return allocated value");
   } else {
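
For orientation, the fix above lets effects propagate not only through caller arguments but also through objects allocated in the current method, which the map's comment calls the special "allocated" value. A rough sketch of such a map, as a bit set with a dedicated "allocated" bit (illustrative only, not HotSpot's ArgumentMap):

    // Sketch: bit set over caller arguments plus an "allocated" bit,
    // mirroring the description in the comment above (illustrative).
    #include <cstdint>

    struct ArgMapSketch {
      static const uint32_t ALLOCATED = 1u << 31; // object allocated here
      uint32_t bits = 0;

      void set(int arg_index)            { bits |= (1u << arg_index); }
      void set_allocated()               { bits |= ALLOCATED; }
      bool contains(int arg_index) const { return (bits >> arg_index) & 1u; }
      bool contains_allocated() const    { return (bits & ALLOCATED) != 0; }
    };
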
--- a/hotspot/src/share/vm/classfile/defaultMethods.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/classfile/defaultMethods.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1091,6 +1091,7 @@
     }
     // update idnum for new location
     merged_methods->at(i)->set_method_idnum(i);
+    merged_methods->at(i)->set_orig_method_idnum(i);
   }
 
   // Verify correct order
--- a/hotspot/src/share/vm/code/codeCache.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -360,7 +360,7 @@
  * run the constructor for the CodeBlob subclass he is busy
  * instantiating.
  */
-CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
+CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
   // Possibly wakes up the sweeper thread.
   NMethodSweeper::notify(code_blob_type);
   assert_locked_or_safepoint(CodeCache_lock);
@@ -379,11 +379,28 @@
     if (cb != NULL) break;
     if (!heap->expand_by(CodeCacheExpansionSize)) {
       // Expansion failed
-      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
-        // Fallback solution: Store non-nmethod code in the non-profiled code heap.
-        // Note that at in the sweeper, we check the reverse_free_ratio of the non-profiled
-        // code heap and force stack scanning if less than 10% if the code heap are free.
-        return allocate(size, CodeBlobType::MethodNonProfiled);
+      if (SegmentedCodeCache && !strict) {
+        // Fallback solution: Try to store code in another code heap.
+        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
+        // and force stack scanning if less than 10% of the code heap are free.
+        int type = code_blob_type;
+        switch (type) {
+        case CodeBlobType::NonNMethod:
+          type = CodeBlobType::MethodNonProfiled;
+          strict = false;   // Allow recursive search for other heaps
+          break;
+        case CodeBlobType::MethodProfiled:
+          type = CodeBlobType::MethodNonProfiled;
+          strict = true;
+          break;
+        case CodeBlobType::MethodNonProfiled:
+          type = CodeBlobType::MethodProfiled;
+          strict = true;
+          break;
+        }
+        if (heap_available(type)) {
+          return allocate(size, type, strict);
+        }
       }
       MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       CompileBroker::handle_full_code_cache(code_blob_type);
--- a/hotspot/src/share/vm/code/codeCache.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -122,7 +122,7 @@
   static void initialize();
 
   // Allocation/administration
-  static CodeBlob* allocate(int size, int code_blob_type); // allocates a new CodeBlob
+  static CodeBlob* allocate(int size, int code_blob_type, bool strict = false); // allocates a new CodeBlob
   static void commit(CodeBlob* cb);                        // called when the allocated CodeBlob has been filled
   static int  alignment_unit();                            // guaranteed alignment of all CodeBlobs
   static int  alignment_offset();                          // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
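
The new strict flag keeps this fallback from recursing indefinitely: the non-strict entry call may redirect to one alternative heap, and that recursive attempt is strict, so it cannot redirect again. A condensed sketch of the resulting search order (illustrative, not the HotSpot sources):

    // Sketch of the fallback chain above when a segmented code heap is
    // full. 'strict' on the recursive call forbids further fallback, so
    // a method-code request tries at most one alternative heap, while a
    // NonNMethod request may end up in either method heap.
    enum BlobTypeSketch { NonNMethod, MethodProfiled, MethodNonProfiled };

    BlobTypeSketch fallback(BlobTypeSketch type, bool& strict) {
      switch (type) {
        case NonNMethod:        strict = false; return MethodNonProfiled;
        case MethodProfiled:    strict = true;  return MethodNonProfiled;
        case MethodNonProfiled: strict = true;  return MethodProfiled;
      }
      return type; // unreachable for the three cases above
    }
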
--- a/hotspot/src/share/vm/compiler/disassembler.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -343,8 +343,8 @@
     }
 
     BarrierSet* bs = Universe::heap()->barrier_set();
-    if (bs->kind() == BarrierSet::CardTableModRef &&
-        adr == (address)((CardTableModRefBS*)(bs))->byte_map_base) {
+    if (bs->is_a(BarrierSet::CardTableModRef) &&
+        adr == (address)(barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base)) {
       st->print("word_map_base");
       if (WizardMode) st->print(" " INTPTR_FORMAT, (intptr_t)adr);
       return;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,9 +97,7 @@
 void ConcurrentMarkSweepThread::run() {
   assert(this == cmst(), "just checking");
 
-  this->record_stack_base_and_size();
-  this->initialize_thread_local_storage();
-  this->set_active_handles(JNIHandleBlock::allocate_block());
+  initialize_in_thread();
   // From this time Thread::current() should be working.
   assert(this == Thread::current(), "just checking");
   if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1440,7 +1440,7 @@
   CMCountDataClosureBase(G1CollectedHeap* g1h,
                          BitMap* region_bm, BitMap* card_bm):
     _g1h(g1h), _cm(g1h->concurrent_mark()),
-    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
+    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
     _region_bm(region_bm), _card_bm(card_bm) { }
 };
 
@@ -3111,7 +3111,7 @@
                               BitMap* cm_card_bm,
                               uint max_worker_id) :
     _g1h(g1h), _cm(g1h->concurrent_mark()),
-    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
+    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
 
   bool doHeapRegion(HeapRegion* hr) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,7 +120,7 @@
   if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
     G1ParGCAllocBuffer* alloc_buf = alloc_buffer(dest, context);
     add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+    alloc_buf->retire();
 
     HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
     if (buf == NULL) {
@@ -154,9 +154,7 @@
     G1ParGCAllocBuffer* const buf = _alloc_buffers[state];
     if (buf != NULL) {
       add_to_alloc_buffer_waste(buf->words_remaining());
-      buf->flush_stats_and_retire(_g1h->alloc_buffer_stats(state),
-                                  true /* end_of_gc */,
-                                  false /* retain */);
+      buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
     }
   }
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -159,11 +159,11 @@
     _retired = false;
   }
 
-  virtual void retire(bool end_of_gc, bool retain) {
+  virtual void retire() {
     if (_retired) {
       return;
     }
-    ParGCAllocBuffer::retire(end_of_gc, retain);
+    ParGCAllocBuffer::retire();
     _retired = true;
   }
 };
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1274,7 +1274,7 @@
   virtual bool is_in_closed_subset(const void* p) const;
 
   G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
-    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
+    return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
   }
 
   // This resets the card table to all zeros.  It is used after
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -33,8 +33,11 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
-G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap, BarrierSet::Name kind) :
-  CardTableModRefBS(whole_heap, kind) { }
+G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(
+  MemRegion whole_heap,
+  const BarrierSet::FakeRtti& fake_rtti) :
+  CardTableModRefBS(whole_heap, fake_rtti.add_tag(BarrierSet::G1SATBCT))
+{ }
 
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   // Nulls should have been already filtered.
@@ -130,7 +133,7 @@
 
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
-  G1SATBCardTableModRefBS(whole_heap, BarrierSet::G1SATBCTLogging),
+  G1SATBCardTableModRefBS(whole_heap, BarrierSet::FakeRtti(G1SATBCTLogging)),
   _dcqs(JavaThread::dirty_card_queue_set()),
   _listener()
 {
@@ -203,7 +206,7 @@
   if (new_val == NULL) return;
   // Otherwise, log it.
   G1SATBCardTableLoggingModRefBS* g1_bs =
-    (G1SATBCardTableLoggingModRefBS*)Universe::heap()->barrier_set();
+    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
   g1_bs->write_ref_field_work(field, new_val);
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -43,7 +43,7 @@
     g1_young_gen = CT_MR_BS_last_reserved << 1
   };
 
-  G1SATBCardTableModRefBS(MemRegion whole_heap, BarrierSet::Name kind);
+  G1SATBCardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
   ~G1SATBCardTableModRefBS() { }
 
 public:
@@ -53,10 +53,6 @@
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  bool is_a(BarrierSet::Name bsn) {
-    return bsn == BarrierSet::G1SATBCT || CardTableModRefBS::is_a(bsn);
-  }
-
   virtual bool has_write_ref_pre_barrier() { return true; }
 
   // This notes that we don't need to access any BarrierSet data
@@ -128,6 +124,11 @@
   }
 };
 
+template<>
+struct BarrierSet::GetName<G1SATBCardTableModRefBS> {
+  static const BarrierSet::Name value = BarrierSet::G1SATBCT;
+};
+
 class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
  private:
   G1SATBCardTableLoggingModRefBS* _card_table;
@@ -159,11 +160,6 @@
 
   virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
 
-  bool is_a(BarrierSet::Name bsn) {
-    return bsn == BarrierSet::G1SATBCTLogging ||
-      G1SATBCardTableModRefBS::is_a(bsn);
-  }
-
   void write_ref_field_work(void* field, oop new_val, bool release = false);
 
   // Can be called from static contexts.
@@ -177,4 +173,9 @@
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
 };
 
+template<>
+struct BarrierSet::GetName<G1SATBCardTableLoggingModRefBS> {
+  static const BarrierSet::Name value = BarrierSet::G1SATBCTLogging;
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
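
The GetName<T> specializations added above are what make the checked downcast possible: they map a barrier-set class to its Name tag at compile time, and the cast verifies that tag against the object's fake-RTTI tag set. A plausible shape for barrier_set_cast, consistent with the declarations in this changeset (the exact HotSpot definition may differ):

    // Sketch of the checked downcast enabled by GetName<T>: assert
    // (debug builds) that the object's tag set contains T's tag, then
    // perform a static_cast.
    template<typename T>
    inline T* barrier_set_cast(BarrierSet* bs) {
      assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong barrier set type");
      return static_cast<T*>(bs);
    }
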
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -193,7 +193,7 @@
   HeapRegionRemSet* hrrs = rem_set();
   hrrs->clear();
   CardTableModRefBS* ct_bs =
-                   (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
+    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
   ct_bs->clear(MemRegion(bottom(), end()));
 }
 
@@ -643,13 +643,9 @@
   // _vo == UseNextMarking -> use "next" marking information,
   // _vo == UseMarkWord    -> use mark word from object header.
   VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
-    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
-    _failures(false), _n_failures(0), _vo(vo)
-  {
-    BarrierSet* bs = _g1h->barrier_set();
-    if (bs->is_a(BarrierSet::CardTableModRef))
-      _bs = (CardTableModRefBS*)bs;
-  }
+    _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
+    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo)
+  { }
 
   void set_containing_obj(oop obj) {
     _containing_obj = obj;
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -232,7 +232,7 @@
     if (word_sz * 100 <
         ParallelGCBufferWastePct * plab->word_sz()) {
       // Is small enough; abandon this buffer and start a new one.
-      plab->retire(false, false);
+      plab->retire();
       size_t buf_size = plab->word_sz();
       HeapWord* buf_space = sp->par_allocate(buf_size);
       if (buf_space == NULL) {
@@ -463,10 +463,7 @@
 
     // Flush stats related to To-space PLAB activity and
     // retire the last buffer.
-    par_scan_state.to_space_alloc_buffer()->
-      flush_stats_and_retire(_gen.plab_stats(),
-                             true /* end_of_gc */,
-                             false /* retain */);
+    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats());
 
     // Every thread has its own age table.  We need to merge
     // them all into one.
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,7 +81,7 @@
     assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
     _young_gen = heap->young_gen();
-    _card_table = (CardTableExtension*)heap->barrier_set();
+    _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
     // No point in asserting barrier set type here. Need to make CardTableExtension
     // a unique barrier set type.
   }
@@ -341,7 +341,9 @@
 
   PSOldGen* old_gen = heap->old_gen();
 
-  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());
+  CheckForPreciseMarks check(
+    heap->young_gen(),
+    barrier_set_cast<CardTableExtension>(heap->barrier_set()));
 
   old_gen->oop_iterate_no_header(&check);
 
@@ -349,8 +351,8 @@
 }
 
 void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
-  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
-  // FIX ME ASSERT HERE
+  CardTableExtension* card_table =
+    barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
 
   jbyte* bot = card_table->byte_for(mr.start());
   jbyte* top = card_table->byte_for(mr.end());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -54,10 +54,16 @@
   };
 
   CardTableExtension(MemRegion whole_heap) :
-    CardTableModRefBS(whole_heap, BarrierSet::CardTableModRef) { }
-
-  // Too risky for the 4/10/02 putback
-  // BarrierSet::Name kind() { return BarrierSet::CardTableExtension; }
+    CardTableModRefBS(
+      whole_heap,
+      // Concrete tag should be BarrierSet::CardTableExtension.
+      // That will presently break things in a bunch of places though.
+      // The concrete tag is used as a dispatch key in many places, and
+      // CardTableExtension does not correctly dispatch in some of those
+      // uses. This will be addressed as part of a reorganization of the
+      // BarrierSet hierarchy.
+      BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableExtension))
+    { }
 
   // Scavenge support
   void scavenge_contents_parallel(ObjectStartArray* start_array,
@@ -110,4 +116,9 @@
 #endif // ASSERT
 };
 
+template<>
+struct BarrierSet::GetName<CardTableExtension> {
+  static const BarrierSet::Name value = BarrierSet::CardTableExtension;
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_CARDTABLEEXTENSION_HPP
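
As the comment in the constructor explains, the concrete tag stays CardTableModRef while the tag set gains CardTableExtension. A short hypothetical illustration of what that implies for callers, relying on the kind()/is_a() semantics defined later in this changeset:

    // Hypothetical illustration of the constructor above: kind()-based
    // dispatch still sees CardTableModRef, while the checked cast to
    // CardTableExtension succeeds via the extended tag set.
    void illustrate(CardTableExtension* bs) {
      assert(bs->kind() == BarrierSet::CardTableModRef, "concrete tag kept");
      assert(bs->is_a(BarrierSet::CardTableExtension), "tag set extended");
      CardTableExtension* ct = barrier_set_cast<CardTableExtension>(bs); // OK
      (void)ct;
    }
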
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,6 +98,7 @@
   // Set up the thread for stack overflow support
   this->record_stack_base_and_size();
   this->initialize_thread_local_storage();
+  this->initialize_named_thread();
   // Bind yourself to your processor.
   if (processor_id() != GCTaskManager::sentinel_worker()) {
     if (TraceGCTaskThread) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,6 +1,5 @@
-
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -233,15 +232,12 @@
                       young_gen->to_space()->is_empty();
     young_gen_empty = eden_empty && survivors_empty;
 
-    BarrierSet* bs = heap->barrier_set();
-    if (bs->is_a(BarrierSet::ModRef)) {
-      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
-      MemRegion old_mr = heap->old_gen()->reserved();
-      if (young_gen_empty) {
-        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
-      } else {
-        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
-      }
+    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
+    MemRegion old_mr = heap->old_gen()->reserved();
+    if (young_gen_empty) {
+      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
+    } else {
+      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
     }
 
     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,8 +111,8 @@
 
   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 
-  CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
-  assert (_ct->kind() == BarrierSet::CardTableModRef, "Sanity");
+  CardTableModRefBS* _ct =
+    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
 
   // Verify that the start and end of this generation is the start of a card.
   // If this wasn't true, a single card could span more than one generation,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1050,16 +1050,12 @@
   bool young_gen_empty = eden_empty && from_space->is_empty() &&
     to_space->is_empty();
 
-  BarrierSet* bs = heap->barrier_set();
-  if (bs->is_a(BarrierSet::ModRef)) {
-    ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
-    MemRegion old_mr = heap->old_gen()->reserved();
-
-    if (young_gen_empty) {
-      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
-    } else {
-      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
-    }
+  ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
+  MemRegion old_mr = heap->old_gen()->reserved();
+  if (young_gen_empty) {
+    modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
+  } else {
+    modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
   }
 
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,6 +1,5 @@
-
 /*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -866,9 +865,7 @@
                            NULL);                      // header provides liveness info
 
   // Cache the cardtable
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
-  _card_table = (CardTableExtension*)bs;
+  _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
 
   _counters = new CollectorCounters("PSScavenge", 0);
 }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -183,8 +183,8 @@
     PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
 
     assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-    CardTableExtension* card_table = (CardTableExtension *)Universe::heap()->barrier_set();
-    // FIX ME! Assert that card_table is the type we believe it to be.
+    CardTableExtension* card_table =
+      barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
 
     card_table->scavenge_contents_parallel(_gen->start_array(),
                                            _gen->object_space(),
--- a/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,7 @@
 void ConcurrentGCThread::initialize_in_thread() {
   this->record_stack_base_and_size();
   this->initialize_thread_local_storage();
+  this->initialize_named_thread();
   this->set_active_handles(JNIHandleBlock::allocate_block());
   // From this time Thread::current() should be working.
   assert(this == Thread::current(), "just checking");
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,27 +24,30 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
-#include "memory/sharedHeap.hpp"
+#include "memory/threadLocalAllocBuffer.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/oop.inline.hpp"
-#include "utilities/globalDefinitions.hpp"
 
-PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
+size_t ParGCAllocBuffer::min_size() {
+  // Make sure that we return something that is larger than AlignmentReserve
+  return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
+}
+
+size_t ParGCAllocBuffer::max_size() {
+  return ThreadLocalAllocBuffer::max_size();
+}
 
 ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
   _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
-  _end(NULL), _hard_end(NULL),
-  _retained(false), _retained_filler(),
-  _allocated(0), _wasted(0)
+  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0)
 {
-  assert (min_size() > AlignmentReserve, "Inconsistency!");
-  // arrayOopDesc::header_size depends on command line initialization.
-  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
-  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
+  // ArrayOopDesc::header_size depends on command line initialization.
+  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
+  assert(min_size() > AlignmentReserve,
+         err_msg("Minimum PLAB size " SIZE_FORMAT" must be larger than alignment reserve " SIZE_FORMAT" "
+                 "to be able to contain objects", min_size(), AlignmentReserve));
 }
 
-size_t ParGCAllocBuffer::FillerHeaderSize;
-
 // If the minimum object size is greater than MinObjAlignment, we can
 // end up with a shard at the end of the buffer that's smaller than
 // the smallest object.  We can't allow that because the buffer must
@@ -52,39 +55,33 @@
 // sure we have enough space for a filler int array object.
 size_t ParGCAllocBuffer::AlignmentReserve;
 
-void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
-  assert(!retain || end_of_gc, "Can only retain at GC end.");
-  if (_retained) {
-    // If the buffer had been retained shorten the previous filler object.
-    assert(_retained_filler.end() <= _top, "INVARIANT");
-    CollectedHeap::fill_with_object(_retained_filler);
-    // Wasted space book-keeping, otherwise (normally) done in invalidate()
-    _wasted += _retained_filler.word_size();
-    _retained = false;
-  }
-  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
+void ParGCAllocBuffer::flush_and_retire_stats(PLABStats* stats) {
+  // Retire the last allocation buffer.
+  size_t unused = retire_internal();
+
+  // Now flush the statistics.
+  stats->add_allocated(_allocated);
+  stats->add_wasted(_wasted);
+  stats->add_unused(unused);
+
+  // Since we have flushed the stats we need to clear  the _allocated and _wasted
+  // fields in case somebody retains an instance of this over GCs. Not doing so
+  // will artifically inflate the values in the statistics.
+  _allocated = 0;
+  _wasted = 0;
+}
+
+void ParGCAllocBuffer::retire() {
+  _wasted += retire_internal();
+}
+
+size_t ParGCAllocBuffer::retire_internal() {
+  size_t result = 0;
   if (_top < _hard_end) {
     CollectedHeap::fill_with_object(_top, _hard_end);
-    if (!retain) {
-      invalidate();
-    } else {
-      // Is there wasted space we'd like to retain for the next GC?
-      if (pointer_delta(_end, _top) > FillerHeaderSize) {
-        _retained = true;
-        _retained_filler = MemRegion(_top, FillerHeaderSize);
-        _top = _top + FillerHeaderSize;
-      } else {
-        invalidate();
-      }
-    }
+    result += invalidate();
   }
-}
-
-void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
-  assert(ResizePLAB, "Wasted work");
-  stats->add_allocated(_allocated);
-  stats->add_wasted(_wasted);
-  stats->add_unused(pointer_delta(_end, _top));
+  return result;
 }
 
 // Compute desired plab size and latch result for later
@@ -101,44 +98,37 @@
            err_msg("Inconsistency in PLAB stats: "
                    "_allocated: "SIZE_FORMAT", "
                    "_wasted: "SIZE_FORMAT", "
-                   "_unused: "SIZE_FORMAT", "
-                   "_used  : "SIZE_FORMAT,
-                   _allocated, _wasted, _unused, _used));
+                   "_unused: "SIZE_FORMAT,
+                   _allocated, _wasted, _unused));
 
     _allocated = 1;
   }
-  double wasted_frac    = (double)_unused/(double)_allocated;
-  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
-                                   TargetPLABWastePct);
+  double wasted_frac    = (double)_unused / (double)_allocated;
+  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
   if (target_refills == 0) {
     target_refills = 1;
   }
-  _used = _allocated - _wasted - _unused;
-  size_t plab_sz = _used/(target_refills*no_of_gc_workers);
-  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
+  size_t used = _allocated - _wasted - _unused;
+  size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
   // Take historical weighted average
-  _filter.sample(plab_sz);
+  _filter.sample(recent_plab_sz);
   // Clip from above and below, and align to object boundary
-  plab_sz = MAX2(min_size(), (size_t)_filter.average());
-  plab_sz = MIN2(max_size(), plab_sz);
-  plab_sz = align_object_size(plab_sz);
+  size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
+  new_plab_sz = MIN2(max_size(), new_plab_sz);
+  new_plab_sz = align_object_size(new_plab_sz);
   // Latch the result
-  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
-  _desired_plab_sz = plab_sz;
-  // Now clear the accumulators for next round:
-  // note this needs to be fixed in the case where we
-  // are retaining across scavenges. FIX ME !!! XXX
-  _allocated = 0;
-  _wasted    = 0;
-  _unused    = 0;
+  if (PrintPLAB) {
+    gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
+  }
+  _desired_plab_sz = new_plab_sz;
+
+  reset();
 }
 
 #ifndef PRODUCT
 void ParGCAllocBuffer::print() {
-  gclog_or_tty->print("parGCAllocBuffer: _bottom: " PTR_FORMAT "  _top: " PTR_FORMAT
-             "  _end: " PTR_FORMAT "  _hard_end: " PTR_FORMAT " _retained: %c"
-             " _retained_filler: [" PTR_FORMAT "," PTR_FORMAT ")\n",
-             _bottom, _top, _end, _hard_end,
-             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
+  gclog_or_tty->print_cr("parGCAllocBuffer: _bottom: " PTR_FORMAT "  _top: " PTR_FORMAT
+    "  _end: " PTR_FORMAT "  _hard_end: " PTR_FORMAT ")",
+    p2i(_bottom), p2i(_top), p2i(_end), p2i(_hard_end));
 }
 #endif // !PRODUCT
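
For a concrete feel of the resizing arithmetic in adjust_desired_plab_sz() above, here is a worked sketch with made-up inputs; the constants stand in for the TargetSurvivorRatio and TargetPLABWastePct flags, and the smoothing/clipping steps are noted in comments:

    // Worked sketch of the PLAB resizing arithmetic above with
    // illustrative numbers (not HotSpot code).
    #include <cstddef>

    size_t desired_plab_sz_sketch(size_t allocated, size_t wasted,
                                  size_t unused, unsigned gc_workers) {
      const double target_survivor_ratio = 50.0;  // stand-in flag value
      const double target_plab_waste_pct = 10.0;  // stand-in flag value
      double wasted_frac = (double)unused / (double)allocated;
      size_t target_refills =
        (size_t)((wasted_frac * target_survivor_ratio) / target_plab_waste_pct);
      if (target_refills == 0) target_refills = 1;
      size_t used = allocated - wasted - unused;
      // e.g. allocated=100000, wasted=500, unused=2000, 4 workers:
      // wasted_frac=0.02 -> target_refills=1 -> used=97500 -> 24375 words.
      // The real code then smooths this through the decaying average and
      // clips it to [min_size(), max_size()] before latching the result.
      return used / (target_refills * gc_workers);
    }
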
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,37 +24,43 @@
 
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
-#include "gc_interface/collectedHeap.hpp"
+
+#include "gc_implementation/shared/gcUtil.hpp"
 #include "memory/allocation.hpp"
-#include "memory/blockOffsetTable.hpp"
-#include "memory/threadLocalAllocBuffer.hpp"
+#include "runtime/atomic.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-// Forward decl.
-
+// Forward declarations.
 class PLABStats;
 
 // A per-thread allocation buffer used during GC.
 class ParGCAllocBuffer: public CHeapObj<mtGC> {
 protected:
-  char head[32];
-  size_t _word_sz;          // in HeapWord units
+  char      head[32];
+  size_t    _word_sz;          // In HeapWord units
   HeapWord* _bottom;
   HeapWord* _top;
-  HeapWord* _end;       // last allocatable address + 1
-  HeapWord* _hard_end;  // _end + AlignmentReserve
-  bool      _retained;  // whether we hold a _retained_filler
-  MemRegion _retained_filler;
+  HeapWord* _end;           // Last allocatable address + 1
+  HeapWord* _hard_end;      // _end + AlignmentReserve
   // In support of ergonomic sizing of PLAB's
   size_t    _allocated;     // in HeapWord units
   size_t    _wasted;        // in HeapWord units
-  char tail[32];
-  static size_t FillerHeaderSize;
+  char      tail[32];
   static size_t AlignmentReserve;
 
-  // Flush the stats supporting ergonomic sizing of PLAB's
-  // Should not be called directly
-  void flush_stats(PLABStats* stats);
+  // Force future allocations to fail and queries for contains()
+  // to return false. Returns the amount of unused space in this PLAB.
+  size_t invalidate() {
+    _end    = _hard_end;
+    size_t remaining = pointer_delta(_end, _top);  // Calculate remaining space.
+    _top    = _end;      // Force future allocations to fail.
+    _bottom = _end;      // Force future contains() queries to return false.
+    return remaining;
+  }
+
+  // Fill in remaining space with a dummy object and invalidate the PLAB. Returns
+  // the amount of remaining space.
+  size_t retire_internal();
 
 public:
   // Initializes the buffer to be empty, but with the given "word_sz".
@@ -62,14 +68,10 @@
   ParGCAllocBuffer(size_t word_sz);
   virtual ~ParGCAllocBuffer() {}
 
-  static const size_t min_size() {
-    // Make sure that we return something that is larger than AlignmentReserve
-    return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
-  }
-
-  static const size_t max_size() {
-    return ThreadLocalAllocBuffer::max_size();
-  }
+  // Minimum PLAB size.
+  static size_t min_size();
+  // Maximum PLAB size.
+  static size_t max_size();
 
   // If an allocation of the given "word_sz" can be satisfied within the
   // buffer, do the allocation, returning a pointer to the start of the
@@ -128,62 +130,37 @@
     _allocated += word_sz();
   }
 
-  // Flush the stats supporting ergonomic sizing of PLAB's
-  // and retire the current buffer.
-  void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
-    // We flush the stats first in order to get a reading of
-    // unused space in the last buffer.
-    if (ResizePLAB) {
-      flush_stats(stats);
+  // Flush allocation statistics into the given PLABStats supporting ergonomic
+  // sizing of PLAB's and retire the current buffer. To be called at the end of
+  // GC.
+  void flush_and_retire_stats(PLABStats* stats);
 
-      // Since we have flushed the stats we need to clear
-      // the _allocated and _wasted fields. Not doing so
-      // will artifically inflate the values in the stats
-      // to which we add them.
-      // The next time we flush these values, we will add
-      // what we have just flushed in addition to the size
-      // of the buffers allocated between now and then.
-      _allocated = 0;
-      _wasted = 0;
-    }
-    // Retire the last allocation buffer.
-    retire(end_of_gc, retain);
-  }
-
-  // Force future allocations to fail and queries for contains()
-  // to return false
-  void invalidate() {
-    assert(!_retained, "Shouldn't retain an invalidated buffer.");
-    _end    = _hard_end;
-    _wasted += pointer_delta(_end, _top);  // unused  space
-    _top    = _end;      // force future allocations to fail
-    _bottom = _end;      // force future contains() queries to return false
-  }
-
-  // Fills in the unallocated portion of the buffer with a garbage object.
-  // If "end_of_gc" is TRUE, is after the last use in the GC.  IF "retain"
-  // is true, attempt to re-use the unused portion in the next GC.
-  virtual void retire(bool end_of_gc, bool retain);
+  // Fills in the unallocated portion of the buffer with a garbage object and updates
+  // statistics. To be called during GC.
+  virtual void retire();
 
   void print() PRODUCT_RETURN;
 };
 
-// PLAB stats book-keeping
+// PLAB book-keeping.
 class PLABStats VALUE_OBJ_CLASS_SPEC {
-  size_t _allocated;      // total allocated
+  size_t _allocated;      // Total allocated
   size_t _wasted;         // of which wasted (internal fragmentation)
   size_t _unused;         // Unused in last buffer
-  size_t _used;           // derived = allocated - wasted - unused
-  size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
+  size_t _desired_plab_sz;// Output of filter (below), suitably trimmed and quantized
   AdaptiveWeightedAverage
-         _filter;         // integrator with decay
+         _filter;         // Integrator with decay
 
+  void reset() {
+    _allocated = 0;
+    _wasted    = 0;
+    _unused    = 0;
+  }
  public:
   PLABStats(size_t desired_plab_sz_, unsigned wt) :
     _allocated(0),
     _wasted(0),
     _unused(0),
-    _used(0),
     _desired_plab_sz(desired_plab_sz_),
     _filter(wt)
   { }
@@ -200,9 +177,9 @@
     return _desired_plab_sz;
   }
 
+  // Computes the new desired PLAB size from the accumulated allocation
+  // statistics, latches it into _desired_plab_sz, and clears the sensor
+  // accumulators.
   void adjust_desired_plab_sz(uint no_of_gc_workers);
-                                 // filter computation, latches output to
-                                 // _desired_plab_sz, clears sensor accumulators
 
   void add_allocated(size_t v) {
     Atomic::add_ptr(v, &_allocated);
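
The out-of-line flush_and_retire_stats() above replaces the inline
flush_stats_and_retire() removed in this hunk. A minimal sketch of what it
plausibly does, reconstructed from the removed inline body (the real
definition lives in the .cpp file, which this hunk does not show;
add_wasted() and add_unused() are assumed by analogy with the
add_allocated() accessor visible above):

    // Sketch only: ordering and bookkeeping taken from the removed
    // inline flush_stats_and_retire() above.
    void ParGCAllocBuffer::flush_and_retire_stats(PLABStats* stats) {
      // Flush first, to get a reading of unused space in the last buffer.
      stats->add_allocated(_allocated);
      stats->add_wasted(_wasted);
      stats->add_unused(pointer_delta(_end, _top));
      // Clear the sensors so the next flush does not add what was just
      // flushed on top of itself (see the comment in the removed version).
      _allocated = 0;
      _wasted    = 0;
      // Retire the buffer: fill the unallocated tail with a garbage object.
      retire();
    }
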
--- a/hotspot/src/share/vm/memory/barrierSet.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/memory/barrierSet.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -27,6 +27,7 @@
 
 #include "memory/memRegion.hpp"
 #include "oops/oopsHierarchy.hpp"
+#include "utilities/fakeRttiSupport.hpp"
 
 // This class provides the interface between a barrier implementation and
 // the rest of the system.
@@ -34,18 +35,57 @@
 class BarrierSet: public CHeapObj<mtGC> {
   friend class VMStructs;
 public:
-  enum Name {
-    ModRef,
-    CardTableModRef,
-    CardTableExtension,
-    G1SATBCT,
-    G1SATBCTLogging
+  // Fake RTTI support.  For a derived class T to participate
+  // - T must have a corresponding Name entry.
+  // - GetName<T> must be specialized to return the corresponding Name
+  //   entry.
+  // - If T is a base class, the constructor must have a FakeRtti
+  //   parameter and pass it up to its base class, with the tag set
+  //   augmented with the corresponding Name entry.
+  // - If T is a concrete class, the constructor must create a
+  //   FakeRtti object whose tag set includes the corresponding Name
+  //   entry, and pass it up to its base class.
+
+  enum Name {                   // associated class
+    ModRef,                     // ModRefBarrierSet
+    CardTableModRef,            // CardTableModRefBS
+    CardTableForRS,             // CardTableModRefBSForCTRS
+    CardTableExtension,         // CardTableExtension
+    G1SATBCT,                   // G1SATBCardTableModRefBS
+    G1SATBCTLogging             // G1SATBCardTableLoggingModRefBS
   };
 
+protected:
+  typedef FakeRttiSupport<BarrierSet, Name> FakeRtti;
+
+private:
+  FakeRtti _fake_rtti;
+
+  // Metafunction mapping a class derived from BarrierSet to the
+  // corresponding Name enum tag.
+  template<typename T> struct GetName;
+
+  // Downcast argument to a derived barrier set type.
+  // The cast is checked in a debug build.
+  // T must have a specialization for BarrierSet::GetName<T>.
+  template<typename T> friend T* barrier_set_cast(BarrierSet* bs);
+
+public:
+  // Note: This is not necessarily the Name corresponding to the
+  // concrete class of this object; see CardTableModRefBSForCTRS,
+  // whose concrete tag is deliberately CardTableModRef for now.
+  BarrierSet::Name kind() const { return _fake_rtti.concrete_tag(); }
+
+  // Test whether this object is of the type corresponding to bsn.
+  bool is_a(BarrierSet::Name bsn) const { return _fake_rtti.has_tag(bsn); }
+
+  // End of fake RTTI support.
+
+public:
   enum Flags {
     None                = 0,
     TargetUninitialized = 1
   };
+
 protected:
   // Some barrier sets create tables whose elements correspond to parts of
   // the heap; the CardTableModRefBS is an example.  Such barrier sets will
@@ -53,17 +93,12 @@
   // "covering" parts of the heap that are committed. At most one covered
   // region per generation is needed.
   static const int _max_covered_regions = 2;
-  Name _kind;
 
-  BarrierSet(Name kind) : _kind(kind) { }
+  BarrierSet(const FakeRtti& fake_rtti) : _fake_rtti(fake_rtti) { }
   ~BarrierSet() { }
 
 public:
 
-  // To get around prohibition on RTTI.
-  BarrierSet::Name kind() { return _kind; }
-  virtual bool is_a(BarrierSet::Name bsn) = 0;
-
   // These operations indicate what kind of barriers the BarrierSet has.
   virtual bool has_read_ref_barrier() = 0;
   virtual bool has_read_prim_barrier() = 0;
@@ -177,4 +212,10 @@
   virtual void print_on(outputStream* st) const = 0;
 };
 
+template<typename T>
+inline T* barrier_set_cast(BarrierSet* bs) {
+  assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
+  return static_cast<T*>(bs);
+}
+
 #endif // SHARE_VM_MEMORY_BARRIERSET_HPP
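
The comment block at the top of BarrierSet spells out a three-part protocol
for joining the fake RTTI scheme. A hedged sketch with an invented concrete
class (MyBS and its MyBSKind Name entry exist only for illustration; the
real participants are the classes listed beside the enum):

    // Hypothetical concrete barrier set following the protocol above.
    class MyBS : public ModRefBarrierSet {
    public:
      // Concrete class: build a FakeRtti carrying our own Name entry and
      // pass it up; each base constructor then add_tag()s its own entry.
      MyBS() : ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::MyBSKind, 0)) { }
      // ... the pure virtual barrier functions would be implemented here ...
    };

    // Required metafunction specialization, mirroring the ones this
    // changeset adds for CardTableModRefBS and ModRefBarrierSet below.
    template<>
    struct BarrierSet::GetName<MyBS> {
      static const BarrierSet::Name value = BarrierSet::MyBSKind;
    };

    // With both pieces in place the checked downcast works:
    //   MyBS* bs = barrier_set_cast<MyBS>(Universe::heap()->barrier_set());
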
--- a/hotspot/src/share/vm/memory/barrierSet.inline.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/memory/barrierSet.inline.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -34,7 +34,7 @@
 
 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
   if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_field_pre(field, new_val);
+    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field_pre(field, new_val);
   } else {
     write_ref_field_pre_work(field, new_val);
   }
@@ -42,7 +42,7 @@
 
 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
   if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val, release);
+    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field(field, new_val, release);
   } else {
     write_ref_field_work(field, new_val, release);
   }
@@ -78,7 +78,7 @@
 
 inline void BarrierSet::write_region(MemRegion mr) {
   if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_region(mr);
+    barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
   } else {
     write_region_work(mr);
   }
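
In all three dispatch sites above the conversion is unchanged in product
builds; what the new form adds is a debug-time type check. The difference,
sketched:

    // Old form: reinterprets unconditionally, even if bs is some other kind.
    //   CardTableModRefBS* ct = (CardTableModRefBS*)bs;
    // New form: the same static_cast in product builds, but in debug builds
    // it asserts bs->is_a(BarrierSet::CardTableModRef), so a mis-dispatched
    // barrier set fails fast instead of silently corrupting memory.
    //   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
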
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -53,8 +53,10 @@
   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
 }
 
-CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap, BarrierSet::Name kind) :
-  ModRefBarrierSet(kind),
+CardTableModRefBS::CardTableModRefBS(
+  MemRegion whole_heap,
+  const BarrierSet::FakeRtti& fake_rtti) :
+  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
   _whole_heap(whole_heap),
   _guard_index(0),
   _guard_region(),
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -279,11 +279,6 @@
   static int precleaned_card_val() { return precleaned_card; }
   static int deferred_card_val()   { return deferred_card; }
 
-  // For RTTI simulation.
-  bool is_a(BarrierSet::Name bsn) {
-    return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
-  }
-
   virtual void initialize();
 
   // *** Barrier set functions.
@@ -292,7 +287,7 @@
 
 protected:
 
-  CardTableModRefBS(MemRegion whole_heap, BarrierSet::Name kind);
+  CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
   ~CardTableModRefBS();
 
   // Record a reference update. Note that these versions are precise!
@@ -462,6 +457,11 @@
   void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
 };
 
+template<>
+struct BarrierSet::GetName<CardTableModRefBS> {
+  static const BarrierSet::Name value = BarrierSet::CardTableModRef;
+};
+
 class CardTableRS;
 
 // A specialization for the CardTableRS gen rem set.
@@ -472,10 +472,24 @@
   bool card_may_have_been_dirty(jbyte cv);
 public:
   CardTableModRefBSForCTRS(MemRegion whole_heap) :
-    CardTableModRefBS(whole_heap, BarrierSet::CardTableModRef) {}
+    CardTableModRefBS(
+      whole_heap,
+      // Concrete tag should be BarrierSet::CardTableForRS.
+      // That will presently break things in a bunch of places though.
+      // The concrete tag is used as a dispatch key in many places, and
+      // CardTableForRS does not correctly dispatch in some of those
+      // uses. This will be addressed as part of a reorganization of the
+      // BarrierSet hierarchy.
+      BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableForRS))
+    {}
 
   void set_CTRS(CardTableRS* rs) { _rs = rs; }
 };
 
+template<>
+struct BarrierSet::GetName<CardTableModRefBSForCTRS> {
+  static const BarrierSet::Name value = BarrierSet::CardTableForRS;
+};
+
 
 #endif // SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP
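
The CTRS constructor above makes the tag-set mechanics concrete. Reading
the constructors in this changeset together (each base adds its own tag on
the way up), a CardTableModRefBSForCTRS instance answers queries as
follows:

    // Tag set of a CardTableModRefBSForCTRS, as built by the constructors
    // shown in this changeset:
    //   FakeRtti(CardTableModRef, 0)     concrete tag (the deliberate lie)
    //     .add_tag(CardTableForRS)       CTRS constructor, above
    //     .add_tag(CardTableModRef)      CardTableModRefBS constructor
    //     .add_tag(ModRef)               ModRefBarrierSet constructor
    //
    // CardTableModRefBSForCTRS bs(whole_heap);
    // bs.kind()                            == BarrierSet::CardTableModRef
    // bs.is_a(BarrierSet::CardTableForRS)  -> true
    // bs.is_a(BarrierSet::CardTableModRef) -> true
    // bs.is_a(BarrierSet::G1SATBCTLogging) -> false
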
--- a/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -37,10 +37,6 @@
 class ModRefBarrierSet: public BarrierSet {
 public:
 
-  bool is_a(BarrierSet::Name bsn) {
-    return bsn == BarrierSet::ModRef;
-  }
-
   // Barriers only on ref writes.
   bool has_read_ref_barrier() { return false; }
   bool has_read_prim_barrier() { return false; }
@@ -60,7 +56,8 @@
 
 protected:
 
-  ModRefBarrierSet(BarrierSet::Name kind) : BarrierSet(kind) { }
+  ModRefBarrierSet(const BarrierSet::FakeRtti& fake_rtti)
+    : BarrierSet(fake_rtti.add_tag(BarrierSet::ModRef)) { }
   ~ModRefBarrierSet() { }
 
   virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
@@ -100,4 +97,9 @@
   virtual void clear(MemRegion mr) = 0;
 };
 
+template<>
+struct BarrierSet::GetName<ModRefBarrierSet> {
+  static const BarrierSet::Name value = BarrierSet::ModRef;
+};
+
 #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
--- a/hotspot/src/share/vm/oops/constMethod.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/oops/constMethod.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -215,6 +215,7 @@
   u2                _max_stack;                  // Maximum number of entries on the expression stack
   u2                _max_locals;                 // Number of local variables used by this method
   u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
+  u2                _orig_method_idnum;          // Original unique identification number for the method
 
   // Constructor
   ConstMethod(int byte_code_size,
@@ -473,6 +474,9 @@
   u2 method_idnum() const                        { return _method_idnum; }
   void set_method_idnum(u2 idnum)                { _method_idnum = idnum; }
 
+  u2 orig_method_idnum() const                   { return _orig_method_idnum; }
+  void set_orig_method_idnum(u2 idnum)           { _orig_method_idnum = idnum; }
+
   // max stack
   int  max_stack() const                         { return _max_stack; }
   void set_max_stack(int size)                   { _max_stack = size; }
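
The point of carrying a second idnum shows up in the rewrites below: during
method redefinition, an old method is matched to its replacement by asking
the holder for the method with the same original idnum, instead of walking
parallel old/new method arrays. The idiom, as it appears repeatedly in this
changeset:

    // Mapping idiom enabled by _orig_method_idnum (used in the vtable,
    // itable, cpCache and default-method rewrites below); holder is the
    // InstanceKlass whose methods were redefined.
    Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
    assert(new_method != NULL, "method_with_idnum() should not be NULL");
    assert(old_method != new_method, "sanity check");
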
--- a/hotspot/src/share/vm/oops/cpCache.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCache.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -454,7 +454,6 @@
           new_method->name()->as_C_string(),
           new_method->signature()->as_C_string()));
       }
-
       return true;
     }
 
@@ -482,7 +481,6 @@
         new_method->name()->as_C_string(),
         new_method->signature()->as_C_string()));
     }
-
     return true;
   }
 
@@ -509,36 +507,33 @@
           (!f1_as_method()->is_old() && !f1_as_method()->is_obsolete())));
 }
 
-bool ConstantPoolCacheEntry::is_interesting_method_entry(Klass* k) {
+Method* ConstantPoolCacheEntry::get_interesting_method_entry(Klass* k) {
   if (!is_method_entry()) {
     // not a method entry so not interesting by default
-    return false;
+    return NULL;
   }
-
   Method* m = NULL;
   if (is_vfinal()) {
     // virtual and final so _f2 contains method ptr instead of vtable index
     m = f2_as_vfinal_method();
   } else if (is_f1_null()) {
     // NULL _f1 means this is a virtual entry so also not interesting
-    return false;
+    return NULL;
   } else {
     if (!(_f1->is_method())) {
       // _f1 can also contain a Klass* for an interface
-      return false;
+      return NULL;
     }
     m = f1_as_method();
   }
-
   assert(m != NULL && m->is_method(), "sanity check");
   if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) {
     // robustness for above sanity checks or method is not in
     // the interesting class
-    return false;
+    return NULL;
   }
-
   // the method is in the interesting class so the entry is interesting
-  return true;
+  return m;
 }
 #endif // INCLUDE_JVMTI
 
@@ -615,7 +610,7 @@
 // If any entry of this ConstantPoolCache points to any of
 // old_methods, replace it with the corresponding new_method.
 void ConstantPoolCache::adjust_method_entries(Method** old_methods, Method** new_methods,
-                                                     int methods_length, bool * trace_name_printed) {
+                                              int methods_length, bool * trace_name_printed) {
 
   if (methods_length == 0) {
     // nothing to do if there are no methods
@@ -626,7 +621,7 @@
   Klass* old_holder = old_methods[0]->method_holder();
 
   for (int i = 0; i < length(); i++) {
-    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
+    if (entry_at(i)->get_interesting_method_entry(old_holder) == NULL) {
       // skip uninteresting methods
       continue;
     }
@@ -650,10 +645,33 @@
   }
 }
 
+// If any entry of this ConstantPoolCache points to any of
+// old_methods, replace it with the corresponding new_method.
+void ConstantPoolCache::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
+  for (int i = 0; i < length(); i++) {
+    ConstantPoolCacheEntry* entry = entry_at(i);
+    Method* old_method = entry->get_interesting_method_entry(holder);
+    if (old_method == NULL || !old_method->is_old()) {
+      continue; // skip uninteresting entries
+    }
+    if (old_method->is_deleted()) {
+      // clean up entries with deleted methods
+      entry->initialize_entry(entry->constant_pool_index());
+      continue;
+    }
+    Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
+
+    assert(new_method != NULL, "method_with_idnum() should not be NULL");
+    assert(old_method != new_method, "sanity check");
+
+    entry_at(i)->adjust_method_entry(old_method, new_method, trace_name_printed);
+  }
+}
+
 // the constant pool cache should never contain old or obsolete methods
 bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
   for (int i = 1; i < length(); i++) {
-    if (entry_at(i)->is_interesting_method_entry(NULL) &&
+    if (entry_at(i)->get_interesting_method_entry(NULL) != NULL &&
         !entry_at(i)->check_no_old_or_obsolete_entries()) {
       return false;
     }
@@ -663,7 +681,7 @@
 
 void ConstantPoolCache::dump_cache() {
   for (int i = 1; i < length(); i++) {
-    if (entry_at(i)->is_interesting_method_entry(NULL)) {
+    if (entry_at(i)->get_interesting_method_entry(NULL) != NULL) {
       entry_at(i)->print(tty, i);
     }
   }
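
The holder-based overload added above shifts the iteration burden from the
caller to the cache itself. A hedged sketch of how a caller drives it (the
actual call sites are in the RedefineClasses code, which this changeset
section does not show; the variable names are illustrative):

    // Illustrative only: the real call site is in JVMTI RedefineClasses.
    bool trace_name_printed = false;
    ConstantPoolCache* cache  = /* cache of a class touched by redefinition */;
    InstanceKlass*     holder = /* the redefined class */;
    cache->adjust_method_entries(holder, &trace_name_printed);
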
--- a/hotspot/src/share/vm/oops/cpCache.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCache.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -379,9 +379,9 @@
   // printed the klass name so that other routines in the adjust_*
   // group don't print the klass name.
   bool adjust_method_entry(Method* old_method, Method* new_method,
-         bool * trace_name_printed);
+         bool* trace_name_printed);
   bool check_no_old_or_obsolete_entries();
-  bool is_interesting_method_entry(Klass* k);
+  Method* get_interesting_method_entry(Klass* k);
 #endif // INCLUDE_JVMTI
 
   // Debugging & Printing
@@ -478,7 +478,8 @@
   // printed the klass name so that other routines in the adjust_*
   // group don't print the klass name.
   void adjust_method_entries(Method** old_methods, Method** new_methods,
-                             int methods_length, bool * trace_name_printed);
+                             int methods_length, bool* trace_name_printed);
+  void adjust_method_entries(InstanceKlass* holder, bool* trace_name_printed);
   bool check_no_old_or_obsolete_entries();
   void dump_cache();
 #endif // INCLUDE_JVMTI
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -2793,30 +2793,33 @@
 // not yet in the vtable due to concurrent subclass define and superinterface
 // redefinition
 // Note: those in the vtable, should have been updated via adjust_method_entries
-void InstanceKlass::adjust_default_methods(Method** old_methods, Method** new_methods,
-                                           int methods_length, bool* trace_name_printed) {
+void InstanceKlass::adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed) {
   // search the default_methods for uses of either obsolete or EMCP methods
   if (default_methods() != NULL) {
-    for (int j = 0; j < methods_length; j++) {
-      Method* old_method = old_methods[j];
-      Method* new_method = new_methods[j];
-
-      for (int index = 0; index < default_methods()->length(); index ++) {
-        if (default_methods()->at(index) == old_method) {
-          default_methods()->at_put(index, new_method);
-          if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
-            if (!(*trace_name_printed)) {
-              // RC_TRACE_MESG macro has an embedded ResourceMark
-              RC_TRACE_MESG(("adjust: klassname=%s default methods from name=%s",
-                             external_name(),
-                             old_method->method_holder()->external_name()));
-              *trace_name_printed = true;
-            }
-            RC_TRACE(0x00100000, ("default method update: %s(%s) ",
-                                  new_method->name()->as_C_string(),
-                                  new_method->signature()->as_C_string()));
-          }
+    for (int index = 0; index < default_methods()->length(); index ++) {
+      Method* old_method = default_methods()->at(index);
+      if (old_method == NULL || old_method->method_holder() != holder || !old_method->is_old()) {
+        continue; // skip uninteresting entries
+      }
+      assert(!old_method->is_deleted(), "default methods may not be deleted");
+
+      Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
+
+      assert(new_method != NULL, "method_with_idnum() should not be NULL");
+      assert(old_method != new_method, "sanity check");
+
+      default_methods()->at_put(index, new_method);
+      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+        if (!(*trace_name_printed)) {
+          // RC_TRACE_MESG macro has an embedded ResourceMark
+          RC_TRACE_MESG(("adjust: klassname=%s default methods from name=%s",
+                         external_name(),
+                         old_method->method_holder()->external_name()));
+          *trace_name_printed = true;
         }
+        RC_TRACE(0x00100000, ("default method update: %s(%s) ",
+                              new_method->name()->as_C_string(),
+                              new_method->signature()->as_C_string()));
       }
     }
   }
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -937,8 +937,7 @@
   Method* method_at_itable(Klass* holder, int index, TRAPS);
 
 #if INCLUDE_JVMTI
-  void adjust_default_methods(Method** old_methods, Method** new_methods,
-                              int methods_length, bool* trace_name_printed);
+  void adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed);
 #endif // INCLUDE_JVMTI
 
   // Garbage collection
--- a/hotspot/src/share/vm/oops/klassVtable.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/oops/klassVtable.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -863,44 +863,43 @@
   }
   return updated;
 }
-void klassVtable::adjust_method_entries(Method** old_methods, Method** new_methods,
-                                        int methods_length, bool * trace_name_printed) {
-  // search the vtable for uses of either obsolete or EMCP methods
-  for (int j = 0; j < methods_length; j++) {
-    Method* old_method = old_methods[j];
-    Method* new_method = new_methods[j];
+
+// search the vtable for uses of either obsolete or EMCP methods
+void klassVtable::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
+  for (int index = 0; index < length(); index++) {
+    Method* old_method = unchecked_method_at(index);
+    if (old_method == NULL || old_method->method_holder() != holder || !old_method->is_old()) {
+      continue; // skip uninteresting entries
+    }
+    assert(!old_method->is_deleted(), "vtable methods may not be deleted");
+
+    Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
+
+    assert(new_method != NULL, "method_with_idnum() should not be NULL");
+    assert(old_method != new_method, "sanity check");
 
-    // In the vast majority of cases we could get the vtable index
-    // by using:  old_method->vtable_index()
-    // However, there are rare cases, eg. sun.awt.X11.XDecoratedPeer.getX()
-    // in sun.awt.X11.XFramePeer where methods occur more than once in the
-    // vtable, so, alas, we must do an exhaustive search.
-    for (int index = 0; index < length(); index++) {
-      if (unchecked_method_at(index) == old_method) {
-        put_method_at(new_method, index);
-          // For default methods, need to update the _default_methods array
-          // which can only have one method entry for a given signature
-          bool updated_default = false;
-          if (old_method->is_default_method()) {
-            updated_default = adjust_default_method(index, old_method, new_method);
-          }
+    put_method_at(new_method, index);
+    // For default methods, need to update the _default_methods array
+    // which can only have one method entry for a given signature
+    bool updated_default = false;
+    if (old_method->is_default_method()) {
+      updated_default = adjust_default_method(index, old_method, new_method);
+    }
 
-        if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
-          if (!(*trace_name_printed)) {
-            // RC_TRACE_MESG macro has an embedded ResourceMark
-            RC_TRACE_MESG(("adjust: klassname=%s for methods from name=%s",
-                           klass()->external_name(),
-                           old_method->method_holder()->external_name()));
-            *trace_name_printed = true;
-          }
-          // RC_TRACE macro has an embedded ResourceMark
-          RC_TRACE(0x00100000, ("vtable method update: %s(%s), updated default = %s",
-                                new_method->name()->as_C_string(),
-                                new_method->signature()->as_C_string(),
-                                updated_default ? "true" : "false"));
-        }
-        // cannot 'break' here; see for-loop comment above.
+    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+      if (!(*trace_name_printed)) {
+        // RC_TRACE_MESG macro has an embedded ResourceMark
+        RC_TRACE_MESG(("adjust: klassname=%s for methods from name=%s",
+                       klass()->external_name(),
+                       old_method->method_holder()->external_name()));
+        *trace_name_printed = true;
       }
+      // RC_TRACE macro has an embedded ResourceMark
+      RC_TRACE(0x00100000, ("vtable method update: %s(%s), updated default = %s",
+                            new_method->name()->as_C_string(),
+                            new_method->signature()->as_C_string(),
+                            updated_default ? "true" : "false"));
     }
   }
 }
@@ -1193,37 +1192,35 @@
 }
 
 #if INCLUDE_JVMTI
-void klassItable::adjust_method_entries(Method** old_methods, Method** new_methods,
-                                        int methods_length, bool * trace_name_printed) {
-  // search the itable for uses of either obsolete or EMCP methods
-  for (int j = 0; j < methods_length; j++) {
-    Method* old_method = old_methods[j];
-    Method* new_method = new_methods[j];
-    itableMethodEntry* ime = method_entry(0);
+// search the itable for uses of either obsolete or EMCP methods
+void klassItable::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
 
-    // The itable can describe more than one interface and the same
-    // method signature can be specified by more than one interface.
-    // This means we have to do an exhaustive search to find all the
-    // old_method references.
-    for (int i = 0; i < _size_method_table; i++) {
-      if (ime->method() == old_method) {
-        ime->initialize(new_method);
+  itableMethodEntry* ime = method_entry(0);
+  for (int i = 0; i < _size_method_table; i++, ime++) {
+    Method* old_method = ime->method();
+    if (old_method == NULL || old_method->method_holder() != holder || !old_method->is_old()) {
+      continue; // skip uninteresting entries
+    }
+    assert(!old_method->is_deleted(), "itable methods may not be deleted");
+
+    Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
 
-        if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
-          if (!(*trace_name_printed)) {
-            // RC_TRACE_MESG macro has an embedded ResourceMark
-            RC_TRACE_MESG(("adjust: name=%s",
-              old_method->method_holder()->external_name()));
-            *trace_name_printed = true;
-          }
-          // RC_TRACE macro has an embedded ResourceMark
-          RC_TRACE(0x00200000, ("itable method update: %s(%s)",
-            new_method->name()->as_C_string(),
-            new_method->signature()->as_C_string()));
-        }
-        // cannot 'break' here; see for-loop comment above.
+    assert(new_method != NULL, "method_with_idnum() should not be NULL");
+    assert(old_method != new_method, "sanity check");
+
+    ime->initialize(new_method);
+
+    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+      if (!(*trace_name_printed)) {
+        // RC_TRACE_MESG macro has an embedded ResourceMark
+        RC_TRACE_MESG(("adjust: name=%s",
+          old_method->method_holder()->external_name()));
+        *trace_name_printed = true;
       }
-      ime++;
+      // RC_TRACE macro has an embedded ResourceMark
+      RC_TRACE(0x00200000, ("itable method update: %s(%s)",
+        new_method->name()->as_C_string(),
+        new_method->signature()->as_C_string()));
     }
   }
 }
--- a/hotspot/src/share/vm/oops/klassVtable.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/oops/klassVtable.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,8 +98,7 @@
   // printed the klass name so that other routines in the adjust_*
   // group don't print the klass name.
   bool adjust_default_method(int vtable_index, Method* old_method, Method* new_method);
-  void adjust_method_entries(Method** old_methods, Method** new_methods,
-                             int methods_length, bool * trace_name_printed);
+  void adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed);
   bool check_no_old_or_obsolete_entries();
   void dump_vtable();
 #endif // INCLUDE_JVMTI
@@ -288,8 +287,7 @@
   // trace_name_printed is set to true if the current call has
   // printed the klass name so that other routines in the adjust_*
   // group don't print the klass name.
-  void adjust_method_entries(Method** old_methods, Method** new_methods,
-                             int methods_length, bool * trace_name_printed);
+  void adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed);
   bool check_no_old_or_obsolete_entries();
   void dump_itable();
 #endif // INCLUDE_JVMTI
--- a/hotspot/src/share/vm/oops/method.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1450,6 +1450,7 @@
       for (int i = 0; i < length; i++) {
         Method* m = methods->at(i);
         m->set_method_idnum(i);
+        m->set_orig_method_idnum(i);
       }
     }
   }
--- a/hotspot/src/share/vm/oops/method.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -233,6 +233,9 @@
   u2 method_idnum() const           { return constMethod()->method_idnum(); }
   void set_method_idnum(u2 idnum)   { constMethod()->set_method_idnum(idnum); }
 
+  u2 orig_method_idnum() const           { return constMethod()->orig_method_idnum(); }
+  void set_orig_method_idnum(u2 idnum)   { constMethod()->set_orig_method_idnum(idnum); }
+
   // code size
   int code_size() const                  { return constMethod()->code_size(); }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/arraycopynode.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,597 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/arraycopynode.hpp"
+#include "opto/graphKit.hpp"
+
+ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
+  : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
+    _alloc_tightly_coupled(alloc_tightly_coupled),
+    _kind(None),
+    _arguments_validated(false) {
+  init_class_id(Class_ArrayCopy);
+  init_flags(Flag_is_macro);
+  C->add_macro_node(this);
+}
+
+uint ArrayCopyNode::size_of() const { return sizeof(*this); }
+
+ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
+                                   Node* src, Node* src_offset,
+                                   Node* dest, Node* dest_offset,
+                                   Node* length,
+                                   bool alloc_tightly_coupled,
+                                   Node* src_klass, Node* dest_klass,
+                                   Node* src_length, Node* dest_length) {
+
+  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled);
+  Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);
+
+  ac->init_req(ArrayCopyNode::Src, src);
+  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
+  ac->init_req(ArrayCopyNode::Dest, dest);
+  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
+  ac->init_req(ArrayCopyNode::Length, length);
+  ac->init_req(ArrayCopyNode::SrcLen, src_length);
+  ac->init_req(ArrayCopyNode::DestLen, dest_length);
+  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
+  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);
+
+  if (may_throw) {
+    ac->set_req(TypeFunc::I_O , kit->i_o());
+    kit->add_safepoint_edges(ac, false);
+  }
+
+  return ac;
+}
+
+void ArrayCopyNode::connect_outputs(GraphKit* kit) {
+  kit->set_all_memory_call(this, true);
+  kit->set_control(kit->gvn().transform(new ProjNode(this,TypeFunc::Control)));
+  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
+  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
+  kit->set_all_memory_call(this);
+}
+
+#ifndef PRODUCT
+const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};
+void ArrayCopyNode::dump_spec(outputStream *st) const {
+  CallNode::dump_spec(st);
+  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
+}
+#endif
+
+intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
+  // check that length is constant
+  Node* length = in(ArrayCopyNode::Length);
+  const Type* length_type = phase->type(length);
+
+  if (length_type == Type::TOP) {
+    return -1;
+  }
+
+  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");
+
+  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
+}
+
+int ArrayCopyNode::get_count(PhaseGVN *phase) const {
+  Node* src = in(ArrayCopyNode::Src);
+  const Type* src_type = phase->type(src);
+
+  if (is_clonebasic()) {
+    if (src_type->isa_instptr()) {
+      const TypeInstPtr* inst_src = src_type->is_instptr();
+      ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
+      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
+      // fields into account. They are rare anyway so easier to simply
+      // skip instances with injected fields.
+      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
+        return -1;
+      }
+      int nb_fields = ik->nof_nonstatic_fields();
+      return nb_fields;
+    } else {
+      const TypeAryPtr* ary_src = src_type->isa_aryptr();
+      assert (ary_src != NULL, "not an array or instance?");
+      // clone passes a length as a rounded number of longs. If we're
+      // cloning an array we'll do it element by element. If the
+      // length input to ArrayCopyNode is constant, the length of the
+      // input array must be constant too.
+
+      assert((get_length_if_constant(phase) == -1) == !ary_src->size()->is_con() ||
+             phase->is_IterGVN(), "inconsistent");
+
+      if (ary_src->size()->is_con()) {
+        return ary_src->size()->get_con();
+      }
+      return -1;
+    }
+  }
+
+  return get_length_if_constant(phase);
+}
+
+Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
+  if (!is_clonebasic()) {
+    return NULL;
+  }
+
+  Node* src = in(ArrayCopyNode::Src);
+  Node* dest = in(ArrayCopyNode::Dest);
+  Node* ctl = in(TypeFunc::Control);
+  Node* in_mem = in(TypeFunc::Memory);
+
+  const Type* src_type = phase->type(src);
+
+  assert(src->is_AddP(), "should be base + off");
+  assert(dest->is_AddP(), "should be base + off");
+  Node* base_src = src->in(AddPNode::Base);
+  Node* base_dest = dest->in(AddPNode::Base);
+
+  MergeMemNode* mem = MergeMemNode::make(in_mem);
+
+  const TypeInstPtr* inst_src = src_type->isa_instptr();
+
+  if (inst_src == NULL) {
+    return NULL;
+  }
+
+  if (!inst_src->klass_is_exact()) {
+    ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
+    assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy");
+    phase->C->dependencies()->assert_leaf_type(ik);
+  }
+
+  ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
+  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");
+
+  for (int i = 0; i < count; i++) {
+    ciField* field = ik->nonstatic_field_at(i);
+    int fieldidx = phase->C->alias_type(field)->index();
+    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
+    Node* off = phase->MakeConX(field->offset());
+    Node* next_src = phase->transform(new AddPNode(base_src,base_src,off));
+    Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off));
+    BasicType bt = field->layout_type();
+
+    const Type *type;
+    if (bt == T_OBJECT) {
+      if (!field->type()->is_loaded()) {
+        type = TypeInstPtr::BOTTOM;
+      } else {
+        ciType* field_klass = field->type();
+        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
+      }
+    } else {
+      type = Type::get_const_basic_type(bt);
+    }
+
+    Node* v = LoadNode::make(*phase, ctl, mem->memory_at(fieldidx), next_src, adr_type, type, bt, MemNode::unordered);
+    v = phase->transform(v);
+    Node* s = StoreNode::make(*phase, ctl, mem->memory_at(fieldidx), next_dest, adr_type, v, bt, MemNode::unordered);
+    s = phase->transform(s);
+    mem->set_memory_at(fieldidx, s);
+  }
+
+  if (!finish_transform(phase, can_reshape, ctl, mem)) {
+    return NULL;
+  }
+
+  return mem;
+}
+
+bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
+                                       Node*& adr_src,
+                                       Node*& base_src,
+                                       Node*& adr_dest,
+                                       Node*& base_dest,
+                                       BasicType& copy_type,
+                                       const Type*& value_type,
+                                       bool& disjoint_bases) {
+  Node* src = in(ArrayCopyNode::Src);
+  Node* dest = in(ArrayCopyNode::Dest);
+  const Type* src_type = phase->type(src);
+  const TypeAryPtr* ary_src = src_type->isa_aryptr();
+
+  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
+    const Type* dest_type = phase->type(dest);
+    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();
+    Node* src_offset = in(ArrayCopyNode::SrcPos);
+    Node* dest_offset = in(ArrayCopyNode::DestPos);
+
+    // newly allocated object is guaranteed to not overlap with source object
+    disjoint_bases = is_alloc_tightly_coupled();
+
+    if (ary_src  == NULL || ary_src->klass()  == NULL ||
+        ary_dest == NULL || ary_dest->klass() == NULL) {
+      // We don't know if arguments are arrays
+      return false;
+    }
+
+    BasicType src_elem  = ary_src->klass()->as_array_klass()->element_type()->basic_type();
+    BasicType dest_elem = ary_dest->klass()->as_array_klass()->element_type()->basic_type();
+    if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
+    if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
+
+    if (src_elem != dest_elem || dest_elem == T_VOID) {
+      // We don't know if arguments are arrays of the same type
+      return false;
+    }
+
+    if (dest_elem == T_OBJECT && (!is_alloc_tightly_coupled() || !GraphKit::use_ReduceInitialCardMarks())) {
+      // It's an object array copy but we can't emit the card marking
+      // that is needed
+      return false;
+    }
+
+    value_type = ary_src->elem();
+
+    base_src = src;
+    base_dest = dest;
+
+    uint shift  = exact_log2(type2aelembytes(dest_elem));
+    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);
+
+    adr_src = src;
+    adr_dest = dest;
+
+    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
+    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
+
+    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
+    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));
+
+    adr_src = phase->transform(new AddPNode(base_src, adr_src, src_scale));
+    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, dest_scale));
+
+    adr_src = new AddPNode(base_src, adr_src, phase->MakeConX(header));
+    adr_dest = new AddPNode(base_dest, adr_dest, phase->MakeConX(header));
+
+    adr_src = phase->transform(adr_src);
+    adr_dest = phase->transform(adr_dest);
+
+    copy_type = dest_elem;
+  } else {
+    assert (is_clonebasic(), "should be");
+
+    disjoint_bases = true;
+    assert(src->is_AddP(), "should be base + off");
+    assert(dest->is_AddP(), "should be base + off");
+    adr_src = src;
+    base_src = src->in(AddPNode::Base);
+    adr_dest = dest;
+    base_dest = dest->in(AddPNode::Base);
+
+    assert(phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con() == phase->type(dest->in(AddPNode::Offset))->is_intptr_t()->get_con(), "same start offset?");
+    BasicType elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
+    if (elem == T_ARRAY)  elem = T_OBJECT;
+
+    int diff = arrayOopDesc::base_offset_in_bytes(elem) - phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con();
+    assert(diff >= 0, "clone should not start after 1st array element");
+    if (diff > 0) {
+      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
+      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
+    }
+
+    copy_type = elem;
+    value_type = ary_src->elem();
+  }
+  return true;
+}
+
+const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN *phase, Node* n) {
+  const Type* at = phase->type(n);
+  assert(at != Type::TOP, "unexpected type");
+  const TypePtr* atp = at->isa_ptr();
+  // adjust atp to be the correct array element address type
+  atp = atp->add_offset(Type::OffsetBot);
+  return atp;
+}
+
+void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
+  Node* ctl = in(TypeFunc::Control);
+  if (!disjoint_bases && count > 1) {
+    Node* src_offset = in(ArrayCopyNode::SrcPos);
+    Node* dest_offset = in(ArrayCopyNode::DestPos);
+    assert(src_offset != NULL && dest_offset != NULL, "should be");
+    Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
+    Node *bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
+    IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);
+
+    phase->transform(iff);
+
+    forward_ctl = phase->transform(new IfFalseNode(iff));
+    backward_ctl = phase->transform(new IfTrueNode(iff));
+  } else {
+    forward_ctl = ctl;
+  }
+}
+
+Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
+                                        bool can_reshape,
+                                        Node* forward_ctl,
+                                        Node* start_mem_src,
+                                        Node* start_mem_dest,
+                                        const TypePtr* atp_src,
+                                        const TypePtr* atp_dest,
+                                        Node* adr_src,
+                                        Node* base_src,
+                                        Node* adr_dest,
+                                        Node* base_dest,
+                                        BasicType copy_type,
+                                        const Type* value_type,
+                                        int count) {
+  Node* mem = phase->C->top();
+  if (!forward_ctl->is_top()) {
+    // copy forward
+    mem = start_mem_dest;
+
+    if (count > 0) {
+      Node* v = LoadNode::make(*phase, forward_ctl, start_mem_src, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
+      v = phase->transform(v);
+      mem = StoreNode::make(*phase, forward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
+      mem = phase->transform(mem);
+      for (int i = 1; i < count; i++) {
+        Node* off  = phase->MakeConX(type2aelembytes(copy_type) * i);
+        Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
+        Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
+        v = LoadNode::make(*phase, forward_ctl, mem, next_src, atp_src, value_type, copy_type, MemNode::unordered);
+        v = phase->transform(v);
+        mem = StoreNode::make(*phase, forward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
+        mem = phase->transform(mem);
+      }
+    } else if (can_reshape) {
+      PhaseIterGVN* igvn = phase->is_IterGVN();
+      igvn->_worklist.push(adr_src);
+      igvn->_worklist.push(adr_dest);
+    }
+  }
+  return mem;
+}
+
+Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
+                                         bool can_reshape,
+                                         Node* backward_ctl,
+                                         Node* start_mem_src,
+                                         Node* start_mem_dest,
+                                         const TypePtr* atp_src,
+                                         const TypePtr* atp_dest,
+                                         Node* adr_src,
+                                         Node* base_src,
+                                         Node* adr_dest,
+                                         Node* base_dest,
+                                         BasicType copy_type,
+                                         const Type* value_type,
+                                         int count) {
+  Node* mem = phase->C->top();
+  if (!backward_ctl->is_top()) {
+    // copy backward
+    mem = start_mem_dest;
+
+    if (count > 0) {
+      for (int i = count-1; i >= 1; i--) {
+        Node* off  = phase->MakeConX(type2aelembytes(copy_type) * i);
+        Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
+        Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
+        Node* v = LoadNode::make(*phase, backward_ctl, mem, next_src, atp_src, value_type, copy_type, MemNode::unordered);
+        v = phase->transform(v);
+        mem = StoreNode::make(*phase, backward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
+        mem = phase->transform(mem);
+      }
+      Node* v = LoadNode::make(*phase, backward_ctl, mem, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
+      v = phase->transform(v);
+      mem = StoreNode::make(*phase, backward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
+      mem = phase->transform(mem);
+    } else if (can_reshape) {
+      PhaseIterGVN* igvn = phase->is_IterGVN();
+      igvn->_worklist.push(adr_src);
+      igvn->_worklist.push(adr_dest);
+    }
+  }
+  return mem;
+}
+
+bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
+                                     Node* ctl, Node *mem) {
+  if (can_reshape) {
+    PhaseIterGVN* igvn = phase->is_IterGVN();
+    igvn->set_delay_transform(false);
+    if (is_clonebasic()) {
+      Node* out_mem = proj_out(TypeFunc::Memory);
+
+      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
+          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
+        assert(!GraphKit::use_ReduceInitialCardMarks(), "can only happen with card marking");
+        return false;
+      }
+
+      igvn->replace_node(out_mem->raw_out(0), mem);
+
+      Node* out_ctl = proj_out(TypeFunc::Control);
+      igvn->replace_node(out_ctl, ctl);
+    } else {
+      // replace fallthrough projections of the ArrayCopyNode by the
+      // new memory, control and the input IO.
+      CallProjections callprojs;
+      extract_projections(&callprojs, true);
+
+      igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
+      igvn->replace_node(callprojs.fallthrough_memproj, mem);
+      igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
+
+      // The ArrayCopyNode is not disconnected. It still has the
+      // projections for the exception case. Replace current
+      // ArrayCopyNode with a dummy new one with a top() control so
+      // that this part of the graph stays consistent but is
+      // eventually removed.
+
+      set_req(0, phase->C->top());
+      remove_dead_region(phase, can_reshape);
+    }
+  } else {
+    if (in(TypeFunc::Control) != ctl) {
+      // we can't return new memory and control from Ideal at parse time
+      assert(!is_clonebasic(), "added control for clone?");
+      return false;
+    }
+  }
+  return true;
+}
+
+
+Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if (remove_dead_region(phase, can_reshape))  return this;
+
+  if (StressArrayCopyMacroNode && !can_reshape) {
+    phase->record_for_igvn(this);
+    return NULL;
+  }
+
+  // See if it's a small array copy and we can inline it as
+  // loads/stores
+  // Here we can only do:
+  // - arraycopy if all arguments were validated before and we don't
+  // need card marking
+  // - clone for which we don't need to do card marking
+
+  if (!is_clonebasic() && !is_arraycopy_validated() &&
+      !is_copyofrange_validated() && !is_copyof_validated()) {
+    return NULL;
+  }
+
+  assert(in(TypeFunc::Control) != NULL &&
+         in(TypeFunc::Memory) != NULL &&
+         in(ArrayCopyNode::Src) != NULL &&
+         in(ArrayCopyNode::Dest) != NULL &&
+         in(ArrayCopyNode::Length) != NULL &&
+         ((in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::DestPos) != NULL) ||
+          is_clonebasic()), "broken inputs");
+
+  if (in(TypeFunc::Control)->is_top() ||
+      in(TypeFunc::Memory)->is_top() ||
+      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
+      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
+      (in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::SrcPos)->is_top()) ||
+      (in(ArrayCopyNode::DestPos) != NULL && in(ArrayCopyNode::DestPos)->is_top())) {
+    return NULL;
+  }
+
+  int count = get_count(phase);
+
+  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
+    return NULL;
+  }
+
+  Node* mem = try_clone_instance(phase, can_reshape, count);
+  if (mem != NULL) {
+    return mem;
+  }
+
+  Node* adr_src = NULL;
+  Node* base_src = NULL;
+  Node* adr_dest = NULL;
+  Node* base_dest = NULL;
+  BasicType copy_type = T_ILLEGAL;
+  const Type* value_type = NULL;
+  bool disjoint_bases = false;
+
+  if (!prepare_array_copy(phase, can_reshape,
+                          adr_src, base_src, adr_dest, base_dest,
+                          copy_type, value_type, disjoint_bases)) {
+    return NULL;
+  }
+
+  Node* src = in(ArrayCopyNode::Src);
+  Node* dest = in(ArrayCopyNode::Dest);
+  const TypePtr* atp_src = get_address_type(phase, src);
+  const TypePtr* atp_dest = get_address_type(phase, dest);
+  uint alias_idx_src = phase->C->get_alias_index(atp_src);
+  uint alias_idx_dest = phase->C->get_alias_index(atp_dest);
+
+  Node *in_mem = in(TypeFunc::Memory);
+  Node *start_mem_src = in_mem;
+  Node *start_mem_dest = in_mem;
+  if (in_mem->is_MergeMem()) {
+    start_mem_src = in_mem->as_MergeMem()->memory_at(alias_idx_src);
+    start_mem_dest = in_mem->as_MergeMem()->memory_at(alias_idx_dest);
+  }
+
+  if (can_reshape) {
+    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
+    phase->is_IterGVN()->set_delay_transform(true);
+  }
+
+  Node* backward_ctl = phase->C->top();
+  Node* forward_ctl = phase->C->top();
+  array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);
+
+  Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
+                                         start_mem_src, start_mem_dest,
+                                         atp_src, atp_dest,
+                                         adr_src, base_src, adr_dest, base_dest,
+                                         copy_type, value_type, count);
+
+  Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
+                                           start_mem_src, start_mem_dest,
+                                           atp_src, atp_dest,
+                                           adr_src, base_src, adr_dest, base_dest,
+                                           copy_type, value_type, count);
+
+  Node* ctl = NULL;
+  if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
+    ctl = new RegionNode(3);
+    mem = new PhiNode(ctl, Type::MEMORY, atp_dest);
+    ctl->init_req(1, forward_ctl);
+    mem->init_req(1, forward_mem);
+    ctl->init_req(2, backward_ctl);
+    mem->init_req(2, backward_mem);
+    ctl = phase->transform(ctl);
+    mem = phase->transform(mem);
+  } else if (!forward_ctl->is_top()) {
+    ctl = forward_ctl;
+    mem = forward_mem;
+  } else {
+    assert(!backward_ctl->is_top(), "no copy?");
+    ctl = backward_ctl;
+    mem = backward_mem;
+  }
+
+  if (can_reshape) {
+    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
+    phase->is_IterGVN()->set_delay_transform(false);
+  }
+
+  MergeMemNode* out_mem = MergeMemNode::make(in_mem);
+  out_mem->set_memory_at(alias_idx_dest, mem);
+  mem = out_mem;
+
+  if (!finish_transform(phase, can_reshape, ctl, mem)) {
+    return NULL;
+  }
+
+  return mem;
+}
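
array_copy_test_overlap() together with the forward/backward helpers above
is the classic memmove rule expressed as IR: when the bases may alias,
compare the offsets with BoolTest::lt and route the IfFalse projection
(src_offset >= dest_offset) to the forward copy, the IfTrue projection to
the backward copy. For orientation only, the same decision written as a
plain element loop:

    // The direction rule the node encodes (the node builds this test out
    // of CmpI/Bool/If IR and expands the loop into straight-line
    // loads/stores instead).
    static void copy_elements(int* src, int src_off,
                              int* dst, int dst_off, int count) {
      if (src_off >= dst_off) {           // IfFalse(lt): forward_ctl
        for (int i = 0; i < count; i++)
          dst[dst_off + i] = src[src_off + i];
      } else {                            // IfTrue(lt): backward_ctl
        for (int i = count - 1; i >= 0; i--)
          dst[dst_off + i] = src[src_off + i];
      }
    }
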
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/arraycopynode.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_ARRAYCOPYNODE_HPP
+#define SHARE_VM_OPTO_ARRAYCOPYNODE_HPP
+
+#include "opto/callnode.hpp"
+
+class GraphKit;
+
+class ArrayCopyNode : public CallNode {
+private:
+
+  // What kind of arraycopy variant is this?
+  enum {
+    None,            // not set yet
+    ArrayCopy,       // System.arraycopy()
+    CloneBasic,      // A clone that can be copied by 64 bit chunks
+    CloneOop,        // An oop array clone
+    CopyOf,          // Arrays.copyOf()
+    CopyOfRange      // Arrays.copyOfRange()
+  } _kind;
+
+#ifndef PRODUCT
+  static const char* _kind_names[CopyOfRange+1];
+#endif
+  // Is the alloc obtained with
+  // AllocateArrayNode::Ideal_array_allocation() tightly coupled
+  // (the arraycopy immediately follows the allocation)?
+  // We cache the result of LibraryCallKit::tightly_coupled_allocation
+  // here because it's much easier to find whether there's a tightly
+  // coupled allocation at parse time than at macro expansion time. At
+  // macro expansion time, for every use of the allocation node we
+  // would need to figure out whether it happens after the arraycopy (and
+  // can be ignored) or between the allocation and the arraycopy. At
+  // parse time, it's straightforward because whatever happens after
+  // the arraycopy is not parsed yet so doesn't exist when
+  // LibraryCallKit::tightly_coupled_allocation() is called.
+  bool _alloc_tightly_coupled;
+
+  bool _arguments_validated;
+
+  static const TypeFunc* arraycopy_type() {
+    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
+    fields[Src]       = TypeInstPtr::BOTTOM;
+    fields[SrcPos]    = TypeInt::INT;
+    fields[Dest]      = TypeInstPtr::BOTTOM;
+    fields[DestPos]   = TypeInt::INT;
+    fields[Length]    = TypeInt::INT;
+    fields[SrcLen]    = TypeInt::INT;
+    fields[DestLen]   = TypeInt::INT;
+    fields[SrcKlass]  = TypeKlassPtr::BOTTOM;
+    fields[DestKlass] = TypeKlassPtr::BOTTOM;
+    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
+
+    // create result type (range)
+    fields = TypeTuple::fields(0);
+
+    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
+
+    return TypeFunc::make(domain, range);
+  }
+
+  ArrayCopyNode(Compile* C, bool alloc_tightly_coupled);
+
+  intptr_t get_length_if_constant(PhaseGVN *phase) const;
+  int get_count(PhaseGVN *phase) const;
+  static const TypePtr* get_address_type(PhaseGVN *phase, Node* n);
+
+  Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
+  Node* conv_I2X_offset(PhaseGVN *phase, Node* offset, const TypeAryPtr* ary_t);
+  bool prepare_array_copy(PhaseGVN *phase, bool can_reshape,
+                          Node*& adr_src, Node*& base_src, Node*& adr_dest, Node*& base_dest,
+                          BasicType& copy_type, const Type*& value_type, bool& disjoint_bases);
+  void array_copy_test_overlap(PhaseGVN *phase, bool can_reshape,
+                               bool disjoint_bases, int count,
+                               Node*& forward_ctl, Node*& backward_ctl);
+  Node* array_copy_forward(PhaseGVN *phase, bool can_reshape, Node* ctl,
+                           Node* start_mem_src, Node* start_mem_dest,
+                           const TypePtr* atp_src, const TypePtr* atp_dest,
+                           Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest,
+                           BasicType copy_type, const Type* value_type, int count);
+  Node* array_copy_backward(PhaseGVN *phase, bool can_reshape, Node* ctl,
+                            Node *start_mem_src, Node* start_mem_dest,
+                            const TypePtr* atp_src, const TypePtr* atp_dest,
+                            Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest,
+                            BasicType copy_type, const Type* value_type, int count);
+  bool finish_transform(PhaseGVN *phase, bool can_reshape,
+                        Node* ctl, Node *mem);
+
+public:
+
+  enum {
+    Src   = TypeFunc::Parms,
+    SrcPos,
+    Dest,
+    DestPos,
+    Length,
+    SrcLen,
+    DestLen,
+    SrcKlass,
+    DestKlass,
+    ParmLimit
+  };
+
+  static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
+                             Node* src, Node* src_offset,
+                             Node* dest,  Node* dest_offset,
+                             Node* length,
+                             bool alloc_tightly_coupled,
+                             Node* src_klass = NULL, Node* dest_klass = NULL,
+                             Node* src_length = NULL, Node* dest_length = NULL);
+
+  void connect_outputs(GraphKit* kit);
+
+  bool is_arraycopy()             const  { assert(_kind != None, "should be set"); return _kind == ArrayCopy; }
+  bool is_arraycopy_validated()   const  { assert(_kind != None, "should be set"); return _kind == ArrayCopy && _arguments_validated; }
+  bool is_clonebasic()            const  { assert(_kind != None, "should be set"); return _kind == CloneBasic; }
+  bool is_cloneoop()              const  { assert(_kind != None, "should be set"); return _kind == CloneOop; }
+  bool is_copyof()                const  { assert(_kind != None, "should be set"); return _kind == CopyOf; }
+  bool is_copyof_validated()      const  { assert(_kind != None, "should be set"); return _kind == CopyOf && _arguments_validated; }
+  bool is_copyofrange()           const  { assert(_kind != None, "should be set"); return _kind == CopyOfRange; }
+  bool is_copyofrange_validated() const  { assert(_kind != None, "should be set"); return _kind == CopyOfRange && _arguments_validated; }
+
+  void set_arraycopy(bool validated)   { assert(_kind == None, "shouldn't be set yet"); _kind = ArrayCopy; _arguments_validated = validated; }
+  void set_clonebasic()                { assert(_kind == None, "shouldn't be set yet"); _kind = CloneBasic; }
+  void set_cloneoop()                  { assert(_kind == None, "shouldn't be set yet"); _kind = CloneOop; }
+  void set_copyof(bool validated)      { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOf; _arguments_validated = validated; }
+  void set_copyofrange(bool validated) { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOfRange; _arguments_validated = validated; }
+
+  virtual int Opcode() const;
+  virtual uint size_of() const; // Size is bigger
+  virtual bool guaranteed_safepoint()  { return false; }
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+
+  bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }
+
+#ifndef PRODUCT
+  virtual void dump_spec(outputStream *st) const;
+#endif
+};
+
+#endif // SHARE_VM_OPTO_ARRAYCOPYNODE_HPP
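The header above reads most easily from the creation side. A minimal sketch, assuming a GraphKit* kit and illustrative src/dest/offset/length nodes, of how a caller creates, classifies, and transforms an ArrayCopyNode (the kind must be set exactly once, before GVN sees the node):

  ArrayCopyNode* ac = ArrayCopyNode::make(kit, /*may_throw=*/true,
                                          src, src_offset,
                                          dest, dest_offset,
                                          length,
                                          /*alloc_tightly_coupled=*/false);
  ac->set_arraycopy(/*validated=*/false);  // pick the variant exactly once
  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    // Ideal() did not replace the node: wire up the control/i_o/memory users.
    ac->connect_outputs(kit);
  }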
--- a/hotspot/src/share/vm/opto/callnode.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1875,194 +1875,3 @@
     log->tail(tag);
   }
 }
-
-ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
-  : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
-    _alloc_tightly_coupled(alloc_tightly_coupled),
-    _kind(None),
-    _arguments_validated(false) {
-  init_class_id(Class_ArrayCopy);
-  init_flags(Flag_is_macro);
-  C->add_macro_node(this);
-}
-
-uint ArrayCopyNode::size_of() const { return sizeof(*this); }
-
-ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
-                                   Node* src, Node* src_offset,
-                                   Node* dest, Node* dest_offset,
-                                   Node* length,
-                                   bool alloc_tightly_coupled,
-                                   Node* src_klass, Node* dest_klass,
-                                   Node* src_length, Node* dest_length) {
-
-  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled);
-  Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);
-
-  ac->init_req(ArrayCopyNode::Src, src);
-  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
-  ac->init_req(ArrayCopyNode::Dest, dest);
-  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
-  ac->init_req(ArrayCopyNode::Length, length);
-  ac->init_req(ArrayCopyNode::SrcLen, src_length);
-  ac->init_req(ArrayCopyNode::DestLen, dest_length);
-  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
-  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);
-
-  if (may_throw) {
-    ac->set_req(TypeFunc::I_O , kit->i_o());
-    kit->add_safepoint_edges(ac, false);
-  }
-
-  return ac;
-}
-
-void ArrayCopyNode::connect_outputs(GraphKit* kit) {
-  kit->set_all_memory_call(this, true);
-  kit->set_control(kit->gvn().transform(new ProjNode(this,TypeFunc::Control)));
-  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
-  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
-  kit->set_all_memory_call(this);
-}
-
-#ifndef PRODUCT
-const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};
-void ArrayCopyNode::dump_spec(outputStream *st) const {
-  CallNode::dump_spec(st);
-  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
-}
-#endif
-
-int ArrayCopyNode::get_count(PhaseGVN *phase) const {
-  Node* src = in(ArrayCopyNode::Src);
-  const Type* src_type = phase->type(src);
-
-  assert(is_clonebasic(), "unexpected arraycopy type");
-  if (src_type->isa_instptr()) {
-    const TypeInstPtr* inst_src = src_type->is_instptr();
-    ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
-    // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
-    // fields into account. They are rare anyway so easier to simply
-    // skip instances with injected fields.
-    if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
-      return -1;
-    }
-    int nb_fields = ik->nof_nonstatic_fields();
-    return nb_fields;
-  }
-  return -1;
-}
-
-Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
-  assert(is_clonebasic(), "unexpected arraycopy type");
-
-  Node* src = in(ArrayCopyNode::Src);
-  Node* dest = in(ArrayCopyNode::Dest);
-  Node* ctl = in(TypeFunc::Control);
-  Node* in_mem = in(TypeFunc::Memory);
-
-  const Type* src_type = phase->type(src);
-  const Type* dest_type = phase->type(dest);
-
-  assert(src->is_AddP(), "should be base + off");
-  assert(dest->is_AddP(), "should be base + off");
-  Node* base_src = src->in(AddPNode::Base);
-  Node* base_dest = dest->in(AddPNode::Base);
-
-  MergeMemNode* mem = MergeMemNode::make(in_mem);
-
-  const TypeInstPtr* inst_src = src_type->is_instptr();
-
-  if (!inst_src->klass_is_exact()) {
-    ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
-    assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy");
-    phase->C->dependencies()->assert_leaf_type(ik);
-  }
-
-  ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
-  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");
-
-  for (int i = 0; i < count; i++) {
-    ciField* field = ik->nonstatic_field_at(i);
-    int fieldidx = phase->C->alias_type(field)->index();
-    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
-    Node* off = phase->MakeConX(field->offset());
-    Node* next_src = phase->transform(new AddPNode(base_src,base_src,off));
-    Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off));
-    BasicType bt = field->layout_type();
-
-    const Type *type;
-    if (bt == T_OBJECT) {
-      if (!field->type()->is_loaded()) {
-        type = TypeInstPtr::BOTTOM;
-      } else {
-        ciType* field_klass = field->type();
-        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
-      }
-    } else {
-      type = Type::get_const_basic_type(bt);
-    }
-
-    Node* v = LoadNode::make(*phase, ctl, mem->memory_at(fieldidx), next_src, adr_type, type, bt, MemNode::unordered);
-    v = phase->transform(v);
-    Node* s = StoreNode::make(*phase, ctl, mem->memory_at(fieldidx), next_dest, adr_type, v, bt, MemNode::unordered);
-    s = phase->transform(s);
-    mem->set_memory_at(fieldidx, s);
-  }
-
-  if (!finish_transform(phase, can_reshape, ctl, mem)) {
-    return NULL;
-  }
-
-  return mem;
-}
-
-bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
-                                     Node* ctl, Node *mem) {
-  if (can_reshape) {
-    PhaseIterGVN* igvn = phase->is_IterGVN();
-    assert(is_clonebasic(), "unexpected arraycopy type");
-    Node* out_mem = proj_out(TypeFunc::Memory);
-
-    if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
-        out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
-      assert(!GraphKit::use_ReduceInitialCardMarks(), "can only happen with card marking");
-      return false;
-    }
-
-    igvn->replace_node(out_mem->raw_out(0), mem);
-
-    Node* out_ctl = proj_out(TypeFunc::Control);
-    igvn->replace_node(out_ctl, ctl);
-  }
-  return true;
-}
-
-
-Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  if (remove_dead_region(phase, can_reshape))  return this;
-
-  if (StressArrayCopyMacroNode && !can_reshape) return NULL;
-
-  // See if it's a small array copy and we can inline it as
-  // loads/stores
-  // Here we can only do:
-  // - clone for which we don't need to do card marking
-
-  if (!is_clonebasic()) {
-    return NULL;
-  }
-
-  if (in(TypeFunc::Control)->is_top() || in(TypeFunc::Memory)->is_top()) {
-    return NULL;
-  }
-
-  int count = get_count(phase);
-
-  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
-    return NULL;
-  }
-
-  Node* mem = try_clone_instance(phase, can_reshape, count);
-  return mem;
-}
--- a/hotspot/src/share/vm/opto/callnode.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/callnode.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1083,117 +1083,4 @@
 #endif
 };
 
-class GraphKit;
-
-class ArrayCopyNode : public CallNode {
-private:
-
-  // What kind of arraycopy variant is this?
-  enum {
-    None,            // not set yet
-    ArrayCopy,       // System.arraycopy()
-    CloneBasic,      // A clone that can be copied by 64 bit chunks
-    CloneOop,        // An oop array clone
-    CopyOf,          // Arrays.copyOf()
-    CopyOfRange      // Arrays.copyOfRange()
-  } _kind;
-
-#ifndef PRODUCT
-  static const char* _kind_names[CopyOfRange+1];
-#endif
-  // Is the alloc obtained with
-  // AllocateArrayNode::Ideal_array_allocation() tighly coupled
-  // (arraycopy follows immediately the allocation)?
-  // We cache the result of LibraryCallKit::tightly_coupled_allocation
-  // here because it's much easier to find whether there's a tightly
-  // couple allocation at parse time than at macro expansion time. At
-  // macro expansion time, for every use of the allocation node we
-  // would need to figure out whether it happens after the arraycopy (and
-  // can be ignored) or between the allocation and the arraycopy. At
-  // parse time, it's straightforward because whatever happens after
-  // the arraycopy is not parsed yet so doesn't exist when
-  // LibraryCallKit::tightly_coupled_allocation() is called.
-  bool _alloc_tightly_coupled;
-
-  bool _arguments_validated;
-
-  static const TypeFunc* arraycopy_type() {
-    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
-    fields[Src]       = TypeInstPtr::BOTTOM;
-    fields[SrcPos]    = TypeInt::INT;
-    fields[Dest]      = TypeInstPtr::BOTTOM;
-    fields[DestPos]   = TypeInt::INT;
-    fields[Length]    = TypeInt::INT;
-    fields[SrcLen]    = TypeInt::INT;
-    fields[DestLen]   = TypeInt::INT;
-    fields[SrcKlass]  = TypeKlassPtr::BOTTOM;
-    fields[DestKlass] = TypeKlassPtr::BOTTOM;
-    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
-
-    // create result type (range)
-    fields = TypeTuple::fields(0);
-
-    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
-
-    return TypeFunc::make(domain, range);
-  }
-
-  ArrayCopyNode(Compile* C, bool alloc_tightly_coupled);
-
-  int get_count(PhaseGVN *phase) const;
-  static const TypePtr* get_address_type(PhaseGVN *phase, Node* n);
-
-  Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
-  bool finish_transform(PhaseGVN *phase, bool can_reshape,
-                        Node* ctl, Node *mem);
-
-public:
-
-  enum {
-    Src   = TypeFunc::Parms,
-    SrcPos,
-    Dest,
-    DestPos,
-    Length,
-    SrcLen,
-    DestLen,
-    SrcKlass,
-    DestKlass,
-    ParmLimit
-  };
-
-  static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
-                             Node* src, Node* src_offset,
-                             Node* dest,  Node* dest_offset,
-                             Node* length,
-                             bool alloc_tightly_coupled,
-                             Node* src_klass = NULL, Node* dest_klass = NULL,
-                             Node* src_length = NULL, Node* dest_length = NULL);
-
-  void connect_outputs(GraphKit* kit);
-
-  bool is_arraycopy()             const  { assert(_kind != None, "should bet set"); return _kind == ArrayCopy; }
-  bool is_arraycopy_validated()   const  { assert(_kind != None, "should bet set"); return _kind == ArrayCopy && _arguments_validated; }
-  bool is_clonebasic()            const  { assert(_kind != None, "should bet set"); return _kind == CloneBasic; }
-  bool is_cloneoop()              const  { assert(_kind != None, "should bet set"); return _kind == CloneOop; }
-  bool is_copyof()                const  { assert(_kind != None, "should bet set"); return _kind == CopyOf; }
-  bool is_copyofrange()           const  { assert(_kind != None, "should bet set"); return _kind == CopyOfRange; }
-
-  void set_arraycopy(bool validated)   { assert(_kind == None, "shouldn't bet set yet"); _kind = ArrayCopy; _arguments_validated = validated; }
-  void set_clonebasic()                { assert(_kind == None, "shouldn't bet set yet"); _kind = CloneBasic; }
-  void set_cloneoop()                  { assert(_kind == None, "shouldn't bet set yet"); _kind = CloneOop; }
-  void set_copyof()                    { assert(_kind == None, "shouldn't bet set yet"); _kind = CopyOf; _arguments_validated = false; }
-  void set_copyofrange()               { assert(_kind == None, "shouldn't bet set yet"); _kind = CopyOfRange; _arguments_validated = false; }
-
-  virtual int Opcode() const;
-  virtual uint size_of() const; // Size is bigger
-  virtual bool guaranteed_safepoint()  { return false; }
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-
-  bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }
-
-#ifndef PRODUCT
-  virtual void dump_spec(outputStream *st) const;
-#endif
-};
 #endif // SHARE_VM_OPTO_CALLNODE_HPP
--- a/hotspot/src/share/vm/opto/classes.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/classes.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "opto/addnode.hpp"
+#include "opto/arraycopynode.hpp"
 #include "opto/callnode.hpp"
 #include "opto/castnode.hpp"
 #include "opto/cfgnode.hpp"
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -42,6 +42,7 @@
 #include "opto/chaitin.hpp"
 #include "opto/compile.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/divnode.hpp"
 #include "opto/escape.hpp"
 #include "opto/idealGraphPrinter.hpp"
@@ -3867,6 +3868,26 @@
   return SSC_full_test;
 }
 
+Node* Compile::conv_I2X_index(PhaseGVN *phase, Node* idx, const TypeInt* sizetype) {
+#ifdef _LP64
+  // The scaled index operand to AddP must be a clean 64-bit value.
+  // Java allows a 32-bit int to be incremented to a negative
+  // value, which appears in a 64-bit register as a large
+  // positive number.  Using that large positive number as an
+  // operand in pointer arithmetic has bad consequences.
+  // On the other hand, 32-bit overflow is rare, and the possibility
+  // can often be excluded, if we annotate the ConvI2L node with
+  // a type assertion that its value is known to be a small positive
+  // number.  (The prior range check has ensured this.)
+  // This assertion is used by ConvI2LNode::Ideal.
+  int index_max = max_jint - 1;  // array size is max_jint, index is one less
+  if (sizetype != NULL)  index_max = sizetype->_hi - 1;
+  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
+  idx = phase->transform(new ConvI2LNode(idx, lidxtype));
+#endif
+  return idx;
+}
+
 // The message about the current inlining is accumulated in
 // _print_inlining_stream and transferred into the _print_inlining_list
 // once we know whether inlining succeeds or not. For regular
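The hazard conv_I2X_index guards against is worth a standalone illustration. A self-contained sketch (ordinary C++, not VM code) of why a 32-bit index that went negative must be sign-extended, never zero-extended, before use as a 64-bit offset:

  #include <cstdint>
  #include <cstdio>

  int main() {
    int32_t idx = INT32_MIN;        // a Java index that overflowed to negative
    uint64_t zext = (uint32_t)idx;  // careless widening: 2147483648, a huge positive offset
    int64_t  sext = (int64_t)idx;   // proper sign extension: -2147483648
    printf("zero-extended=%llu sign-extended=%lld\n",
           (unsigned long long)zext, (long long)sext);
    return 0;
  }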
--- a/hotspot/src/share/vm/opto/compile.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -74,6 +74,7 @@
 class JVMState;
 class Type;
 class TypeData;
+class TypeInt;
 class TypePtr;
 class TypeOopPtr;
 class TypeFunc;
@@ -1221,6 +1222,8 @@
   enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
   int static_subtype_check(ciKlass* superk, ciKlass* subk);
 
+  static Node* conv_I2X_index(PhaseGVN *phase, Node* offset, const TypeInt* sizetype);
+
   // Auxiliary method for randomized fuzzing/stressing
   static bool randomized_select(int count);
 };
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1660,22 +1660,7 @@
 
   // must be correct type for alignment purposes
   Node* base  = basic_plus_adr(ary, header);
-#ifdef _LP64
-  // The scaled index operand to AddP must be a clean 64-bit value.
-  // Java allows a 32-bit int to be incremented to a negative
-  // value, which appears in a 64-bit register as a large
-  // positive number.  Using that large positive number as an
-  // operand in pointer arithmetic has bad consequences.
-  // On the other hand, 32-bit overflow is rare, and the possibility
-  // can often be excluded, if we annotate the ConvI2L node with
-  // a type assertion that its value is known to be a small positive
-  // number.  (The prior range check has ensured this.)
-  // This assertion is used by ConvI2LNode::Ideal.
-  int index_max = max_jint - 1;  // array size is max_jint, index is one less
-  if (sizetype != NULL)  index_max = sizetype->_hi - 1;
-  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
-  idx = _gvn.transform( new ConvI2LNode(idx, lidxtype) );
-#endif
+  idx = Compile::conv_I2X_index(&_gvn, idx, sizetype);
   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
   return basic_plus_adr(ary, base, scale);
 }
@@ -3759,7 +3744,8 @@
 
 Node* GraphKit::byte_map_base_node() {
   // Get base of card map
-  CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
+  CardTableModRefBS* ct =
+    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
   if (ct->byte_map_base != NULL) {
     return makecon(TypeRawPtr::make((address)ct->byte_map_base));
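The byte_map_base fetched here feeds the usual card-mark address computation: the dirty-card byte for a heap address is byte_map_base + (addr >> card_shift). A hedged sketch of that arithmetic (the 512-byte card, i.e. a shift of 9, is the conventional size; the helper name is illustrative):

  #include <cstdint>

  inline uint8_t* card_for(uint8_t* byte_map_base, uintptr_t addr) {
    const int card_shift = 9;  // log2 of the 512-byte card size
    return byte_map_base + (addr >> card_shift);
  }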
--- a/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -30,6 +30,7 @@
 #include "compiler/compileLog.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "opto/addnode.hpp"
+#include "opto/arraycopynode.hpp"
 #include "opto/callGenerator.hpp"
 #include "opto/castnode.hpp"
 #include "opto/cfgnode.hpp"
@@ -3867,26 +3868,65 @@
       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
 
-      newcopy = new_array(klass_node, length, 0);  // no arguments to push
-
       // Generate a direct call to the right arraycopy function(s).
       // We know the copy is disjoint but we might not know if the
       // oop stores need checking.
       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
       // This will fail a store-check if x contains any non-nulls.
 
-      Node* alloc = tightly_coupled_allocation(newcopy, NULL);
-
-      ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, alloc != NULL,
+      // ArrayCopyNode::Ideal may transform the ArrayCopyNode into
+      // loads/stores, but that is legal only if we're sure the
+      // Arrays.copyOf would succeed. So we need all input arguments
+      // to the copyOf to be validated, including that the copy to the
+      // new array won't trigger an ArrayStoreException. That subtype
+      // check can be optimized away if we know something about the
+      // type of the input array from type speculation.
+      if (_gvn.type(klass_node)->singleton()) {
+        ciKlass* subk   = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
+        ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
+
+        int test = C->static_subtype_check(superk, subk);
+        if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
+          const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
+          if (t_original->speculative_type() != NULL) {
+            original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
+          }
+        }
+      }
+
+      bool validated = false;
+      // Reason_class_check rather than Reason_intrinsic because we
+      // want to intrinsify even if this traps.
+      if (!too_many_traps(Deoptimization::Reason_class_check)) {
+        Node* not_subtype_ctrl = gen_subtype_check(load_object_klass(original),
+                                                   klass_node);
+
+        if (not_subtype_ctrl != top()) {
+          PreserveJVMState pjvms(this);
+          set_control(not_subtype_ctrl);
+          uncommon_trap(Deoptimization::Reason_class_check,
+                        Deoptimization::Action_make_not_entrant);
+          assert(stopped(), "Should be stopped");
+        }
+        validated = true;
+      }
+
+      newcopy = new_array(klass_node, length, 0);  // no arguments to push
+
+      ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true,
                                               load_object_klass(original), klass_node);
       if (!is_copyOfRange) {
-        ac->set_copyof();
+        ac->set_copyof(validated);
       } else {
-        ac->set_copyofrange();
+        ac->set_copyofrange(validated);
       }
       Node* n = _gvn.transform(ac);
-      assert(n == ac, "cannot disappear");
-      ac->connect_outputs(this);
+      if (n == ac) {
+        ac->connect_outputs(this);
+      } else {
+        assert(validated, "shouldn't transform unless all arguments are validated");
+        set_all_memory(n);
+      }
     }
   } // original reexecute is set back here
 
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -2032,7 +2032,7 @@
     // Hopefully, compiler will optimize for powers of 2.
     Node *ctrl = get_ctrl(main_limit);
     Node *stride = cl->stride();
-    Node *init = cl->init_trip();
+    Node *init = cl->init_trip()->uncast();
     Node *span = new SubINode(main_limit,init);
     register_new_node(span,ctrl);
     Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1));
--- a/hotspot/src/share/vm/opto/macroArrayCopy.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/macroArrayCopy.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "opto/arraycopynode.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "opto/convertnode.hpp"
 #include "opto/graphKit.hpp"
@@ -519,7 +520,7 @@
     // Test S[] against D[], not S against D, because (probably)
     // the secondary supertype cache is less busy for S[] than S.
     // This usually only matters when D is an interface.
-    Node* not_subtype_ctrl = ac->is_arraycopy_validated() ? top() :
+    Node* not_subtype_ctrl = (ac->is_arraycopy_validated() || ac->is_copyof_validated() || ac->is_copyofrange_validated()) ? top() :
       Phase::gen_subtype_check(src_klass, dest_klass, ctrl, mem, &_igvn);
     // Plug failing path into checked_oop_disjoint_arraycopy
     if (not_subtype_ctrl != top()) {
--- a/hotspot/src/share/vm/opto/type.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/opto/type.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -974,21 +974,10 @@
 
 //------------------------------eq---------------------------------------------
 // Structural equality check for Type representations
-bool TypeF::eq( const Type *t ) const {
-  if( g_isnan(_f) ||
-      g_isnan(t->getf()) ) {
-    // One or both are NANs.  If both are NANs return true, else false.
-    return (g_isnan(_f) && g_isnan(t->getf()));
-  }
-  if (_f == t->getf()) {
-    // (NaN is impossible at this point, since it is not equal even to itself)
-    if (_f == 0.0) {
-      // difference between positive and negative zero
-      if (jint_cast(_f) != jint_cast(t->getf()))  return false;
-    }
-    return true;
-  }
-  return false;
+bool TypeF::eq(const Type *t) const {
+  // Bitwise comparison to distinguish between +/-0. These values must be treated
+  // as different to be consistent with C1 and the interpreter.
+  return (jint_cast(_f) == jint_cast(t->getf()));
 }
 
 //------------------------------hash-------------------------------------------
@@ -1089,21 +1078,10 @@
 
 //------------------------------eq---------------------------------------------
 // Structural equality check for Type representations
-bool TypeD::eq( const Type *t ) const {
-  if( g_isnan(_d) ||
-      g_isnan(t->getd()) ) {
-    // One or both are NANs.  If both are NANs return true, else false.
-    return (g_isnan(_d) && g_isnan(t->getd()));
-  }
-  if (_d == t->getd()) {
-    // (NaN is impossible at this point, since it is not equal even to itself)
-    if (_d == 0.0) {
-      // difference between positive and negative zero
-      if (jlong_cast(_d) != jlong_cast(t->getd()))  return false;
-    }
-    return true;
-  }
-  return false;
+bool TypeD::eq(const Type *t) const {
+  // Bitwise comparison to distinguish between +/-0. These values must be treated
+  // as different to be consistent with C1 and the interpreter.
+  return (jlong_cast(_d) == jlong_cast(t->getd()));
 }
 
 //------------------------------hash-------------------------------------------
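The bitwise comparison also changes NaN handling in a consistent direction: identical NaN bit patterns now compare equal (where == would say false), and +0.0/-0.0 compare different. A standalone demonstration, assuming nothing from the VM:

  #include <cmath>
  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  static int32_t float_bits(float f) {  // the moral equivalent of jint_cast
    int32_t b;
    memcpy(&b, &f, sizeof(b));
    return b;
  }

  int main() {
    float pz = 0.0f, nz = -0.0f, nan = NAN;
    printf("+0 == -0       : %d\n", pz == nz);                           // 1
    printf("+0 bits == -0  : %d\n", float_bits(pz) == float_bits(nz));   // 0
    printf("nan == nan     : %d\n", nan == nan);                         // 0
    printf("nan bits equal : %d\n", float_bits(nan) == float_bits(nan)); // 1
    return 0;
  }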
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -782,9 +782,13 @@
           Method* idnum_owner = scratch_class->method_with_idnum(old_num);
           if (idnum_owner != NULL) {
             // There is already a method assigned this idnum -- switch them
+            // Take current and original idnum from the new_method
             idnum_owner->set_method_idnum(new_num);
+            idnum_owner->set_orig_method_idnum(k_new_method->orig_method_idnum());
           }
+          // Take current and original idnum from the old_method
           k_new_method->set_method_idnum(old_num);
+          k_new_method->set_orig_method_idnum(k_old_method->orig_method_idnum());
           if (thread->has_pending_exception()) {
             return JVMTI_ERROR_OUT_OF_MEMORY;
           }
@@ -817,9 +821,12 @@
         Method* idnum_owner = scratch_class->method_with_idnum(num);
         if (idnum_owner != NULL) {
           // There is already a method assigned this idnum -- switch them
+          // Take current and original idnum from the new_method
           idnum_owner->set_method_idnum(new_num);
+          idnum_owner->set_orig_method_idnum(k_new_method->orig_method_idnum());
         }
         k_new_method->set_method_idnum(num);
+        k_new_method->set_orig_method_idnum(num);
         if (thread->has_pending_exception()) {
           return JVMTI_ERROR_OUT_OF_MEMORY;
         }
@@ -3327,6 +3334,7 @@
   // This is a very busy routine. We don't want too much tracing
   // printed out.
   bool trace_name_printed = false;
+  InstanceKlass *the_class = InstanceKlass::cast(_the_class_oop);
 
   // Very noisy: only enable this call if you are trying to determine
   // that a specific class gets found by this routine.
@@ -3338,10 +3346,8 @@
   // If the class being redefined is java.lang.Object, we need to fix all
   // array class vtables also
   if (k->oop_is_array() && _the_class_oop == SystemDictionary::Object_klass()) {
-    k->vtable()->adjust_method_entries(_matching_old_methods,
-                                       _matching_new_methods,
-                                       _matching_methods_length,
-                                       &trace_name_printed);
+    k->vtable()->adjust_method_entries(the_class, &trace_name_printed);
+
   } else if (k->oop_is_instance()) {
     HandleMark hm(_thread);
     InstanceKlass *ik = InstanceKlass::cast(k);
@@ -3383,14 +3389,9 @@
         || ik->is_subtype_of(_the_class_oop))) {
       // ik->vtable() creates a wrapper object; rm cleans it up
       ResourceMark rm(_thread);
-      ik->vtable()->adjust_method_entries(_matching_old_methods,
-                                          _matching_new_methods,
-                                          _matching_methods_length,
-                                          &trace_name_printed);
-      ik->adjust_default_methods(_matching_old_methods,
-                                 _matching_new_methods,
-                                 _matching_methods_length,
-                                 &trace_name_printed);
+
+      ik->vtable()->adjust_method_entries(the_class, &trace_name_printed);
+      ik->adjust_default_methods(the_class, &trace_name_printed);
     }
 
     // If the current class has an itable and we are either redefining an
@@ -3405,10 +3406,8 @@
         || ik->is_subclass_of(_the_class_oop))) {
       // ik->itable() creates a wrapper object; rm cleans it up
       ResourceMark rm(_thread);
-      ik->itable()->adjust_method_entries(_matching_old_methods,
-                                          _matching_new_methods,
-                                          _matching_methods_length,
-                                          &trace_name_printed);
+
+      ik->itable()->adjust_method_entries(the_class, &trace_name_printed);
     }
 
     // The constant pools in other classes (other_cp) can refer to
@@ -3432,10 +3431,7 @@
       other_cp = constantPoolHandle(ik->constants());
       cp_cache = other_cp->cache();
       if (cp_cache != NULL) {
-        cp_cache->adjust_method_entries(_matching_old_methods,
-                                        _matching_new_methods,
-                                        _matching_methods_length,
-                                        &trace_name_printed);
+        cp_cache->adjust_method_entries(the_class, &trace_name_printed);
       }
     }
 
@@ -3578,6 +3574,7 @@
 
       // obsolete methods need a unique idnum so they become new entries in
       // the jmethodID cache in InstanceKlass
+      assert(old_method->method_idnum() == new_method->method_idnum(), "must match");
       u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum();
       if (num != ConstMethod::UNSET_IDNUM) {
         old_method->set_method_idnum(num);
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -289,7 +289,12 @@
 
 // Create MDO if necessary.
 void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
-  if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
+  if (mh->is_native() ||
+      mh->is_abstract() ||
+      mh->is_accessor() ||
+      mh->is_constant_getter()) {
+    return;
+  }
   if (mh->method_data() == NULL) {
     Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
   }
--- a/hotspot/src/share/vm/runtime/java.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/runtime/java.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -421,9 +421,11 @@
     os::infinite_sleep();
   }
 
-  // Terminate watcher thread - must before disenrolling any periodic task
-  if (PeriodicTask::num_tasks() > 0)
+  // Stop the WatcherThread. We do this before disenrolling various
+  // PeriodicTasks to reduce the likelihood of races.
+  if (PeriodicTask::num_tasks() > 0) {
     WatcherThread::stop();
+  }
 
   // Print statistics gathered (profiling ...)
   if (Arguments::has_profile()) {
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -724,6 +724,7 @@
 // state of the code cache if it's requested.
 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
   if (PrintMethodFlushing) {
+    ResourceMark rm;
     stringStream s;
     // Dump code cache state into a buffer before locking the tty,
     // because log_state() will use locks causing lock conflicts.
@@ -741,6 +742,7 @@
   }
 
   if (LogCompilation && (xtty != NULL)) {
+    ResourceMark rm;
     stringStream s;
     // Dump code cache state into a buffer before locking the tty,
     // because log_state() will use locks causing lock conflicts.
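Both additions follow the standard resource-area idiom: stringStream allocates its buffer from the current thread's resource area, and the ResourceMark releases that memory when it goes out of scope. A hedged sketch of the pattern:

  {
    ResourceMark rm;   // scopes every resource-area allocation below
    stringStream s;    // buffer lives in the resource area
    // ... format the message without holding the tty lock ...
    tty->print("%s", s.as_string());  // consume before rm unwinds
  }                    // buffer reclaimed here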
--- a/hotspot/src/share/vm/runtime/task.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/runtime/task.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@
 #endif
 
 void PeriodicTask::real_time_tick(int delay_time) {
+  assert(Thread::current()->is_Watcher_thread(), "must be WatcherThread");
+
 #ifndef PRODUCT
   if (ProfilerCheckIntervals) {
     _ticks++;
@@ -60,6 +62,8 @@
 #endif
 
   {
+    // The WatcherThread does not participate in the safepoint protocol
+    // for the PeriodicTask_lock because it is not a JavaThread.
     MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
     int orig_num_tasks = _num_tasks;
 
@@ -74,8 +78,7 @@
 }
 
 int PeriodicTask::time_to_wait() {
-  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
-                     NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+  assert(PeriodicTask_lock->owned_by_self(), "PeriodicTask_lock required");
 
   if (_num_tasks == 0) {
     return 0; // sleep until shutdown or a task is enrolled
@@ -98,14 +101,19 @@
 }
 
 PeriodicTask::~PeriodicTask() {
+  // This PeriodicTask may have already been disenrolled by a call
+  // to disenroll() before the PeriodicTask was deleted.
   disenroll();
 }
 
-/* enroll could be called from a JavaThread, so we have to check for
- * safepoint when taking the lock to avoid deadlocking */
+// enroll the current PeriodicTask
 void PeriodicTask::enroll() {
-  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
-                     NULL : PeriodicTask_lock);
+  // Follow normal safepoint aware lock enter protocol if the caller does
+  // not already own the PeriodicTask_lock. Otherwise, we don't try to
+  // enter it again because VM internal Mutexes do not support recursion.
+  //
+  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? NULL
+                                                      : PeriodicTask_lock);
 
   if (_num_tasks == PeriodicTask::max_tasks) {
     fatal("Overflow in PeriodicTask table");
@@ -113,18 +121,21 @@
   _tasks[_num_tasks++] = this;
 
   WatcherThread* thread = WatcherThread::watcher_thread();
-  if (thread) {
+  if (thread != NULL) {
     thread->unpark();
   } else {
     WatcherThread::start();
   }
 }
 
-/* disenroll could be called from a JavaThread, so we have to check for
- * safepoint when taking the lock to avoid deadlocking */
+// disenroll the current PeriodicTask
 void PeriodicTask::disenroll() {
-  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
-                     NULL : PeriodicTask_lock);
+  // Follow normal safepoint aware lock enter protocol if the caller does
+  // not already own the PeriodicTask_lock. Otherwise, we don't try to
+  // enter it again because VM internal Mutexes do not support recursion.
+  //
+  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? NULL
+                                                      : PeriodicTask_lock);
 
   int index;
   for(index = 0; index < _num_tasks && _tasks[index] != this; index++)
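The "lock it unless we already own it" pattern in enroll() and disenroll() is the usual workaround for the VM's non-recursive mutexes: MutexLockerEx treats a NULL mutex as a no-op. A hedged sketch of the idiom in isolation, with an illustrative function name:

  void update_task_table(/* ... */) {
    // Enter PeriodicTask_lock only if the caller doesn't already hold it.
    MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? NULL
                                                        : PeriodicTask_lock);
    // ... mutate _tasks/_num_tasks under the lock ...
  }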
--- a/hotspot/src/share/vm/runtime/task.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/runtime/task.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@
 
   static int _num_tasks;
   static PeriodicTask* _tasks[PeriodicTask::max_tasks];
+  // Can only be called by the WatcherThread
   static void real_time_tick(int delay_time);
 
 #ifndef PRODUCT
@@ -98,6 +99,7 @@
 
   // Calculate when the next periodic task will fire.
   // Called by the WatcherThread's run method.
+  // Requires the PeriodicTask_lock.
   static int time_to_wait();
 
   // The task to perform at each period
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1161,6 +1161,10 @@
   va_end(ap);
 }
 
+void NamedThread::initialize_named_thread() {
+  set_native_thread_name(name());
+}
+
 void NamedThread::print_on(outputStream* st) const {
   st->print("\"%s\" ", name());
   Thread::print_on(st);
@@ -1197,8 +1201,15 @@
 }
 
 int WatcherThread::sleep() const {
+  // The WatcherThread does not participate in the safepoint protocol
+  // for the PeriodicTask_lock because it is not a JavaThread.
   MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
 
+  if (_should_terminate) {
+    // check for termination before we do any housekeeping or wait
+    return 0;  // we did not sleep.
+  }
+
   // remaining will be zero if there are no tasks,
   // causing the WatcherThread to sleep until a task is
   // enrolled
@@ -1211,8 +1222,9 @@
 
   jlong time_before_loop = os::javaTimeNanos();
 
-  for (;;) {
-    bool timedout = PeriodicTask_lock->wait(Mutex::_no_safepoint_check_flag, remaining);
+  while (true) {
+    bool timedout = PeriodicTask_lock->wait(Mutex::_no_safepoint_check_flag,
+                                            remaining);
     jlong now = os::javaTimeNanos();
 
     if (remaining == 0) {
@@ -1253,7 +1265,7 @@
   this->initialize_thread_local_storage();
   this->set_native_thread_name(this->name());
   this->set_active_handles(JNIHandleBlock::allocate_block());
-  while (!_should_terminate) {
+  while (true) {
     assert(watcher_thread() == Thread::current(), "thread consistency check");
     assert(watcher_thread() == this, "thread consistency check");
 
@@ -1289,6 +1301,11 @@
       }
     }
 
+    if (_should_terminate) {
+      // check for termination before posting the next tick
+      break;
+    }
+
     PeriodicTask::real_time_tick(time_waited);
   }
 
@@ -1319,27 +1336,19 @@
 }
 
 void WatcherThread::stop() {
-  // Get the PeriodicTask_lock if we can. If we cannot, then the
-  // WatcherThread is using it and we don't want to block on that lock
-  // here because that might cause a safepoint deadlock depending on
-  // what the current WatcherThread tasks are doing.
-  bool have_lock = PeriodicTask_lock->try_lock();
-
-  _should_terminate = true;
-  OrderAccess::fence();  // ensure WatcherThread sees update in main loop
-
-  if (have_lock) {
+  {
+    // Follow normal safepoint aware lock enter protocol since the
+    // WatcherThread is stopped by another JavaThread.
+    MutexLocker ml(PeriodicTask_lock);
+    _should_terminate = true;
+
     WatcherThread* watcher = watcher_thread();
     if (watcher != NULL) {
-      // If we managed to get the lock, then we should unpark the
-      // WatcherThread so that it can see we want it to stop.
+      // unpark the WatcherThread so it can see that it should terminate
       watcher->unpark();
     }
-
-    PeriodicTask_lock->unlock();
   }
 
-  // it is ok to take late safepoints here, if needed
   MutexLocker mu(Terminator_lock);
 
   while (watcher_thread() != NULL) {
@@ -1359,9 +1368,7 @@
 }
 
 void WatcherThread::unpark() {
-  MutexLockerEx ml(PeriodicTask_lock->owned_by_self()
-                   ? NULL
-                   : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+  assert(PeriodicTask_lock->owned_by_self(), "PeriodicTask_lock required");
   PeriodicTask_lock->notify();
 }
 
@@ -3558,8 +3565,8 @@
   }
 
   {
-    MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
-    // Make sure the watcher thread can be started by WatcherThread::start()
+    MutexLocker ml(PeriodicTask_lock);
+    // Make sure the WatcherThread can be started by WatcherThread::start()
     // or by dynamic enrollment.
     WatcherThread::make_startable();
     // Start up the WatcherThread if there are any periodic tasks
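The reworked stop() is the classic "termination flag plus wait under one lock" shutdown protocol. A self-contained analogue in standard C++, where std::mutex/std::condition_variable stand in for PeriodicTask_lock and park/unpark (all names illustrative):

  #include <chrono>
  #include <condition_variable>
  #include <mutex>

  struct Watcher {
    std::mutex              m;
    std::condition_variable cv;
    bool                    should_terminate = false;

    void run() {
      std::unique_lock<std::mutex> l(m);
      while (!should_terminate) {
        cv.wait_for(l, std::chrono::milliseconds(50));  // periodic tick
        if (should_terminate) break;  // re-check before doing any work
        // ... tick periodic tasks here ...
      }
    }

    void stop() {
      { std::lock_guard<std::mutex> g(m); should_terminate = true; }
      cv.notify_one();  // analogue of unpark(): the waiter cannot miss it
    }
  };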
--- a/hotspot/src/share/vm/runtime/thread.hpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -667,6 +667,7 @@
   ~NamedThread();
   // May only be called once per thread.
   void set_name(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3);
+  void initialize_named_thread();
   virtual bool is_Named_thread() const { return true; }
   virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
   JavaThread *processed_thread() { return _processed_thread; }
@@ -701,7 +702,8 @@
   static WatcherThread* _watcher_thread;
 
   static bool _startable;
-  volatile static bool _should_terminate; // updated without holding lock
+  // volatile due to at least one lock-free read
+  volatile static bool _should_terminate;
 
   os::WatcherThreadCrashProtection* _crash_protection;
  public:
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -487,7 +487,6 @@
                                                                                                                                      \
   unchecked_nonstatic_field(ageTable,          sizes,                                         sizeof(ageTable::sizes))               \
                                                                                                                                      \
-  nonstatic_field(BarrierSet,                  _kind,                                         BarrierSet::Name)                      \
   nonstatic_field(BlockOffsetTable,            _bottom,                                       HeapWord*)                             \
   nonstatic_field(BlockOffsetTable,            _end,                                          HeapWord*)                             \
                                                                                                                                      \
--- a/hotspot/src/share/vm/runtime/vmThread.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -243,7 +243,7 @@
   assert(this == vm_thread(), "check");
 
   this->initialize_thread_local_storage();
-  this->set_native_thread_name(this->name());
+  this->initialize_named_thread();
   this->record_stack_base_and_size();
   // Notify_lock wait checks on active_handles() to rewait in
   // case of spurious wakeup, it should wait on the last
--- a/hotspot/src/share/vm/shark/sharkBuilder.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/shark/sharkBuilder.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -446,7 +446,7 @@
     CreateIntToPtr(
       CreateAdd(
         LLVMValue::intptr_constant(
-          (intptr_t) ((CardTableModRefBS *) bs)->byte_map_base),
+          (intptr_t) (barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base)),
         CreateLShr(
           CreatePtrToInt(field, SharkType::intptr_type()),
           LLVMValue::intptr_constant(CardTableModRefBS::card_shift))),
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/fakeRttiSupport.hpp	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_FAKERTTISUPPORT_HPP
+#define SHARE_VM_UTILITIES_FAKERTTISUPPORT_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/debug.hpp"
+
+// Provides support for checked downcasts in a hierarchy of classes.
+// The base class provides a member of this type, specialized on that
+// base class and an associated tag type.  Tags are small non-negative
+// integer values uniquely associated with distinct classes in the
+// hierarchy.  A tag type is often an enum type.
+//
+// The concrete class specifies the concrete tag.
+//
+// The tag set specifies the set of classes in the derivation
+// sequence.  Classes in the derivation sequence add their associated
+// tag during construction.  Given the tag associated with a class, an
+// object is an instance of that class if the tag is included in the
+// object's set of recorded tags.
+//
+// A tag T is present in a tag set if the T'th bit of the tag set is
+// one.
+//
+// Note: Because a tag set is represented as a uintx, the bit width of
+// uintx bounds the number of distinct tags and hence the size of the
+// class hierarchy this utility can be used with.
+template<typename T, typename TagType>
+class FakeRttiSupport VALUE_OBJ_CLASS_SPEC {
+public:
+  // Construct with the indicated concrete tag, and include the
+  // concrete tag in the associated tag set.
+  explicit FakeRttiSupport(TagType concrete_tag) :
+    _tag_set(tag_bit(concrete_tag)), _concrete_tag(concrete_tag) { }
+
+  // Construct with the indicated concrete tag and tag set.
+  // Note: This constructor is public only to allow clients to set up
+  // "unusual" (or perhaps buggy) fake RTTI configurations.
+  FakeRttiSupport(TagType concrete_tag, uintx tag_set) :
+    _tag_set(tag_set), _concrete_tag(validate_tag(concrete_tag)) { }
+
+  // Get the concrete tag.
+  TagType concrete_tag() const { return _concrete_tag; }
+
+  // Test whether tag is in the tag set.
+  bool has_tag(TagType tag) const {
+    return (_tag_set & tag_bit(tag)) != 0;
+  }
+
+  // Return a new support object which is the same as this, except tag
+  // has been added to the tag set.  The tag must not already be
+  // present in the tag set.
+  FakeRttiSupport add_tag(TagType tag) const {
+    uintx tbit = tag_bit(tag);
+    assert((_tag_set & tbit) == 0,
+           err_msg("Tag " UINTX_FORMAT " is already present in tag set: " UINTX_FORMAT,
+                   (uintx)tag, _tag_set));
+    return FakeRttiSupport(_concrete_tag, _tag_set | tbit);
+  }
+
+private:
+  uintx _tag_set;
+  TagType _concrete_tag;
+
+  static uintx tag_bit(TagType tag) {
+    return ((uintx)1) << validate_tag(tag);
+  }
+
+  static TagType validate_tag(uintx tag) {
+    // Type of tag is not TagType to dodge useless MacOSX compiler warning.
+    assert(tag < (sizeof(uintx) * BitsPerByte),
+           err_msg("Tag " UINTX_FORMAT " is too large", tag));
+    return static_cast<TagType>(tag);
+  }
+};
+
+#endif // SHARE_VM_UTILITIES_FAKERTTISUPPORT_HPP
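A hedged usage sketch: the hierarchy, tag enum, and checked-cast helper below are illustrative, not from the VM sources, but show the intended protocol (the concrete class seeds the support object with its own tag; each class along the derivation chain adds its tag during construction):

  class Base;
  enum Tag { BaseTag, DerivedTag };
  typedef FakeRttiSupport<Base, Tag> FakeRtti;

  class Base {
    FakeRtti _fake_rtti;
  public:
    explicit Base(const FakeRtti& rtti) : _fake_rtti(rtti.add_tag(BaseTag)) { }
    bool is_a(Tag t) const { return _fake_rtti.has_tag(t); }
  };

  class Derived : public Base {
  public:
    // FakeRtti(DerivedTag) records the concrete tag; Base then adds BaseTag.
    Derived() : Base(FakeRtti(DerivedTag)) { }
  };

  // A checked downcast reduces to a tag test plus static_cast:
  static Derived* as_derived(Base* b) {
    assert(b->is_a(DerivedTag), "wrong type");
    return static_cast<Derived*>(b);
  }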
--- a/hotspot/src/share/vm/utilities/workgroup.cpp	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/src/share/vm/utilities/workgroup.cpp	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,6 +247,7 @@
 void GangWorker::initialize() {
   this->initialize_thread_local_storage();
   this->record_stack_base_and_size();
+  this->initialize_named_thread();
   assert(_gang != NULL, "No gang to run in");
   os::set_priority(this, NearMaxPriority);
   if (TraceWorkGang) {
--- a/hotspot/test/TEST.groups	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/TEST.groups	Fri Mar 06 04:58:52 2015 -0800
@@ -114,7 +114,9 @@
   compiler/c2/7047069/Test7047069.java \
   runtime/6294277/SourceDebugExtension.java \
   runtime/ClassFile/JsrRewriting.java \
-  runtime/ClassFile/OomWhileParsingRepeatedJsr.java
+  runtime/ClassFile/OomWhileParsingRepeatedJsr.java \
+  runtime/SharedArchiveFile/LimitSharedSizes.java \
+  runtime/SharedArchiveFile/SpaceUtilizationCheck.java
 
 # Compact 3 adds further tests to compact2
 #
@@ -387,35 +389,7 @@
   -compiler/runtime/6826736
 
 hotspot_compiler_closed = \
-  closed/compiler/c1/ \
-  closed/compiler/c2/ \
-  closed/compiler/codegen/ \
-  closed/compiler/escapeAnalysis/ \
-  closed/compiler/interpreter/ \
-  closed/compiler/jsr292/ \
-  closed/compiler/loopopts/ \
-  closed/compiler/oracle/ \
-  closed/compiler/runtime/ \
-  closed/compiler/symantec/ \
-  -closed/compiler/c1/4477197 \
-  -closed/compiler/c1/5040872 \
-  -closed/compiler/c1/6507107 \
-  -closed/compiler/c2/4344895 \
-  -closed/compiler/c2/4485006 \
-  -closed/compiler/c2/4523683 \
-  -closed/compiler/c2/4620290 \
-  -closed/compiler/c2/4998314 \
-  -closed/compiler/c2/6329104 \
-  -closed/compiler/c2/6434117 \
-  -closed/compiler/c2/6547163 \
-  -closed/compiler/c2/6563987 \
-  -closed/compiler/c2/6595044 \
-  -closed/compiler/codegen/6440479 \
-  -closed/compiler/codegen/6603011 \
-  -closed/compiler/interpreter/5034475 \
-  -closed/compiler/jsr292/LongLambdaFormDynamicStackDepth.java \
-  -closed/compiler/loopopts/4463485 \
-  -closed/compiler/loopopts/8021898
+  sanity/ExecuteInternalVMTests.java
 
 hotspot_gc = \
   sanity/ExecuteInternalVMTests.java \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/arraycopy/TestArrayCloneBadAssert.java	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8073792
+ * @summary assert broken when array size becomes known during igvn
+ * @run main/othervm -Xcomp -XX:CompileOnly=TestArrayCloneBadAssert.m TestArrayCloneBadAssert
+ *
+ */
+
+public class TestArrayCloneBadAssert {
+
+    static final int[] array = new int[5];
+
+    static int[] m(int[] arr) {
+        int i = 0;
+        for (; i < 2; i++) {
+        }
+        if (i == 2) {
+            arr = array;
+        }
+        return arr.clone();
+    }
+
+    public static void main(String[] args) {
+        int[] arr = new int[5];
+        m(arr);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/arraycopy/TestArrayCopyAsLoadsStores.java	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 6912521
+ * @summary small array copy as loads/stores
+ * @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestArrayCopyAsLoadsStores::m* -XX:TypeProfileLevel=200 TestArrayCopyAsLoadsStores
+ * @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestArrayCopyAsLoadsStores::m* -XX:+IgnoreUnrecognizedVMOptions -XX:+StressArrayCopyMacroNode -XX:TypeProfileLevel=200 TestArrayCopyAsLoadsStores
+ *
+ */
+
+import java.lang.annotation.*;
+import java.lang.reflect.*;
+import java.util.*;
+
+public class TestArrayCopyAsLoadsStores {
+
+    public enum ArraySrc {
+        SMALL,
+        LARGE,
+        ZERO
+    }
+
+    public enum ArrayDst {
+        NONE,
+        NEW,
+        SRC
+    }
+
+    static class A {
+    }
+
+    static class B extends A {
+    }
+
+    static final A[] small_a_src = new A[5];
+    static final A[] large_a_src = new A[10];
+    static final A[] zero_a_src = new A[0];
+    static final int[] small_int_src = new int[5];
+    static final int[] large_int_src = new int[10];
+    static final int[] zero_int_src = new int[0];
+    static final Object[] small_object_src = new Object[5];
+    static Object src;
+
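+    // Args tells doTest, per test method, which source array to pass in, what
+    // destination array (if any) to set up, and any extra int arguments.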
+    @Retention(RetentionPolicy.RUNTIME)
+    @interface Args {
+        ArraySrc src();
+        ArrayDst dst() default ArrayDst.NONE;
+        int[] extra_args() default {};
+    }
+
+    // array clone should be compiled as loads/stores
+    @Args(src=ArraySrc.SMALL)
+    static A[] m1() throws CloneNotSupportedException {
+        return (A[])small_a_src.clone();
+    }
+
+    @Args(src=ArraySrc.SMALL)
+    static int[] m2() throws CloneNotSupportedException {
+        return (int[])small_int_src.clone();
+    }
+
+    // new array allocation should be optimized out
+    @Args(src=ArraySrc.SMALL)
+    static int m3() throws CloneNotSupportedException {
+        int[] array_clone = (int[])small_int_src.clone();
+        return array_clone[0] + array_clone[1] + array_clone[2] +
+            array_clone[3] + array_clone[4];
+    }
+
+    // should not be compiled as loads/stores
+    @Args(src=ArraySrc.LARGE)
+    static int[] m4() throws CloneNotSupportedException {
+        return (int[])large_int_src.clone();
+    }
+
+    // check that array of length 0 is handled correctly
+    @Args(src=ArraySrc.ZERO)
+    static int[] m5() throws CloneNotSupportedException {
+        return (int[])zero_int_src.clone();
+    }
+
+    // array copy should be compiled as loads/stores
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.NEW)
+    static void m6(int[] src, int[] dest) {
+        System.arraycopy(src, 0, dest, 0, 5);
+    }
+
+    // array copy should not be compiled as loads/stores
+    @Args(src=ArraySrc.LARGE, dst=ArrayDst.NEW)
+    static void m7(int[] src, int[] dest) {
+        System.arraycopy(src, 0, dest, 0, 10);
+    }
+
+    // array copy should be compiled as loads/stores
+    @Args(src=ArraySrc.SMALL)
+    static A[] m8(A[] src) {
+        src[0] = src[0]; // force null check
+        A[] dest = new A[5];
+        System.arraycopy(src, 0, dest, 0, 5);
+        return dest;
+    }
+
+    // array copy should not be compiled as loads/stores: we would
+    // need to emit GC barriers
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.NEW)
+    static void m9(A[] src, A[] dest) {
+        System.arraycopy(src, 0, dest, 0, 5);
+    }
+
+    // overlapping array regions: copy backward
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.SRC)
+    static void m10(int[] src, int[] dest) {
+        System.arraycopy(src, 0, dest, 1, 4);
+    }
+
+    static boolean m10_check(int[] src, int[] dest) {
+        boolean failure = false;
+        for (int i = 0; i < 5; i++) {
+            int j = Math.max(i - 1, 0);
+            if (dest[i] != src[j]) {
+                System.out.println("Test m10 failed for " + i + " src[" + j +"]=" + src[j] + ", dest[" + i + "]=" + dest[i]);
+                failure = true;
+            }
+        }
+        return failure;
+    }
+
+    // overlapping array regions: copy forward
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.SRC)
+    static void m11(int[] src, int[] dest) {
+        System.arraycopy(src, 1, dest, 0, 4);
+    }
+
+    static boolean m11_check(int[] src, int[] dest) {
+        boolean failure = false;
+        for (int i = 0; i < 5; i++) {
+            int j = Math.min(i + 1, 4);
+            if (dest[i] != src[j]) {
+                System.out.println("Test m11 failed for " + i + " src[" + j +"]=" + src[j] + ", dest[" + i + "]=" + dest[i]);
+                failure = true;
+            }
+        }
+        return failure;
+    }
+
+    // overlapping array region with unknown src/dest offsets: compiled code must include both forward and backward copies
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.SRC, extra_args={0,1})
+    static void m12(int[] src, int[] dest, int srcPos, int destPos) {
+        System.arraycopy(src, srcPos, dest, destPos, 4);
+    }
+
+    static boolean m12_check(int[] src, int[] dest) {
+        boolean failure = false;
+        for (int i = 0; i < 5; i++) {
+            int j = Math.max(i - 1, 0);
+            if (dest[i] != src[j]) {
+                System.out.println("Test m10 failed for " + i + " src[" + j +"]=" + src[j] + ", dest[" + i + "]=" + dest[i]);
+                failure = true;
+            }
+        }
+        return failure;
+    }
+
+    // Array allocation and copy should optimize out
+    @Args(src=ArraySrc.SMALL)
+    static int m13(int[] src) {
+        int[] dest = new int[5];
+        System.arraycopy(src, 0, dest, 0, 5);
+        return dest[0] + dest[1] + dest[2] + dest[3] + dest[4];
+    }
+
+    // Check that copy of length 0 is handled correctly
+    @Args(src=ArraySrc.ZERO, dst=ArrayDst.NEW)
+    static void m14(int[] src, int[] dest) {
+        System.arraycopy(src, 0, dest, 0, 0);
+    }
+
+    // copyOf should compile to loads/stores
+    @Args(src=ArraySrc.SMALL)
+    static A[] m15() {
+        return Arrays.copyOf(small_a_src, 5, A[].class);
+    }
+
+    static Object[] helper16(int i) {
+        Object[] arr = null;
+        if ((i%2) == 0) {
+            arr = small_a_src;
+        } else {
+            arr = small_object_src;
+        }
+        return arr;
+    }
+
+    // CopyOf may need subtype check
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.NONE, extra_args={0})
+    static A[] m16(A[] unused_src, int i) {
+        Object[] arr = helper16(i);
+        return Arrays.copyOf(arr, 5, A[].class);
+    }
+
+    static Object[] helper17_1(int i) {
+        Object[] arr = null;
+        if ((i%2) == 0) {
+            arr = small_a_src;
+        } else {
+            arr = small_object_src;
+        }
+        return arr;
+    }
+
+    static A[] helper17_2(Object[] arr) {
+        return Arrays.copyOf(arr, 5, A[].class);
+    }
+
+    // CopyOf may leverage type speculation
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.NONE, extra_args={0})
+    static A[] m17(A[] unused_src, int i) {
+        Object[] arr = helper17_1(i);
+        return helper17_2(arr);
+    }
+
+    static Object[] helper18_1(int i) {
+        Object[] arr = null;
+        if ((i%2) == 0) {
+            arr = small_a_src;
+        } else {
+            arr = small_object_src;
+        }
+        return arr;
+    }
+
+    static Object[] helper18_2(Object[] arr) {
+        return Arrays.copyOf(arr, 5, Object[].class);
+    }
+
+    // CopyOf should not attempt to use type speculation if it's not needed
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.NONE, extra_args={0})
+    static Object[] m18(A[] unused_src, int i) {
+        Object[] arr = helper18_1(i);
+        return helper18_2(arr);
+    }
+
+    static Object[] helper19(int i) {
+        Object[] arr = null;
+        if ((i%2) == 0) {
+            arr = small_a_src;
+        } else {
+            arr = small_object_src;
+        }
+        return arr;
+    }
+
+    // CopyOf may need subtype check. Test is run to make type check
+    // fail and cause deoptimization. Next compilation should not
+    // compile as loads/stores because the first compilation
+    // deoptimized.
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.NONE, extra_args={0})
+    static A[] m19(A[] unused_src, int i) {
+        Object[] arr = helper19(i);
+        return Arrays.copyOf(arr, 5, A[].class);
+    }
+
+    // copyOf for large array should not compile to loads/stores
+    @Args(src=ArraySrc.LARGE)
+    static A[] m20() {
+        return Arrays.copyOf(large_a_src, 10, A[].class);
+    }
+
+    // check zero length copyOf is handled correctly
+    @Args(src=ArraySrc.ZERO)
+    static A[] m21() {
+        return Arrays.copyOf(zero_a_src, 0, A[].class);
+    }
+
+    // Run with srcPos=0 for a 1st compile, then with incorrect value
+    // of srcPos to cause deoptimization, then with srcPos=0 for a 2nd
+    // compile. The 2nd compile shouldn't turn arraycopy into
+    // loads/stores because input arguments are no longer known to be
+    // valid.
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.NEW, extra_args={0})
+    static void m22(int[] src, int[] dest, int srcPos) {
+        System.arraycopy(src, srcPos, dest, 0, 5);
+    }
+
+    // copyOfRange should compile to loads/stores
+    @Args(src=ArraySrc.SMALL)
+    static A[] m23() {
+        return Arrays.copyOfRange(small_a_src, 1, 4, A[].class);
+    }
+
+    static boolean m23_check(A[] src, A[] dest) {
+        boolean failure = false;
+        for (int i = 0; i < 3; i++) {
+            if (src[i+1] != dest[i]) {
+                System.out.println("Test m23 failed for " + i + " src[" + (i+1) +"]=" + dest[i] + ", dest[" + i + "]=" + dest[i]);
+                failure = true;
+            }
+        }
+        return failure;
+    }
+
+    // array copy should be compiled as loads/stores. Then invoke with an
+    // incompatible array type to verify we don't allow a forbidden
+    // arraycopy to happen.
+    @Args(src=ArraySrc.SMALL)
+    static A[] m24(Object[] src) {
+        src[0] = src[0]; // force null check
+        A[] dest = new A[5];
+        System.arraycopy(src, 0, dest, 0, 5);
+        return dest;
+    }
+
+    // overlapping array region with unknown src/dest offsets but
+    // length 1: compiled code doesn't need both forward and backward
+    // copies
+    @Args(src=ArraySrc.SMALL, dst=ArrayDst.SRC, extra_args={0,1})
+    static void m25(int[] src, int[] dest, int srcPos, int destPos) {
+        System.arraycopy(src, srcPos, dest, destPos, 1);
+    }
+
+    static boolean m25_check(int[] src, int[] dest) {
+        if (dest[1] != src[0]) {
+            System.out.println("Test m25 failed for src[0]=" + src[0] + ", dest[1]=" + dest[1]);
+            return true;
+        }
+        return false;
+    }
+
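+    // Collect all mN / mN_check methods by reflection so doTest can look them
+    // up by name.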
+    final HashMap<String,Method> tests = new HashMap<>();
+    {
+        for (Method m : this.getClass().getDeclaredMethods()) {
+            if (m.getName().matches("m[0-9]+(_check)?")) {
+                assert(Modifier.isStatic(m.getModifiers())) : m;
+                tests.put(m.getName(), m);
+            }
+        }
+    }
+
+    boolean success = true;
+
+    void doTest(String name) throws Exception {
+        Method m = tests.get(name);
+        Method m_check = tests.get(name + "_check");
+        Class[] paramTypes = m.getParameterTypes();
+        Object[] params = new Object[paramTypes.length];
+        Class retType = m.getReturnType();
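+        // Infer from the signature whether the test operates on int arrays: an
+        // int return (a sum), a void return with an int[] first parameter, or
+        // an int[] return; everything else uses A[]/Object[].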
+        boolean isIntArray = (retType.isPrimitive() && !retType.equals(Void.TYPE)) ||
+            (retType.equals(Void.TYPE) && paramTypes[0].getComponentType().isPrimitive()) ||
+            (retType.isArray() && retType.getComponentType().isPrimitive());
+
+        Args args = m.getAnnotation(Args.class);
+
+        Object src = null;
+        switch(args.src()) {
+        case SMALL: {
+            if (isIntArray) {
+                src = small_int_src;
+            } else {
+                src = small_a_src;
+            }
+            break;
+        }
+        case LARGE: {
+            if (isIntArray) {
+                src = large_int_src;
+            } else {
+                src = large_a_src;
+            }
+            break;
+        }
+        case ZERO: {
+            if (isIntArray) {
+                src = zero_int_src;
+            } else {
+                src = zero_a_src;
+            }
+            break;
+        }
+        }
+
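+        // Invoke repeatedly so the method gets compiled (the @run lines disable
+        // background compilation); the result is verified on every iteration.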
+        for (int i = 0; i < 20000; i++) {
+            boolean failure = false;
+
+            int p = 0;
+
+            if (params.length > 0) {
+                if (isIntArray) {
+                    params[0] = ((int[])src).clone();
+                } else {
+                    params[0] = ((A[])src).clone();
+                }
+                p++;
+            }
+
+            if (params.length > 1) {
+                switch(args.dst()) {
+                case NEW: {
+                    if (isIntArray) {
+                        params[1] = new int[((int[])params[0]).length];
+                    } else {
+                        params[1] = new A[((A[])params[0]).length];
+                    }
+                    p++;
+                    break;
+                }
+                case SRC: {
+                    params[1] = params[0];
+                    p++;
+                    break;
+                }
+                case NONE: break;
+                }
+            }
+
+            for (int j = 0; j < args.extra_args().length; j++) {
+                params[p+j] = args.extra_args()[j];
+            }
+
+            Object res = m.invoke(null, params);
+
+            if (retType.isPrimitive() && !retType.equals(Void.TYPE)) {
+                int s = (int)res;
+                int sum = 0;
+                int[] int_res = (int[])src;
+                for (int j = 0; j < int_res.length; j++) {
+                    sum += int_res[j];
+                }
+                failure = (s != sum);
+                if (failure) {
+                    System.out.println("Test " + name + " failed: result = " + s + " != " + sum);
+                }
+            } else {
+                Object dest = null;
+                if (!retType.equals(Void.TYPE)) {
+                    dest = res;
+                } else {
+                    dest = params[1];
+                }
+
+                if (m_check != null) {
+                    failure = (boolean)m_check.invoke(null,  new Object[] { src, dest });
+                } else {
+                    if (isIntArray) {
+                        int[] int_res = (int[])src;
+                        int[] int_dest = (int[])dest;
+                        for (int j = 0; j < int_res.length; j++) {
+                            if (int_res[j] != int_dest[j]) {
+                                System.out.println("Test " + name + " failed for " + j + " src[" + j +"]=" + int_res[j] + ", dest[" + j + "]=" + int_dest[j]);
+                                failure = true;
+                            }
+                        }
+                    } else {
+                        Object[] object_res = (Object[])src;
+                        Object[] object_dest = (Object[])dest;
+                        for (int j = 0; j < object_res.length; j++) {
+                            if (object_res[j] != object_dest[j]) {
+                                System.out.println("Test " + name + " failed for " + j + " src[" + j +"]=" + object_res[j] + ", dest[" + j + "]=" + object_dest[j]);
+                                failure = true;
+                            }
+                        }
+                    }
+                }
+            }
+
+            if (failure) {
+                success = false;
+                break;
+            }
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        for (int i = 0; i < small_a_src.length; i++) {
+            small_a_src[i] = new A();
+        }
+
+        for (int i = 0; i < small_int_src.length; i++) {
+            small_int_src[i] = i;
+        }
+
+        for (int i = 0; i < large_int_src.length; i++) {
+            large_int_src[i] = i;
+        }
+
+        for (int i = 0; i < 5; i++) {
+            small_object_src[i] = new Object();
+        }
+
+        TestArrayCopyAsLoadsStores test = new TestArrayCopyAsLoadsStores();
+
+        test.doTest("m1");
+        test.doTest("m2");
+        test.doTest("m3");
+        test.doTest("m4");
+        test.doTest("m5");
+        test.doTest("m6");
+        test.doTest("m7");
+        test.doTest("m8");
+        test.doTest("m9");
+        test.doTest("m10");
+        test.doTest("m11");
+        test.doTest("m12");
+        test.doTest("m13");
+        test.doTest("m14");
+        test.doTest("m15");
+
+        // make both branches of the If appear taken
+        for (int i = 0; i < 20000; i++) {
+            helper16(i);
+        }
+
+        test.doTest("m16");
+
+        // load class B so that the type check in m17 is not a simple comparison
+        B b = new B();
+        // make both branches of the If appear taken
+        for (int i = 0; i < 20000; i++) {
+            helper17_1(i);
+        }
+
+        test.doTest("m17");
+
+        // make both branches of the If appear taken
+        for (int i = 0; i < 20000; i++) {
+            helper18_1(i);
+        }
+        test.doTest("m18");
+
+        // make both branches of the If appear taken
+        for (int i = 0; i < 20000; i++) {
+            helper19(i);
+        }
+
+        // Compile
+        for (int i = 0; i < 20000; i++) {
+            m19(null, 0);
+        }
+
+        // force deopt
+        boolean m19_exception = false;
+        for (int i = 0; i < 10; i++) {
+            try {
+                m19(null, 1);
+            } catch(ArrayStoreException ase) {
+                m19_exception = true;
+            }
+        }
+
+        if (!m19_exception) {
+            System.out.println("Test m19: exception wasn't thrown");
+            test.success = false;
+        }
+
+        test.doTest("m19");
+
+        test.doTest("m20");
+        test.doTest("m21");
+
+        // Compile
+        int[] dst = new int[small_int_src.length];
+        for (int i = 0; i < 20000; i++) {
+            m22(small_int_src, dst, 0);
+        }
+
+        // force deopt
+        for (int i = 0; i < 10; i++) {
+            try {
+                m22(small_int_src, dst, 5);
+            } catch(ArrayIndexOutOfBoundsException aioobe) {}
+        }
+
+        test.doTest("m22");
+        test.doTest("m23");
+
+        test.doTest("m24");
+        boolean m24_exception = false;
+        try {
+            m24(small_object_src);
+        } catch(ArrayStoreException ase) {
+            m24_exception = true;
+        }
+
+        if (!m24_exception) {
+            System.out.println("Test m24: exception wasn't thrown");
+            test.success = false;
+        }
+
+        test.doTest("m25");
+
+        if (!test.success) {
+            throw new RuntimeException("some tests failed");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/c2/FloatingPointFoldingTest.java	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8073670
+ * @summary Test that causes C2 to fold two NaNs with different values into a single NaN.
+ * @run main/othervm -XX:-TieredCompilation -Xcomp -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_double_inf -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_double_zero -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_double_nan -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_float_inf -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_float_zero -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_float_nan FloatingPointFoldingTest
+ */
+
+public class FloatingPointFoldingTest {
+    // Double values.
+    public static final long MINUS_INF_LONGBITS = 0xfff0000000000000L;
+    public static final double DOUBLE_MINUS_INF = Double.longBitsToDouble(MINUS_INF_LONGBITS);
+
+    public static final long PLUS_INF_LONGBITS = 0x7ff0000000000000L;
+    public static final double DOUBLE_PLUS_INF = Double.longBitsToDouble(PLUS_INF_LONGBITS);
+
+    public static final long MINUS_ZERO_LONGBITS = 0x8000000000000000L;
+    public static final double DOUBLE_MINUS_ZERO = Double.longBitsToDouble(MINUS_ZERO_LONGBITS);
+
+    // We need two different NaN values. A floating point number is
+    // considered to be NaN if all exponent bits are set to 1 and at
+    // least one bit of the mantissa is not zero.
+    //
+    // As java.lang.Double.NaN is 0x7ff8000000000000L, we use
+    // 0x7ffc000000000000L as a second NaN double value.
+    public static final long NAN_LONGBITS = 0x7ffc000000000000L;
+    public static final double DOUBLE_NAN = Double.longBitsToDouble(NAN_LONGBITS);
+
+    // Float values.
+    public static final int MINUS_INF_INTBITS = 0xff800000;
+    public static final float FLOAT_MINUS_INF = Float.intBitsToFloat(MINUS_INF_INTBITS);
+
+    public static final int PLUS_INF_INTBITS = 0x7f800000;
+    public static final float FLOAT_PLUS_INF = Float.intBitsToFloat(PLUS_INF_INTBITS);
+
+    public static final int MINUS_ZERO_INTBITS = 0x80000000;
+    public static final float FLOAT_MINUS_ZERO = Float.intBitsToFloat(MINUS_ZERO_INTBITS);
+
+    // As java.lang.Float.NaN is 0x7fc00000, we use 0x7fe00000
+    // as a second NaN float value.
+    public static final int NAN_INTBITS = 0x7fe00000;
+    public static final float FLOAT_NAN = Float.intBitsToFloat(NAN_INTBITS);
+
+
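+    // Each test below stores the raw bit patterns of two values that a buggy
+    // constant-folding pass could conflate; if folding happens, both slots of
+    // the result array end up with identical bits and the checks below fail.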
+    // Double tests.
+    static void test_double_inf(long[] result) {
+        double d1 = DOUBLE_MINUS_INF;
+        double d2 = DOUBLE_PLUS_INF;
+        result[0] = Double.doubleToRawLongBits(d1);
+        result[1] = Double.doubleToRawLongBits(d2);
+    }
+
+    static void test_double_zero(long[] result) {
+        double d1 = DOUBLE_MINUS_ZERO;
+        double d2 = 0;
+        result[0] = Double.doubleToRawLongBits(d1);
+        result[1] = Double.doubleToRawLongBits(d2);
+    }
+
+    static void test_double_nan(long[] result) {
+        double d1 = DOUBLE_NAN;
+        double d2 = Double.NaN;
+        result[0] = Double.doubleToRawLongBits(d1);
+        result[1] = Double.doubleToRawLongBits(d2);
+    }
+
+    // Float tests.
+    static void test_float_inf(int[] result) {
+        float f1 = FLOAT_MINUS_INF;
+        float f2 = FLOAT_PLUS_INF;
+        result[0] = Float.floatToRawIntBits(f1);
+        result[1] = Float.floatToRawIntBits(f2);
+    }
+
+    static void test_float_zero(int[] result) {
+        float f1 = FLOAT_MINUS_ZERO;
+        float f2 = 0;
+        result[0] = Float.floatToRawIntBits(f1);
+        result[1] = Float.floatToRawIntBits(f2);
+    }
+
+    static void test_float_nan(int[] result) {
+        float f1 = FLOAT_NAN;
+        float f2 = Float.NaN;
+        result[0] = Float.floatToRawIntBits(f1);
+        result[1] = Float.floatToRawIntBits(f2);
+    }
+
+    // Check doubles.
+    static void check_double(long[] result, double d1, double d2) {
+        if (result[0] == result[1]) {
+            throw new RuntimeException("ERROR: Two different double values are considered equal. \n"
+                                       + String.format("\toriginal values: 0x%x 0x%x\n", Double.doubleToRawLongBits(d1), Double.doubleToRawLongBits(d2))
+                                       + String.format("\tvalues after execution of method test(): 0x%x 0x%x", result[0], result[1]));
+        }
+    }
+
+    // Check floats.
+    static void check_float(int[] result, float f1, float f2) {
+        if (result[0] == result[1]) {
+            throw new RuntimeException("ERROR: Two different float values are considered equal. \n"
+                                       + String.format("\toriginal values: 0x%x 0x%x\n", Float.floatToRawIntBits(f1), Float.floatToRawIntBits(f2))
+                                       + String.format("\tvalues after execution of method test(): 0x%x 0x%x", result[0], result[1]));
+        }
+    }
+
+    public static void main(String[] args) {
+        // Float tests.
+
+        int[] iresult = new int[2];
+
+        // -Inf and +Inf.
+        test_float_inf(iresult);
+        check_float(iresult, FLOAT_MINUS_INF, FLOAT_PLUS_INF);
+
+        // 0 and -0.
+        test_float_zero(iresult);
+        check_float(iresult, FLOAT_MINUS_ZERO, 0);
+
+        // Different NaNs.
+        test_float_nan(iresult);
+        check_float(iresult, FLOAT_NAN, Float.NaN);
+
+        // Double tests.
+
+        long[] lresult = new long[2];
+
+        // -Inf and +Inf.
+        test_double_inf(lresult);
+        check_double(lresult, DOUBLE_MINUS_INF, DOUBLE_PLUS_INF);
+
+        // 0 and -0.
+        test_double_zero(lresult);
+        check_double(lresult, DOUBLE_MINUS_ZERO, 0);
+
+        // Different NaNs.
+        test_double_nan(lresult);
+        check_double(lresult, DOUBLE_NAN, Double.NaN);
+    }
+}
--- a/hotspot/test/compiler/codecache/jmx/UsageThresholdIncreasedTest.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/compiler/codecache/jmx/UsageThresholdIncreasedTest.java	Fri Mar 06 04:58:52 2015 -0800
@@ -51,7 +51,9 @@
 
     public static void main(String[] args) {
         for (BlobType btype : BlobType.getAvailable()) {
-            new UsageThresholdIncreasedTest(btype).runTest();
+            if (CodeCacheUtils.isCodeHeapPredictable(btype)) {
+                new UsageThresholdIncreasedTest(btype).runTest();
+            }
         }
     }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/escapeAnalysis/TestEscapeThroughInvoke.java	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8073956
+ * @summary Tests C2 EA with allocated object escaping through a call.
+ * @run main/othervm -XX:CompileCommand=dontinline,TestEscapeThroughInvoke::create TestEscapeThroughInvoke
+ */
+public class TestEscapeThroughInvoke {
+    private A a;
+
+    public static void main(String[] args) {
+        TestEscapeThroughInvoke test = new TestEscapeThroughInvoke();
+        test.a = new A(42);
+        // Make sure run gets compiled by C2
+        for (int i = 0; i < 100_000; ++i) {
+            test.run();
+        }
+    }
+
+    private void run() {
+        // Allocate something to trigger EA
+        new Object();
+        // Create a new escaping instance of A and
+        // verify that it is always equal to 'a.saved'.
+        A escapingA = create(42);
+        a.check(escapingA);
+    }
+
+    // Create and return a new instance of A that escapes through 'A::saveInto'.
+    // The 'dummy' parameters are needed to avoid EA skipping the methods.
+    private A create(Integer dummy) {
+        A result = new A(dummy);
+        result.saveInto(a, dummy); // result escapes into 'a' here
+        return result;
+    }
+}
+
+class A {
+    private A saved;
+
+    public A(Integer dummy) { }
+
+    public void saveInto(A other, Integer dummy) {
+        other.saved = this;
+    }
+
+    public void check(A other) {
+        if (this.saved != other) {
+            throw new RuntimeException("TEST FAILED: Objects not equal.");
+        }
+    }
+}
--- a/hotspot/test/compiler/loopopts/CountedLoopProblem.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/compiler/loopopts/CountedLoopProblem.java	Fri Mar 06 04:58:52 2015 -0800
@@ -36,18 +36,22 @@
     public static void main(String[] args) throws Exception {
         Random r = new Random(42);
         int x = 0;
-        StringBuilder sb = new StringBuilder();
-        for(int i = 0; i < 1000000; ++i) {
-            int v = Math.abs(r.nextInt());
-            sb.append('+').append(v).append('\n');
-            x += v;
-            // To trigger the problem we must OSR in the following loop
-            // To make the problem 100% reproducible run with -XX:-TieredCompilation -XX:OSROnlyBCI=62
-            while(x < 0) x += 1000000000;
-            sb.append('=').append(x).append('\n');
-        }
-        if (sb.toString().hashCode() != 0xaba94591) {
-            throw new Exception("Unexpected result");
+        try {
+            StringBuilder sb = new StringBuilder();
+            for(int i = 0; i < 1000000; ++i) {
+                int v = Math.abs(r.nextInt());
+                sb.append('+').append(v).append('\n');
+                x += v;
+                // To trigger the problem we must OSR in the following loop
+                // To make the problem 100% reproducible run with -XX:-TieredCompilation -XX:OSROnlyBCI=62
+                while(x < 0) x += 1000000000;
+                sb.append('=').append(x).append('\n');
+            }
+            if (sb.toString().hashCode() != 0xaba94591) {
+                throw new Exception("Unexpected result");
+            }
+        } catch(OutOfMemoryError e) {
+            // small heap, ignore
         }
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/loopopts/TestCastIINoLoopLimitCheck.java	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8073184
+ * @summary CastII that guards counted loops confuses range check elimination with LoopLimitCheck off
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-LoopLimitCheck -XX:CompileOnly=TestCastIINoLoopLimitCheck.m -Xcomp TestCastIINoLoopLimitCheck
+ *
+ */
+
+public class TestCastIINoLoopLimitCheck {
+
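+    // With -XX:-LoopLimitCheck, the CastII guarding this counted loop used to
+    // confuse range check elimination (see the bug summary above). Compiling m
+    // with -Xcomp is enough to reproduce; no result is checked.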
+    static void m(int i, int index, char[] buf) {
+        while (i >= 65536) {
+            i = i / 100;
+            buf [--index] = 0;
+            buf [--index] = 1;
+        }
+    }
+
+    public static void main(String[] args) {
+        m(0, 0, null);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/print/TestProfileReturnTypePrinting.java	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8073154
+ * @build TestProfileReturnTypePrinting
+ * @run main/othervm -XX:TypeProfileLevel=020
+ *                   -XX:CompileOnly=TestProfileReturnTypePrinting.testMethod
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintLIR
+ *                   TestProfileReturnTypePrinting
+ * @summary Verify that c1's LIR that contains a ProfileType node can be dumped
+ *          without a crash, regardless of exact class knowledge.
+ */
+public class TestProfileReturnTypePrinting {
+    private static final int ITERATIONS = 1_000_000;
+
+    public static void main(String args[]) {
+        for (int i = 0; i < ITERATIONS; i++) {
+            TestProfileReturnTypePrinting.testMethod(i);
+        }
+    }
+
+    private static int testMethod(int i) {
+        return TestProfileReturnTypePrinting.foo().hashCode()
+                + TestProfileReturnTypePrinting.bar(i).hashCode();
+    }
+
+    /* Exact class of returned value is known statically. */
+    private static B foo() {
+        return new B();
+    }
+
+    /* Exact class of returned value is not known statically. */
+    private static Object bar(int i) {
+        if (i % 2 == 0) {
+            return new A();
+        } else {
+            return new B();
+        }
+    }
+
+    private static class A {
+    }
+
+    private static class B extends A {
+    }
+}
--- a/hotspot/test/compiler/tiered/LevelTransitionTest.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/compiler/tiered/LevelTransitionTest.java	Fri Mar 06 04:58:52 2015 -0800
@@ -29,6 +29,7 @@
 /**
  * @test LevelTransitionTest
  * @library /testlibrary /../../test/lib /compiler/whitebox
+ * @ignore 8067651
  * @build TransitionsTestExecutor LevelTransitionTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm/timeout=240 -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
--- a/hotspot/test/gc/TestSmallHeap.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/gc/TestSmallHeap.java	Fri Mar 06 04:58:52 2015 -0800
@@ -26,6 +26,7 @@
  * @bug 8067438
  * @requires vm.gc=="null"
  * @requires (vm.opt.AggressiveOpts=="null") | (vm.opt.AggressiveOpts=="false")
+ * @requires vm.compMode != "Xcomp"
  * @summary Verify that starting the VM with a small heap works
  * @library /testlibrary /../../test/lib
  * @build TestSmallHeap
--- a/hotspot/test/gc/TestSoftReferencesBehaviorOnOOME.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/gc/TestSoftReferencesBehaviorOnOOME.java	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
  * @key gc
  * @summary Tests that all SoftReferences has been cleared at time of OOM.
  * @library /testlibrary
+ * @ignore 8073669
  * @build TestSoftReferencesBehaviorOnOOME
  * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 512 2k
  * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 128k 256k
--- a/hotspot/test/runtime/Safepoint/AssertSafepointCheckConsistency1.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/runtime/Safepoint/AssertSafepointCheckConsistency1.java	Fri Mar 06 04:58:52 2015 -0800
@@ -47,6 +47,7 @@
                   "-XX:+UnlockDiagnosticVMOptions",
                   "-XX:+WhiteBoxAPI",
                   "-XX:-TransmitErrorReport",
+                  "-XX:-CreateMinidumpOnCrash",
                   "-Xmx32m",
                   "AssertSafepointCheckConsistency1",
                   "test");
@@ -55,5 +56,3 @@
         }
     }
 }
-
-
--- a/hotspot/test/runtime/Safepoint/AssertSafepointCheckConsistency2.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/runtime/Safepoint/AssertSafepointCheckConsistency2.java	Fri Mar 06 04:58:52 2015 -0800
@@ -47,6 +47,7 @@
                   "-XX:+UnlockDiagnosticVMOptions",
                   "-XX:+WhiteBoxAPI",
                   "-XX:-TransmitErrorReport",
+                  "-XX:-CreateMinidumpOnCrash",
                   "-Xmx32m",
                   "AssertSafepointCheckConsistency2",
                   "test");
--- a/hotspot/test/runtime/Safepoint/AssertSafepointCheckConsistency3.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/runtime/Safepoint/AssertSafepointCheckConsistency3.java	Fri Mar 06 04:58:52 2015 -0800
@@ -47,6 +47,7 @@
                   "-XX:+UnlockDiagnosticVMOptions",
                   "-XX:+WhiteBoxAPI",
                   "-XX:-TransmitErrorReport",
+                  "-XX:-CreateMinidumpOnCrash",
                   "-Xmx32m",
                   "AssertSafepointCheckConsistency3",
                   "test");
--- a/hotspot/test/runtime/Safepoint/AssertSafepointCheckConsistency4.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/runtime/Safepoint/AssertSafepointCheckConsistency4.java	Fri Mar 06 04:58:52 2015 -0800
@@ -47,6 +47,7 @@
                "-XX:+UnlockDiagnosticVMOptions",
                "-XX:+WhiteBoxAPI",
                "-XX:-TransmitErrorReport",
+               "-XX:-CreateMinidumpOnCrash",
                "-Xmx32m",
                "AssertSafepointCheckConsistency4",
                "test");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/Thread/Fibonacci.java	Fri Mar 06 04:58:52 2015 -0800
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @summary Calculates Fibonacci numbers "recursively" via threads and compares
+ *     the result with the classical calculation.
+ *     This test is skipped on 32-bit Windows: the limited virtual address space
+ *     on Win-32 makes this test inherently unstable with a 32-bit VM data model.
+ * @requires !(os.family == "windows" & sun.arch.data.model == "32")
+ * @library /testlibrary
+ * @run main Fibonacci 15
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+
+public class Fibonacci extends Thread {
+    private int index;
+    private int value;
+    private Fibonacci left;
+    private Fibonacci right;
+
+    public Fibonacci(int i) {
+        index = i;
+    }
+
+    private int getValue() {
+        return value;
+    }
+
+    @Override
+    public void run() {
+        if (index == 0 || index == 1) {
+            // base cases: Fibonacci(0) = 0, Fibonacci(1) = 1
+            value = index;
+        } else {
+            // inductive cases
+            left = new Fibonacci(index - 2);
+            right = new Fibonacci(index - 1);
+            left.start();
+            right.start();
+            try {
+                left.join();
+                right.join();
+            } catch (InterruptedException e) {
+                throw new Error("InterruptedException for index " + index, e);
+            }
+            // compute and terminate
+            value = left.getValue() + right.getValue();
+        }
+    }
+
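+    // Classical iterative computation, used as the expected reference value.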
+    public static int traditionalFibonacci(int n) {
+        int n1 = 0, n2 = 1, nn = 0;
+
+        if (n == 0 || n == 1) {
+           nn = n;
+        }
+
+        for (int i = 1; i < n; ++i) {
+            nn = n2 + n1;
+            n1 = n2;
+            n2 = nn;
+        }
+        return nn;
+    }
+
+    public static void main(String[] args) throws Error,AssertionError {
+        int expected;
+        int number;
+        Fibonacci recursiveFibonacci;
+
+        if (args.length != 1) {
+            throw new Error("Error: args.length must be 1");
+        }
+
+        number = Integer.parseInt(args[0]);
+        recursiveFibonacci = new Fibonacci(number);
+
+        recursiveFibonacci.start();
+        try {
+            recursiveFibonacci.join();
+        } catch (InterruptedException e) {
+            throw new Error("InterruptedException in main thread", e);
+        }
+
+        expected = traditionalFibonacci(number);
+
+        System.out.println("Fibonacci[" + number + "] = " + expected);
+
+        Asserts.assertEQ(recursiveFibonacci.getValue(), expected,
+                          "Unexpected calculated value: " + recursiveFibonacci.getValue() + " expected " + expected );
+    }
+}
--- a/hotspot/test/serviceability/dcmd/gc/RunGCTest.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/serviceability/dcmd/gc/RunGCTest.java	Fri Mar 06 04:58:52 2015 -0800
@@ -39,7 +39,7 @@
  * @library /testlibrary
  * @build com.oracle.java.testlibrary.*
  * @build com.oracle.java.testlibrary.dcmd.*
- * @run testng/othervm -XX:+PrintGCDetails -Xloggc:RunGC.gclog RunGCTest
+ * @run testng/othervm -XX:+PrintGCDetails -Xloggc:RunGC.gclog -XX:-ExplicitGCInvokesConcurrent RunGCTest
  */
 public class RunGCTest {
     public void run(CommandExecutor executor) {
--- a/hotspot/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/CompileTheWorld.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/CompileTheWorld.java	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
 
 package sun.hotspot.tools.ctw;
 
-import sun.management.ManagementFactoryHelper;
+import java.lang.management.ManagementFactory;
 
 import java.io.*;
 import java.nio.file.Files;
@@ -55,7 +55,7 @@
 
         try {
             try {
-                if (ManagementFactoryHelper.getCompilationMXBean() == null) {
+                if (ManagementFactory.getCompilationMXBean() == null) {
                     throw new RuntimeException(
                             "CTW can not work in interpreted mode");
                 }
--- a/hotspot/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/Utils.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/Utils.java	Fri Mar 06 04:58:52 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 package sun.hotspot.tools.ctw;
 
 import com.sun.management.HotSpotDiagnosticMXBean;
-import sun.management.ManagementFactoryHelper;
+import java.lang.management.ManagementFactory;
 
 import java.io.File;
 import java.util.regex.Pattern;
@@ -160,7 +160,7 @@
     public static String getVMOption(String name) {
         String result;
         HotSpotDiagnosticMXBean diagnostic
-                = ManagementFactoryHelper.getDiagnosticMXBean();
+                = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
         result = diagnostic.getVMOption(name).getValue();
         return result;
     }
--- a/hotspot/test/testlibrary_tests/RandomGeneratorTest.java	Wed Jul 05 20:23:32 2017 +0200
+++ b/hotspot/test/testlibrary_tests/RandomGeneratorTest.java	Fri Mar 06 04:58:52 2015 -0800
@@ -58,7 +58,7 @@
         }
         jvmArgs.add(RandomRunner.class.getName());
         String[] cmdLineArgs = jvmArgs.toArray(new String[jvmArgs.size()]);
-        String etalon = ProcessTools.executeTestJvm(cmdLineArgs).getOutput().trim();
+        String etalon = ProcessTools.executeTestJvm(cmdLineArgs).getStdout().trim();
         seedOpt.verify(etalon, cmdLineArgs);
     }
 
@@ -122,7 +122,7 @@
             String lastLineOrig = getLastLine(orig);
             String lastLine;
             try {
-                lastLine = getLastLine(ProcessTools.executeTestJvm(cmdLine).getOutput().trim());
+                lastLine = getLastLine(ProcessTools.executeTestJvm(cmdLine).getStdout().trim());
             } catch (Throwable t) {
                 throw new Error("TESTBUG: Unexpedted exception during jvm execution.", t);
             }