Merge
author neliasso
Tue, 01 Dec 2015 19:18:56 +0100
changeset 34504 e113f0e2c682
parent 34318 a60242eae2ea (current diff)
parent 34503 57d1a0e76091 (diff)
child 34506 7af1663b3497
Merge
--- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -386,7 +386,8 @@
 
   // get type
   __ add(r3, r1, tags_offset);
-  __ ldrb(r3, Address(r0, r3));
+  __ lea(r3, Address(r0, r3));
+  __ ldarb(r3, r3);
 
   // unresolved class - get the resolved class
   __ cmp(r3, JVM_CONSTANT_UnresolvedClass);
@@ -3316,7 +3317,8 @@
   // how Constant Pool is updated (see ConstantPool::klass_at_put)
   const int tags_offset = Array<u1>::base_offset_in_bytes();
   __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
-  __ ldrb(rscratch1, Address(rscratch1, tags_offset));
+  __ lea(rscratch1, Address(rscratch1, tags_offset));
+  __ ldarb(rscratch1, rscratch1);
   __ cmp(rscratch1, JVM_CONSTANT_Class);
   __ br(Assembler::NE, slow_case);
 
@@ -3460,7 +3462,8 @@
   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
   // See if bytecode has already been quicked
   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
-  __ ldrb(r1, Address(rscratch1, r19));
+  __ lea(r1, Address(rscratch1, r19));
+  __ ldarb(r1, r1);
   __ cmp(r1, JVM_CONSTANT_Class);
   __ br(Assembler::EQ, quicked);
 
@@ -3514,7 +3517,8 @@
   __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
   // See if bytecode has already been quicked
   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
-  __ ldrb(r1, Address(rscratch1, r19));
+  __ lea(r1, Address(rscratch1, r19));
+  __ ldarb(r1, r1);
   __ cmp(r1, JVM_CONSTANT_Class);
   __ br(Assembler::EQ, quicked);
 
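
Note on the ldrb -> ldarb changes above: the constant-pool tag byte is now read with a load-acquire so that it pairs with the release-style publication performed when the pool entry is resolved (see the nearby comment pointing at ConstantPool::klass_at_put). Below is a minimal standalone Java sketch of that acquire/release pairing, not the generated AArch64 code; the class, array and tag value are illustrative only.

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    // Illustrative only: models the acquire/release pairing behind the
    // ldrb -> ldarb change; class, field and constant names are made up.
    public class TagAcquireExample {
        static final VarHandle TAGS = MethodHandles.arrayElementVarHandle(byte[].class);
        static final byte JVM_CONSTANT_Class = 7; // resolved-class tag value (illustrative)

        final byte[] tags = new byte[16];

        // Writer: publish the resolved entry, then release-store the tag
        // (analogous to ConstantPool::klass_at_put).
        void resolve(int index) {
            TAGS.setRelease(tags, index, JVM_CONSTANT_Class);
        }

        // Reader: acquire-load the tag (analogous to the new ldarb), so that
        // everything published before the release store is visible whenever
        // the tag reads as resolved.
        boolean isResolved(int index) {
            return (byte) TAGS.getAcquire(tags, index) == JVM_CONSTANT_Class;
        }

        public static void main(String[] args) {
            TagAcquireExample cp = new TagAcquireExample();
            cp.resolve(3);
            System.out.println(cp.isResolved(3)); // true
        }
    }
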
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/CompilationResult.java	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/CompilationResult.java	Tue Dec 01 19:18:56 2015 +0100
@@ -870,31 +870,11 @@
      * Records a custom infopoint in the code section.
      *
      * Compiler implementations can use this method to record non-standard infopoints, which are not
-     * handled by the dedicated methods like {@link #recordCall}.
+     * handled by dedicated methods like {@link #recordCall}.
      *
      * @param infopoint the infopoint to record, usually a derived class from {@link Infopoint}
      */
     public void addInfopoint(Infopoint infopoint) {
-        // The infopoints list must always be sorted
-        if (!infopoints.isEmpty()) {
-            Infopoint previousInfopoint = infopoints.get(infopoints.size() - 1);
-            if (previousInfopoint.pcOffset > infopoint.pcOffset) {
-                // This re-sorting should be very rare
-                Collections.sort(infopoints);
-                previousInfopoint = infopoints.get(infopoints.size() - 1);
-            }
-            if (previousInfopoint.pcOffset == infopoint.pcOffset) {
-                if (infopoint.reason.canBeOmitted()) {
-                    return;
-                }
-                if (previousInfopoint.reason.canBeOmitted()) {
-                    Infopoint removed = infopoints.remove(infopoints.size() - 1);
-                    assert removed == previousInfopoint;
-                } else {
-                    throw new RuntimeException("Infopoints that can not be omited should have distinct PCs");
-                }
-            }
-        }
         infopoints.add(infopoint);
     }
 
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/InfopointReason.java	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/InfopointReason.java	Tue Dec 01 19:18:56 2015 +0100
@@ -26,22 +26,12 @@
  * A reason for infopoint insertion.
  */
 public enum InfopointReason {
-    UNKNOWN(false),
-    SAFEPOINT(false),
-    CALL(false),
-    IMPLICIT_EXCEPTION(false),
-    METHOD_START(true),
-    METHOD_END(true),
-    LINE_NUMBER(true),
-    METASPACE_ACCESS(true);
 
-    private InfopointReason(boolean canBeOmitted) {
-        this.canBeOmitted = canBeOmitted;
-    }
-
-    private final boolean canBeOmitted;
-
-    public boolean canBeOmitted() {
-        return canBeOmitted;
-    }
+    SAFEPOINT,
+    CALL,
+    IMPLICIT_EXCEPTION,
+    METASPACE_ACCESS,
+    METHOD_START,
+    METHOD_END,
+    BYTECODE_POSITION;
 }
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotCompiledCode.java	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotCompiledCode.java	Tue Dec 01 19:18:56 2015 +0100
@@ -24,9 +24,12 @@
 
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Comparator;
+import java.util.EnumMap;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Stream;
 import java.util.stream.Stream.Builder;
 
@@ -41,6 +44,8 @@
 import jdk.vm.ci.code.CompilationResult.Mark;
 import jdk.vm.ci.code.CompilationResult.Site;
 import jdk.vm.ci.code.DataSection;
+import jdk.vm.ci.code.InfopointReason;
+import jdk.vm.ci.common.JVMCIError;
 import jdk.vm.ci.meta.Assumptions.Assumption;
 import jdk.vm.ci.meta.ResolvedJavaMethod;
 
@@ -155,14 +160,75 @@
 
     static class SiteComparator implements Comparator<Site> {
 
+        /**
+         * Defines an order for sorting {@link Infopoint}s based on their
+         * {@linkplain Infopoint#reason reasons}. This is used to choose which infopoint to preserve
+         * when multiple infopoints collide on the same PC offset. A negative order value implies a
+         * non-optional infopoint (i.e., must be preserved). Non-optional infopoints must not
+         * collide.
+         */
+        static final Map<InfopointReason, Integer> HOTSPOT_INFOPOINT_SORT_ORDER = new EnumMap<>(InfopointReason.class);
+        static {
+            HOTSPOT_INFOPOINT_SORT_ORDER.put(InfopointReason.SAFEPOINT, -4);
+            HOTSPOT_INFOPOINT_SORT_ORDER.put(InfopointReason.CALL, -3);
+            HOTSPOT_INFOPOINT_SORT_ORDER.put(InfopointReason.IMPLICIT_EXCEPTION, -2);
+            HOTSPOT_INFOPOINT_SORT_ORDER.put(InfopointReason.METASPACE_ACCESS, 1);
+            HOTSPOT_INFOPOINT_SORT_ORDER.put(InfopointReason.METHOD_START, 2);
+            HOTSPOT_INFOPOINT_SORT_ORDER.put(InfopointReason.METHOD_END, 3);
+            HOTSPOT_INFOPOINT_SORT_ORDER.put(InfopointReason.BYTECODE_POSITION, 4);
+        }
+
+        static int ord(Infopoint info) {
+            return HOTSPOT_INFOPOINT_SORT_ORDER.get(info.reason);
+        }
+
+        static int checkCollision(Infopoint i1, Infopoint i2) {
+            int o1 = ord(i1);
+            int o2 = ord(i2);
+            if (o1 < 0 && o2 < 0) {
+                throw new JVMCIError("Non-optional infopoints cannot collide: %s and %s", i1, i2);
+            }
+            return o1 - o2;
+        }
+
+        /**
+         * Records whether any two {@link Infopoint}s had the same {@link Infopoint#pcOffset}.
+         */
+        boolean sawCollidingInfopoints;
+
         public int compare(Site s1, Site s2) {
-            if (s1.pcOffset == s2.pcOffset && (s1 instanceof Mark ^ s2 instanceof Mark)) {
-                return s1 instanceof Mark ? -1 : 1;
+            if (s1.pcOffset == s2.pcOffset) {
+                // Marks must come first since patching a call site
+                // may need to know the mark denoting the call type
+                // (see uses of CodeInstaller::_next_call_type).
+                boolean s1IsMark = s1 instanceof Mark;
+                boolean s2IsMark = s2 instanceof Mark;
+                if (s1IsMark != s2IsMark) {
+                    return s1IsMark ? -1 : 1;
+                }
+
+                // Infopoints must group together so put them after
+                // other Site types.
+                boolean s1IsInfopoint = s1 instanceof Infopoint;
+                boolean s2IsInfopoint = s2 instanceof Infopoint;
+                if (s1IsInfopoint != s2IsInfopoint) {
+                    return s1IsInfopoint ? 1 : -1;
+                }
+
+                if (s1IsInfopoint) {
+                    sawCollidingInfopoints = true;
+                    return checkCollision((Infopoint) s1, (Infopoint) s2);
+                }
             }
             return s1.pcOffset - s2.pcOffset;
         }
     }
 
+    /**
+     * HotSpot expects sites to be presented in ascending order of PC (see
+     * {@code DebugInformationRecorder::add_new_pc_offset}). In addition, it expects
+     * {@link Infopoint} PCs to be unique.
+     */
     private static Site[] getSortedSites(CompilationResult target) {
         List<?>[] lists = new List<?>[]{target.getInfopoints(), target.getDataPatches(), target.getMarks()};
         int count = 0;
@@ -176,7 +242,27 @@
                 result[pos++] = (Site) elem;
             }
         }
-        Arrays.sort(result, new SiteComparator());
+        SiteComparator c = new SiteComparator();
+        Arrays.sort(result, c);
+        if (c.sawCollidingInfopoints) {
+            Infopoint lastInfopoint = null;
+            List<Site> copy = new ArrayList<>(count);
+            for (int i = 0; i < count; i++) {
+                if (result[i] instanceof Infopoint) {
+                    Infopoint info = (Infopoint) result[i];
+                    if (lastInfopoint == null || lastInfopoint.pcOffset != info.pcOffset) {
+                        lastInfopoint = info;
+                        copy.add(info);
+                    } else {
+                        // Omit this colliding infopoint
+                        assert lastInfopoint.reason.compareTo(info.reason) <= 0;
+                    }
+                } else {
+                    copy.add(result[i]);
+                }
+            }
+            result = copy.toArray(new Site[copy.size()]);
+        }
         return result;
     }
 
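
The new SiteComparator javadoc above describes how colliding infopoints are ranked by reason and how getSortedSites then keeps only the highest-priority infopoint per PC. Below is a self-contained sketch of that collide-and-dedup step; SimpleInfopoint and the use of enum ordinal as priority are stand-ins for the JVMCI Infopoint class and HOTSPOT_INFOPOINT_SORT_ORDER, and the sketch omits the error check for colliding non-optional infopoints.

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    // Standalone model of the infopoint collide-and-dedup step; the types
    // and priority scheme here are illustrative, not the JVMCI classes.
    public class InfopointDedupExample {
        // Declared in priority order, matching the sort order used above.
        enum Reason { SAFEPOINT, CALL, IMPLICIT_EXCEPTION, METASPACE_ACCESS,
                      METHOD_START, METHOD_END, BYTECODE_POSITION }

        static final class SimpleInfopoint {
            final int pcOffset;
            final Reason reason;
            SimpleInfopoint(int pcOffset, Reason reason) { this.pcOffset = pcOffset; this.reason = reason; }
            public String toString() { return reason + "@" + pcOffset; }
        }

        static final Comparator<SimpleInfopoint> BY_PC_THEN_REASON =
                Comparator.comparingInt((SimpleInfopoint i) -> i.pcOffset)
                          .thenComparingInt(i -> i.reason.ordinal());

        // Second pass after sorting: keep only the first (highest-priority)
        // infopoint for each PC, as getSortedSites does when collisions were seen.
        static List<SimpleInfopoint> dedup(List<SimpleInfopoint> sorted) {
            List<SimpleInfopoint> result = new ArrayList<>();
            SimpleInfopoint last = null;
            for (SimpleInfopoint info : sorted) {
                if (last == null || last.pcOffset != info.pcOffset) {
                    result.add(info);
                    last = info;
                } // else: drop the colliding, lower-priority infopoint
            }
            return result;
        }

        public static void main(String[] args) {
            List<SimpleInfopoint> sites = new ArrayList<>(List.of(
                    new SimpleInfopoint(8, Reason.BYTECODE_POSITION),
                    new SimpleInfopoint(8, Reason.CALL),
                    new SimpleInfopoint(4, Reason.METHOD_START)));
            sites.sort(BY_PC_THEN_REASON);
            System.out.println(dedup(sites)); // [METHOD_START@4, CALL@8]
        }
    }

In the real code the comparator also has to order Marks before other sites and group Infopoints last at a given PC, which is why the dedup runs as a second pass in getSortedSites rather than inside the comparator.
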
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -36,7 +36,7 @@
   // We must have enough patching space so that call can be inserted.
   // We cannot use fat nops here, since the concurrent code rewrite may transiently
   // create the illegal instruction sequence.
-  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
+  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
     _masm->nop();
   }
   patch->install(_masm, patch_code, obj, info);
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -1163,7 +1163,7 @@
           }
 #endif
 
-          for (int i = NativeCall::instruction_size; i < *byte_count; i++) {
+          for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {
             address ptr = copy_buff + i;
             int a_byte = (*ptr) & 0xFF;
             address dst = instr_pc + i;
--- a/hotspot/src/share/vm/code/debugInfoRec.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/code/debugInfoRec.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -33,7 +33,7 @@
 // We keep track of these chunks in order to detect
 // repetition and enable sharing.
 class DIR_Chunk {
-  friend class DebugInformationRecorder;
+private:
   int  _offset; // location in the stream of this scope
   int  _length; // number of bytes in the stream
   int  _hash;   // hash of stream bytes (for quicker reuse)
@@ -41,6 +41,9 @@
   DebugInformationRecorder* _DIR;
 #endif
 
+public:
+  int offset() { return _offset; }
+
   void* operator new(size_t ignore, DebugInformationRecorder* dir) throw() {
     assert(ignore == sizeof(DIR_Chunk), "");
     if (dir->_next_chunk >= dir->_next_chunk_limit) {
@@ -284,7 +287,7 @@
     NOT_PRODUCT(++dir_stats.chunks_shared);
     assert(ns+1 == _next_chunk, "");
     _next_chunk = ns;
-    return match->_offset;
+    return match->offset();
   } else {
     // Inserted this chunk, so nothing to do
     return serialized_null;
@@ -296,7 +299,7 @@
     NOT_PRODUCT(++dir_stats.chunks_reshared);
     assert(ns+1 == _next_chunk, "");
     _next_chunk = ns;
-    return ms->_offset;
+    return ms->offset();
   }
 
   // Look in recently encountered scopes next:
@@ -311,7 +314,7 @@
     _shared_chunks->append(ms);
     assert(ns+1 == _next_chunk, "");
     _next_chunk = ns;
-    return ms->_offset;
+    return ms->offset();
   }
 
   // No match.  Add this guy to the list, in hopes of future shares.
--- a/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -727,10 +727,9 @@
       if (InfopointReason::SAFEPOINT() == reason || InfopointReason::CALL() == reason || InfopointReason::IMPLICIT_EXCEPTION() == reason) {
         TRACE_jvmci_4("safepoint at %i", pc_offset);
         site_Safepoint(buffer, pc_offset, site, CHECK_OK);
-      } else if (InfopointReason::METHOD_START() == reason || InfopointReason::METHOD_END() == reason || InfopointReason::LINE_NUMBER() == reason) {
+      } else {
+        TRACE_jvmci_4("infopoint at %i", pc_offset);
         site_Infopoint(buffer, pc_offset, site, CHECK_OK);
-      } else {
-        JVMCI_ERROR_OK("unknown infopoint reason at %i", pc_offset);
       }
     } else if (site->is_a(CompilationResult_DataPatch::klass())) {
       TRACE_jvmci_4("datapatch at %i", pc_offset);
@@ -868,25 +867,33 @@
   return objects;
 }
 
-void CodeInstaller::record_scope(jint pc_offset, Handle debug_info, TRAPS) {
+void CodeInstaller::record_scope(jint pc_offset, Handle debug_info, ScopeMode scope_mode, TRAPS) {
   Handle position = DebugInfo::bytecodePosition(debug_info);
   if (position.is_null()) {
     // Stubs do not record scope info, just oop maps
     return;
   }
 
-  GrowableArray<ScopeValue*>* objectMapping = record_virtual_objects(debug_info, CHECK);
-  record_scope(pc_offset, position, objectMapping, CHECK);
+  GrowableArray<ScopeValue*>* objectMapping;
+  if (scope_mode == CodeInstaller::FullFrame) {
+    objectMapping = record_virtual_objects(debug_info, CHECK);
+  } else {
+    objectMapping = NULL;
+  }
+  record_scope(pc_offset, position, scope_mode, objectMapping, CHECK);
 }
 
-void CodeInstaller::record_scope(jint pc_offset, Handle position, GrowableArray<ScopeValue*>* objects, TRAPS) {
+void CodeInstaller::record_scope(jint pc_offset, Handle position, ScopeMode scope_mode, GrowableArray<ScopeValue*>* objects, TRAPS) {
   Handle frame;
-  if (position->is_a(BytecodeFrame::klass())) {
+  if (scope_mode == CodeInstaller::FullFrame) {
+    if (!position->is_a(BytecodeFrame::klass())) {
+      JVMCI_ERROR("Full frame expected for debug info at %i", pc_offset);
+    }
     frame = position;
   }
   Handle caller_frame = BytecodePosition::caller(position);
   if (caller_frame.not_null()) {
-    record_scope(pc_offset, caller_frame, objects, CHECK);
+    record_scope(pc_offset, caller_frame, scope_mode, objects, CHECK);
   }
 
   Handle hotspot_method = BytecodePosition::method(position);
@@ -990,7 +997,7 @@
   // jint next_pc_offset = Assembler::locate_next_instruction(instruction) - _instructions->start();
   OopMap *map = create_oop_map(debug_info, CHECK);
   _debug_recorder->add_safepoint(pc_offset, map);
-  record_scope(pc_offset, debug_info, CHECK);
+  record_scope(pc_offset, debug_info, CodeInstaller::FullFrame, CHECK);
   _debug_recorder->end_safepoint(pc_offset);
 }
 
@@ -1000,8 +1007,12 @@
     JVMCI_ERROR("debug info expected at infopoint at %i", pc_offset);
   }
 
+  // We'd like to check that pc_offset is greater than the
+  // last pc recorded with _debug_recorder (raising an exception if not)
+  // but DebugInformationRecorder doesn't have sufficient public API.
+
   _debug_recorder->add_non_safepoint(pc_offset);
-  record_scope(pc_offset, debug_info, CHECK);
+  record_scope(pc_offset, debug_info, CodeInstaller::BytecodePosition, CHECK);
   _debug_recorder->end_non_safepoint(pc_offset);
 }
 
@@ -1028,7 +1039,7 @@
   if (debug_info.not_null()) {
     OopMap *map = create_oop_map(debug_info, CHECK);
     _debug_recorder->add_safepoint(next_pc_offset, map);
-    record_scope(next_pc_offset, debug_info, CHECK);
+    record_scope(next_pc_offset, debug_info, CodeInstaller::FullFrame, CHECK);
   }
 
   if (foreign_call.not_null()) {
--- a/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.hpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.hpp	Tue Dec 01 19:18:56 2015 +0100
@@ -219,8 +219,18 @@
 
   OopMap* create_oop_map(Handle debug_info, TRAPS);
 
-  void record_scope(jint pc_offset, Handle debug_info, TRAPS);
-  void record_scope(jint pc_offset, Handle code_pos, GrowableArray<ScopeValue*>* objects, TRAPS);
+  /**
+   * Specifies the level of detail to record for a scope.
+   */
+  enum ScopeMode {
+    // Only record a method and BCI
+    BytecodePosition,
+    // Record a method, bci and JVM frame state
+    FullFrame
+  };
+
+  void record_scope(jint pc_offset, Handle debug_info, ScopeMode scope_mode, TRAPS);
+  void record_scope(jint pc_offset, Handle position, ScopeMode scope_mode, GrowableArray<ScopeValue*>* objects, TRAPS);
   void record_object_value(ObjectValue* sv, Handle value, GrowableArray<ScopeValue*>* objects, TRAPS);
 
   GrowableArray<ScopeValue*>* record_virtual_objects(Handle debug_info, TRAPS);
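
The ScopeMode enum above separates the two levels of debug info that record_scope can emit: a bare method + BCI position for non-safepoint infopoints, and a full JVM frame for safepoints. A rough standalone Java sketch of that distinction follows; the Scope class and recordScope helper are hypothetical, not JVMCI or HotSpot API.

    // Rough model of the ScopeMode distinction; Scope and recordScope are
    // hypothetical stand-ins for the CodeInstaller machinery.
    public class ScopeModeExample {
        enum ScopeMode { BYTECODE_POSITION, FULL_FRAME }

        static final class Scope {
            final String method;
            final int bci;
            final Object[] frameState; // non-null only for FULL_FRAME scopes

            Scope(String method, int bci, Object[] frameState) {
                this.method = method;
                this.bci = bci;
                this.frameState = frameState;
            }
        }

        // BYTECODE_POSITION records just method + BCI (enough for non-safepoint
        // infopoints); FULL_FRAME additionally requires the JVM frame state that
        // deoptimization needs at a safepoint.
        static Scope recordScope(ScopeMode mode, String method, int bci, Object[] frameState) {
            if (mode == ScopeMode.FULL_FRAME && frameState == null) {
                throw new IllegalArgumentException("Full frame expected for debug info");
            }
            return new Scope(method, bci, mode == ScopeMode.FULL_FRAME ? frameState : null);
        }

        public static void main(String[] args) {
            Scope pos   = recordScope(ScopeMode.BYTECODE_POSITION, "C.m()V", 7, null);
            Scope frame = recordScope(ScopeMode.FULL_FRAME, "C.m()V", 7, new Object[] { 42 });
            System.out.println(pos.frameState == null);  // true
            System.out.println(frame.frameState.length); // 1
        }
    }
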
--- a/hotspot/src/share/vm/jvmci/jvmciJavaClasses.hpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/jvmci/jvmciJavaClasses.hpp	Tue Dec 01 19:18:56 2015 +0100
@@ -148,14 +148,9 @@
     int_field(CompilationResult_DataSectionReference, offset)                                                                                                  \
   end_class                                                                                                                                                    \
   start_class(InfopointReason)                                                                                                                                 \
-    static_oop_field(InfopointReason, UNKNOWN, "Ljdk/vm/ci/code/InfopointReason;")                                                                             \
     static_oop_field(InfopointReason, SAFEPOINT, "Ljdk/vm/ci/code/InfopointReason;")                                                                           \
     static_oop_field(InfopointReason, CALL, "Ljdk/vm/ci/code/InfopointReason;")                                                                                \
     static_oop_field(InfopointReason, IMPLICIT_EXCEPTION, "Ljdk/vm/ci/code/InfopointReason;")                                                                  \
-    static_oop_field(InfopointReason, METHOD_START, "Ljdk/vm/ci/code/InfopointReason;")                                                                        \
-    static_oop_field(InfopointReason, METHOD_END, "Ljdk/vm/ci/code/InfopointReason;")                                                                          \
-    static_oop_field(InfopointReason, LINE_NUMBER, "Ljdk/vm/ci/code/InfopointReason;")                                                                         \
-    static_oop_field(InfopointReason, METASPACE_ACCESS, "Ljdk/vm/ci/code/InfopointReason;")                                                                    \
   end_class                                                                                                                                                    \
   start_class(CompilationResult_Infopoint)                                                                                                                     \
     oop_field(CompilationResult_Infopoint, debugInfo, "Ljdk/vm/ci/code/DebugInfo;")                                                                            \
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Tue Dec 01 19:18:56 2015 +0100
@@ -744,7 +744,10 @@
           range(0, max_intx)                                                \
                                                                             \
   develop(bool, StressArrayCopyMacroNode, false,                            \
-          "Perform ArrayCopy load/store replacement during IGVN only")
+          "Perform ArrayCopy load/store replacement during IGVN only")      \
+                                                                            \
+  develop(bool, RenumberLiveNodes, true,                                    \
+          "Renumber live nodes")                                            \
 
 C2_FLAGS(DECLARE_DEVELOPER_FLAG, \
          DECLARE_PD_DEVELOPER_FLAG, \
--- a/hotspot/src/share/vm/opto/compile.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/opto/compile.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -2156,6 +2156,20 @@
   // so keep only the actual candidates for optimizations.
   cleanup_expensive_nodes(igvn);
 
+  if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
+    Compile::TracePhase tp("", &timers[_t_renumberLive]);
+    initial_gvn()->replace_with(&igvn);
+    for_igvn()->clear();
+    Unique_Node_List new_worklist(C->comp_arena());
+    {
+      ResourceMark rm;
+      PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
+    }
+    set_for_igvn(&new_worklist);
+    igvn = PhaseIterGVN(initial_gvn());
+    igvn.optimize();
+  }
+
   // Perform escape analysis
   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
     if (has_loops()) {
--- a/hotspot/src/share/vm/opto/library_call.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -152,6 +152,8 @@
   Node* generate_limit_guard(Node* offset, Node* subseq_length,
                              Node* array_length,
                              RegionNode* region);
+  void  generate_string_range_check(Node* array, Node* offset,
+                                    Node* length, bool char_count);
   Node* generate_current_thread(Node* &tls_output);
   Node* load_mirror_from_klass(Node* klass);
   Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
@@ -204,6 +206,8 @@
   bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
   bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
   bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
+  Node* make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
+                          RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae);
   bool inline_string_indexOfChar();
   bool inline_string_equals(StrIntrinsicNode::ArgEnc ae);
   bool inline_string_toBytesU();
@@ -897,6 +901,31 @@
   return is_over;
 }
 
+// Emit range checks for the given String.value byte array
+void LibraryCallKit::generate_string_range_check(Node* array, Node* offset, Node* count, bool char_count) {
+  if (stopped()) {
+    return; // already stopped
+  }
+  RegionNode* bailout = new RegionNode(1);
+  record_for_igvn(bailout);
+  if (char_count) {
+    // Convert char count to byte count
+    count = _gvn.transform(new LShiftINode(count, intcon(1)));
+  }
+
+  // Offset and count must not be negative
+  generate_negative_guard(offset, bailout);
+  generate_negative_guard(count, bailout);
+  // Offset + count must not exceed length of array
+  generate_limit_guard(offset, count, load_array_length(array), bailout);
+
+  if (bailout->req() > 1) {
+    PreserveJVMState pjvms(this);
+    set_control(_gvn.transform(bailout));
+    uncommon_trap(Deoptimization::Reason_intrinsic,
+                  Deoptimization::Action_maybe_recompile);
+  }
+}
 
 //--------------------------generate_current_thread--------------------
 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
@@ -1016,7 +1045,9 @@
 
 //------------------------------inline_hasNegatives------------------------------
 bool LibraryCallKit::inline_hasNegatives() {
-  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
+  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
+    return false;
+  }
 
   assert(callee()->signature()->size() == 3, "hasNegatives has 3 parameters");
   // no receiver since it is static method
@@ -1024,26 +1055,14 @@
   Node* offset     = argument(1);
   Node* len        = argument(2);
 
-  RegionNode* bailout = new RegionNode(1);
-  record_for_igvn(bailout);
-
-  // offset must not be negative.
-  generate_negative_guard(offset, bailout);
-
-  // offset + length must not exceed length of ba.
-  generate_limit_guard(offset, len, load_array_length(ba), bailout);
-
-  if (bailout->req() > 1) {
-    PreserveJVMState pjvms(this);
-    set_control(_gvn.transform(bailout));
-    uncommon_trap(Deoptimization::Reason_intrinsic,
-                  Deoptimization::Action_maybe_recompile);
-  }
-  if (!stopped()) {
-    Node* ba_start = array_element_address(ba, offset, T_BYTE);
-    Node* result = new HasNegativesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
-    set_result(_gvn.transform(result));
-  }
+  // Range checks
+  generate_string_range_check(ba, offset, len, false);
+  if (stopped()) {
+    return true;
+  }
+  Node* ba_start = array_element_address(ba, offset, T_BYTE);
+  Node* result = new HasNegativesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
+  set_result(_gvn.transform(result));
   return true;
 }
 
@@ -1124,30 +1143,10 @@
     tgt_count = _gvn.transform(new RShiftINode(tgt_count, intcon(1)));
   }
 
-  // Check for substr count > string count
-  Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
-  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
-  Node* if_gt = generate_slow_guard(bol, NULL);
-  if (if_gt != NULL) {
-    result_phi->init_req(2, intcon(-1));
-    result_rgn->init_req(2, if_gt);
-  }
-
-  if (!stopped()) {
-    // Check for substr count == 0
-    cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
-    bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
-    Node* if_zero = generate_slow_guard(bol, NULL);
-    if (if_zero != NULL) {
-      result_phi->init_req(3, intcon(0));
-      result_rgn->init_req(3, if_zero);
-    }
-  }
-
-  if (!stopped()) {
-    Node* result = make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
-    result_phi->init_req(1, result);
-    result_rgn->init_req(1, control());
+  Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, result_rgn, result_phi, ae);
+  if (result != NULL) {
+    result_phi->init_req(3, result);
+    result_rgn->init_req(3, control());
   }
   set_control(_gvn.transform(result_rgn));
   record_for_igvn(result_rgn);
@@ -1158,44 +1157,53 @@
 
 //-----------------------------inline_string_indexOf-----------------------
 bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
+  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
+    return false;
+  }
   if (!Matcher::has_match_rule(Op_StrIndexOf) || !UseSSE42Intrinsics) {
     return false;
   }
   assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
   Node* src         = argument(0); // byte[]
-  Node* src_count   = argument(1);
+  Node* src_count   = argument(1); // char count
   Node* tgt         = argument(2); // byte[]
-  Node* tgt_count   = argument(3);
-  Node* from_index  = argument(4);
-
-  // Java code which calls this method has range checks for from_index value.
-  src_count = _gvn.transform(new SubINode(src_count, from_index));
+  Node* tgt_count   = argument(3); // char count
+  Node* from_index  = argument(4); // char index
 
   // Multiply byte array index by 2 if String is UTF16 encoded
   Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
+  src_count = _gvn.transform(new SubINode(src_count, from_index));
   Node* src_start = array_element_address(src, src_offset, T_BYTE);
   Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
 
-  Node* result = make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
-
-  // The result is index relative to from_index if substring was found, -1 otherwise.
-  // Generate code which will fold into cmove.
-  RegionNode* region = new RegionNode(3);
+  // Range checks
+  generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL);
+  generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU);
+  if (stopped()) {
+    return true;
+  }
+
+  RegionNode* region = new RegionNode(5);
   Node* phi = new PhiNode(region, TypeInt::INT);
 
-  Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
-  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
-
-  Node* if_lt = generate_slow_guard(bol, NULL);
-  if (if_lt != NULL) {
-    // result == -1
-    phi->init_req(2, result);
-    region->init_req(2, if_lt);
-  }
-  if (!stopped()) {
-    result = _gvn.transform(new AddINode(result, from_index));
-    phi->init_req(1, result);
-    region->init_req(1, control());
+  Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, region, phi, ae);
+  if (result != NULL) {
+    // The result is index relative to from_index if substring was found, -1 otherwise.
+    // Generate code which will fold into cmove.
+    Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
+    Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
+
+    Node* if_lt = generate_slow_guard(bol, NULL);
+    if (if_lt != NULL) {
+      // result == -1
+      phi->init_req(3, result);
+      region->init_req(3, if_lt);
+    }
+    if (!stopped()) {
+      result = _gvn.transform(new AddINode(result, from_index));
+      phi->init_req(4, result);
+      region->init_req(4, control());
+    }
   }
 
   set_control(_gvn.transform(region));
@@ -1205,8 +1213,38 @@
   return true;
 }
 
+// Create StrIndexOfNode with fast path checks
+Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
+                                        RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae) {
+  // Check for substr count > string count
+  Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
+  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
+  Node* if_gt = generate_slow_guard(bol, NULL);
+  if (if_gt != NULL) {
+    phi->init_req(1, intcon(-1));
+    region->init_req(1, if_gt);
+  }
+  if (!stopped()) {
+    // Check for substr count == 0
+    cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
+    bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
+    Node* if_zero = generate_slow_guard(bol, NULL);
+    if (if_zero != NULL) {
+      phi->init_req(2, intcon(0));
+      region->init_req(2, if_zero);
+    }
+  }
+  if (!stopped()) {
+    return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
+  }
+  return NULL;
+}
+
 //-----------------------------inline_string_indexOfChar-----------------------
 bool LibraryCallKit::inline_string_indexOfChar() {
+  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
+    return false;
+  }
   if (!Matcher::has_match_rule(Op_StrIndexOfChar) || !(UseSSE > 4)) {
     return false;
   }
@@ -1218,9 +1256,14 @@
 
   Node* src_offset = _gvn.transform(new LShiftINode(from_index, intcon(1)));
   Node* src_start = array_element_address(src, src_offset, T_BYTE);
-
   Node* src_count = _gvn.transform(new SubINode(max, from_index));
 
+  // Range checks
+  generate_string_range_check(src, src_offset, src_count, true);
+  if (stopped()) {
+    return true;
+  }
+
   RegionNode* region = new RegionNode(3);
   Node* phi = new PhiNode(region, TypeInt::INT);
 
@@ -1256,6 +1299,9 @@
 //   void StringLatin1.inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len)
 //   void StringLatin1.inflate(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
 bool LibraryCallKit::inline_string_copy(bool compress) {
+  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
+    return false;
+  }
   int nargs = 5;  // 2 oops, 3 ints
   assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");
 
@@ -1278,6 +1324,13 @@
          (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
          "Unsupported array types for inline_string_copy");
 
+  // Range checks
+  generate_string_range_check(src, src_offset, length, compress && src_elem == T_BYTE);
+  generate_string_range_check(dst, dst_offset, length, !compress && dst_elem == T_BYTE);
+  if (stopped()) {
+    return true;
+  }
+
   // Convert char[] offsets to byte[] offsets
   if (compress && src_elem == T_BYTE) {
     src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
@@ -1329,6 +1382,9 @@
 //------------------------inline_string_toBytesU--------------------------
 // public static byte[] StringUTF16.toBytes(char[] value, int off, int len)
 bool LibraryCallKit::inline_string_toBytesU() {
+  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
+    return false;
+  }
   // Get the arguments.
   Node* value     = argument(0);
   Node* offset    = argument(1);
@@ -1347,8 +1403,11 @@
     RegionNode* bailout = new RegionNode(1);
     record_for_igvn(bailout);
 
+    // Range checks
+    generate_negative_guard(offset, bailout);
+    generate_negative_guard(length, bailout);
+    generate_limit_guard(offset, length, load_array_length(value), bailout);
     // Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE
-    generate_negative_guard(length, bailout);
     generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout);
 
     if (bailout->req() > 1) {
@@ -1357,9 +1416,9 @@
       uncommon_trap(Deoptimization::Reason_intrinsic,
                     Deoptimization::Action_maybe_recompile);
     }
-    if (stopped()) return true;
-
-    // Range checks are done by caller.
+    if (stopped()) {
+      return true;
+    }
 
     Node* size = _gvn.transform(new LShiftINode(length, intcon(1)));
     Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
@@ -1412,12 +1471,14 @@
 }
 
 //------------------------inline_string_getCharsU--------------------------
-// public void StringUTF16.getChars(byte[] value, int srcBegin, int srcEnd, char dst[], int dstBegin)
+// public void StringUTF16.getChars(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin)
 bool LibraryCallKit::inline_string_getCharsU() {
-  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
+  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
+    return false;
+  }
 
   // Get the arguments.
-  Node* value     = argument(0);
+  Node* src       = argument(0);
   Node* src_begin = argument(1);
   Node* src_end   = argument(2); // exclusive offset (i < src_end)
   Node* dst       = argument(3);
@@ -1428,21 +1489,26 @@
   AllocateArrayNode* alloc = tightly_coupled_allocation(dst, NULL);
 
   // Check if a null path was taken unconditionally.
-  value = null_check(value);
+  src = null_check(src);
   dst = null_check(dst);
   if (stopped()) {
     return true;
   }
 
-  // Range checks are done by caller.
-
   // Get length and convert char[] offset to byte[] offset
   Node* length = _gvn.transform(new SubINode(src_end, src_begin));
   src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
 
+  // Range checks
+  generate_string_range_check(src, src_begin, length, true);
+  generate_string_range_check(dst, dst_begin, length, false);
+  if (stopped()) {
+    return true;
+  }
+
   if (!stopped()) {
     // Calculate starting addresses.
-    Node* src_start = array_element_address(value, src_begin, T_BYTE);
+    Node* src_start = array_element_address(src, src_begin, T_BYTE);
     Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
 
     // Check if array addresses are aligned to HeapWordSize
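
generate_string_range_check (added above) centralizes the guard "offset >= 0, count >= 0, offset + count <= array length", doubling the count when it denotes chars stored in a UTF-16 byte[] value array. The plain-Java sketch below shows the condition being guarded; it is not the C2 IR, and the helper name is made up. In the intrinsic a failed check does not throw: control branches to an uncommon trap so the bytecode is re-executed in the interpreter, which then raises the exception.

    // Plain-Java sketch of the condition guarded by generate_string_range_check;
    // helper name and values are illustrative, this is not the C2 IR.
    public class StringRangeCheckExample {
        // charCount == true means 'count' counts chars while 'array' is the
        // UTF-16 byte[] backing a String, so every char occupies 2 bytes.
        static boolean inRange(byte[] array, int offset, int count, boolean charCount) {
            long byteCount = charCount ? 2L * count : count; // long avoids int overflow in the sketch
            return offset >= 0
                && byteCount >= 0
                && offset + byteCount <= array.length;
        }

        public static void main(String[] args) {
            byte[] value = new byte[84]; // UTF-16 backing array holding 42 chars
            System.out.println(inRange(value, 0, 42, true));  // true
            System.out.println(inRange(value, -1, 42, true)); // false: negative offset
            System.out.println(inRange(value, 2, 42, true));  // false: runs past the end
        }
    }
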
--- a/hotspot/src/share/vm/opto/node.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/opto/node.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -316,6 +316,9 @@
 // Create a Node, with a given number of required edges.
 Node::Node(uint req)
   : _idx(Init(req))
+#ifdef ASSERT
+  , _parse_idx(_idx)
+#endif
 {
   assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
   debug_only( verify_construction() );
@@ -335,6 +338,9 @@
 //------------------------------Node-------------------------------------------
 Node::Node(Node *n0)
   : _idx(Init(1))
+#ifdef ASSERT
+  , _parse_idx(_idx)
+#endif
 {
   debug_only( verify_construction() );
   NOT_PRODUCT(nodes_created++);
@@ -347,6 +353,9 @@
 //------------------------------Node-------------------------------------------
 Node::Node(Node *n0, Node *n1)
   : _idx(Init(2))
+#ifdef ASSERT
+  , _parse_idx(_idx)
+#endif
 {
   debug_only( verify_construction() );
   NOT_PRODUCT(nodes_created++);
@@ -361,6 +370,9 @@
 //------------------------------Node-------------------------------------------
 Node::Node(Node *n0, Node *n1, Node *n2)
   : _idx(Init(3))
+#ifdef ASSERT
+  , _parse_idx(_idx)
+#endif
 {
   debug_only( verify_construction() );
   NOT_PRODUCT(nodes_created++);
@@ -377,6 +389,9 @@
 //------------------------------Node-------------------------------------------
 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
   : _idx(Init(4))
+#ifdef ASSERT
+  , _parse_idx(_idx)
+#endif
 {
   debug_only( verify_construction() );
   NOT_PRODUCT(nodes_created++);
@@ -395,6 +410,9 @@
 //------------------------------Node-------------------------------------------
 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
   : _idx(Init(5))
+#ifdef ASSERT
+  , _parse_idx(_idx)
+#endif
 {
   debug_only( verify_construction() );
   NOT_PRODUCT(nodes_created++);
@@ -416,6 +434,9 @@
 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
                      Node *n4, Node *n5)
   : _idx(Init(6))
+#ifdef ASSERT
+  , _parse_idx(_idx)
+#endif
 {
   debug_only( verify_construction() );
   NOT_PRODUCT(nodes_created++);
@@ -439,6 +460,9 @@
 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
                      Node *n4, Node *n5, Node *n6)
   : _idx(Init(7))
+#ifdef ASSERT
+  , _parse_idx(_idx)
+#endif
 {
   debug_only( verify_construction() );
   NOT_PRODUCT(nodes_created++);
--- a/hotspot/src/share/vm/opto/node.hpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/opto/node.hpp	Tue Dec 01 19:18:56 2015 +0100
@@ -293,10 +293,16 @@
 
  public:
   // Each Node is assigned a unique small/dense number.  This number is used
-  // to index into auxiliary arrays of data and bitvectors.
-  // It is declared const to defend against inadvertant assignment,
-  // since it is used by clients as a naked field.
+  // to index into auxiliary arrays of data and bit vectors.
+  // The field _idx is declared constant to defend against inadvertent assignments,
+  // since it is used by clients as a naked field. However, the field's value can be
+  // changed using the set_idx() method.
+  //
+  // The PhaseRenumberLive phase renumbers nodes based on liveness information.
+  // Therefore, it updates the value of the _idx field. The parse-time _idx is
+  // preserved in _parse_idx.
   const node_idx_t _idx;
+  DEBUG_ONLY(const node_idx_t _parse_idx;)
 
   // Get the (read-only) number of input edges
   uint req() const { return _cnt; }
--- a/hotspot/src/share/vm/opto/phase.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/opto/phase.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -77,6 +77,7 @@
          tty->print_cr("           Other:               %7.3f s", other);
        }
     }
+    tty->print_cr ("         Renumber Live:       %7.3f s", timers[_t_renumberLive].seconds());
     tty->print_cr ("         IdealLoop:           %7.3f s", timers[_t_idealLoop].seconds());
     tty->print_cr ("         IdealLoop Verify:    %7.3f s", timers[_t_idealLoopVerify].seconds());
     tty->print_cr ("         Cond Const Prop:     %7.3f s", timers[_t_ccp].seconds());
@@ -88,6 +89,7 @@
       (timers[_t_escapeAnalysis].seconds() +
        timers[_t_iterGVN].seconds() +
        timers[_t_incrInline].seconds() +
+       timers[_t_renumberLive].seconds() +
        timers[_t_idealLoop].seconds() +
        timers[_t_idealLoopVerify].seconds() +
        timers[_t_ccp].seconds() +
--- a/hotspot/src/share/vm/opto/phase.hpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/opto/phase.hpp	Tue Dec 01 19:18:56 2015 +0100
@@ -42,22 +42,23 @@
 class Phase : public StackObj {
 public:
   enum PhaseNumber {
-    Compiler,                   // Top-level compiler phase
-    Parser,                     // Parse bytecodes
-    Remove_Useless,             // Remove useless nodes
-    Optimistic,                 // Optimistic analysis phase
-    GVN,                        // Pessimistic global value numbering phase
-    Ins_Select,                 // Instruction selection phase
-    CFG,                        // Build a CFG
-    BlockLayout,                // Linear ordering of blocks
-    Register_Allocation,        // Register allocation, duh
-    LIVE,                       // Dragon-book LIVE range problem
-    StringOpts,                 // StringBuilder related optimizations
-    Interference_Graph,         // Building the IFG
-    Coalesce,                   // Coalescing copies
-    Ideal_Loop,                 // Find idealized trip-counted loops
-    Macro_Expand,               // Expand macro nodes
-    Peephole,                   // Apply peephole optimizations
+    Compiler,                         // Top-level compiler phase
+    Parser,                           // Parse bytecodes
+    Remove_Useless,                   // Remove useless nodes
+    Remove_Useless_And_Renumber_Live, // First, remove useless nodes from the graph. Then, renumber live nodes.
+    Optimistic,                       // Optimistic analysis phase
+    GVN,                              // Pessimistic global value numbering phase
+    Ins_Select,                       // Instruction selection phase
+    CFG,                              // Build a CFG
+    BlockLayout,                      // Linear ordering of blocks
+    Register_Allocation,              // Register allocation, duh
+    LIVE,                             // Dragon-book LIVE range problem
+    StringOpts,                       // StringBuilder related optimizations
+    Interference_Graph,               // Building the IFG
+    Coalesce,                         // Coalescing copies
+    Ideal_Loop,                       // Find idealized trip-counted loops
+    Macro_Expand,                     // Expand macro nodes
+    Peephole,                         // Apply peephole optimizations
     last_phase
   };
 
@@ -73,6 +74,7 @@
         _t_incrInline_igvn,
         _t_incrInline_pru,
         _t_incrInline_inline,
+      _t_renumberLive,
       _t_idealLoop,
       _t_idealLoopVerify,
       _t_ccp,
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Tue Dec 01 19:18:56 2015 +0100
@@ -406,7 +406,7 @@
 //=============================================================================
 //------------------------------PhaseRemoveUseless-----------------------------
 // 1) Use a breadthfirst walk to collect useful nodes reachable from root.
-PhaseRemoveUseless::PhaseRemoveUseless( PhaseGVN *gvn, Unique_Node_List *worklist ) : Phase(Remove_Useless),
+PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN *gvn, Unique_Node_List *worklist, PhaseNumber phase_num) : Phase(phase_num),
   _useful(Thread::current()->resource_area()) {
 
   // Implementation requires 'UseLoopSafepoints == true' and an edge from root
@@ -443,6 +443,82 @@
   }
 }
 
+//=============================================================================
+//------------------------------PhaseRenumberLive------------------------------
+// First, remove useless nodes (equivalent to identifying live nodes).
+// Then, renumber live nodes.
+//
+// The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
+// If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
+// PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique
+// value in the range [0, x).
+//
+// At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
+// updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
+//
+// The PhaseRenumberLive phase updates two data structures with the new node IDs.
+// (1) The worklist is used by the PhaseIterGVN phase to identify nodes that must be
+// processed. A new worklist (with the updated node IDs) is returned in 'new_worklist'.
+// (2) Type information (the field PhaseGVN::_types) maps type information to each
+// node ID. The mapping is updated to use the new node IDs as well. Updated type
+// information is returned in PhaseGVN::_types.
+//
+// The PhaseRenumberLive phase does not preserve the order of elements in the worklist.
+//
+// Other data structures used by the compiler are not updated. The hash table for value
+// numbering (the field PhaseGVN::_table) is not updated because computing the hash
+// values is not based on node IDs. The field PhaseGVN::_nodes is not updated either
+// because it is empty wherever PhaseRenumberLive is used.
+PhaseRenumberLive::PhaseRenumberLive(PhaseGVN* gvn,
+                                     Unique_Node_List* worklist, Unique_Node_List* new_worklist,
+                                     PhaseNumber phase_num) :
+  PhaseRemoveUseless(gvn, worklist, Remove_Useless_And_Renumber_Live) {
+
+  assert(RenumberLiveNodes, "RenumberLiveNodes must be set to true for node renumbering to take place");
+  assert(C->live_nodes() == _useful.size(), "the number of live nodes must match the number of useful nodes");
+  assert(gvn->nodes_size() == 0, "GVN must not contain any nodes at this point");
+
+  uint old_unique_count = C->unique();
+  uint live_node_count = C->live_nodes();
+  uint worklist_size = worklist->size();
+
+  // Storage for the updated type information.
+  Type_Array new_type_array(C->comp_arena());
+
+  // Iterate over the set of live nodes.
+  uint current_idx = 0; // The current new node ID. Incremented after every assignment.
+  for (uint i = 0; i < _useful.size(); i++) {
+    Node* n = _useful.at(i);
+    const Type* type = gvn->type_or_null(n);
+    new_type_array.map(current_idx, type);
+
+    bool in_worklist = false;
+    if (worklist->member(n)) {
+      in_worklist = true;
+    }
+
+    n->set_idx(current_idx); // Update node ID.
+
+    if (in_worklist) {
+      new_worklist->push(n);
+    }
+
+    current_idx++;
+  }
+
+  assert(worklist_size == new_worklist->size(), "the new worklist must have the same size as the original worklist");
+  assert(live_node_count == current_idx, "all live nodes must be processed");
+
+  // Replace the compiler's type information with the updated type information.
+  gvn->replace_types(new_type_array);
+
+  // Update the unique node count of the compilation to the number of currently live nodes.
+  C->set_unique(live_node_count);
+
+  // Set the dead node count to 0 and reset dead node list.
+  C->reset_dead_node_list();
+}
+
 
 //=============================================================================
 //------------------------------PhaseTransform---------------------------------
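
The comment block above explains PhaseRenumberLive: after useless nodes are removed, the x live nodes get compact IDs in [0, x), and the IGVN worklist and the per-node type table are remapped to the new IDs. Here is a toy, self-contained Java model of that renumbering; ToyNode and the maps below are stand-ins for C2's Node, Unique_Node_List and Type_Array.

    import java.util.HashMap;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    // Toy model of PhaseRenumberLive: compact the IDs of live nodes into
    // [0, liveCount) and remap auxiliary tables keyed by node ID.
    public class RenumberLiveExample {
        static final class ToyNode {
            int idx;
            final String name;
            ToyNode(int idx, String name) { this.idx = idx; this.name = name; }
            public String toString() { return name + "#" + idx; }
        }

        public static void main(String[] args) {
            // Live nodes, in the order the useful-node walk found them.
            // IDs are sparse because dead nodes were removed earlier.
            List<ToyNode> useful = List.of(
                    new ToyNode(0, "Root"), new ToyNode(5, "AddI"), new ToyNode(9, "ConI"));
            Map<Integer, String> types = Map.of(0, "Root", 5, "int", 9, "int:3");
            Set<ToyNode> worklist = new LinkedHashSet<>(List.of(useful.get(1)));

            Map<Integer, String> newTypes = new HashMap<>();
            Set<ToyNode> newWorklist = new LinkedHashSet<>();
            int currentIdx = 0;
            for (ToyNode n : useful) {
                newTypes.put(currentIdx, types.get(n.idx)); // remap type info to the new ID
                boolean inWorklist = worklist.contains(n);  // query before the ID changes
                n.idx = currentIdx++;                       // assign the compact ID
                if (inWorklist) {
                    newWorklist.add(n);
                }
            }
            // After renumbering, the unique-node count equals the live-node count.
            System.out.println(useful);      // [Root#0, AddI#1, ConI#2]
            System.out.println(newWorklist); // [AddI#1]
            System.out.println(newTypes);    // {0=Root, 1=int, 2=int:3}
        }
    }

Note how worklist membership is checked before the ID is reassigned, mirroring the real code where Unique_Node_List membership is keyed by the node's _idx.
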
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Mon Nov 30 13:55:06 2015 -0800
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Tue Dec 01 19:18:56 2015 +0100
@@ -148,11 +148,21 @@
   Unique_Node_List _useful;   // Nodes reachable from root
                               // list is allocated from current resource area
 public:
-  PhaseRemoveUseless( PhaseGVN *gvn, Unique_Node_List *worklist );
+  PhaseRemoveUseless(PhaseGVN *gvn, Unique_Node_List *worklist, PhaseNumber phase_num = Remove_Useless);
 
   Unique_Node_List *get_useful() { return &_useful; }
 };
 
+//------------------------------PhaseRenumber----------------------------------
+// Phase that first performs a PhaseRemoveUseless, then it renumbers compiler
+// structures accordingly.
+class PhaseRenumberLive : public PhaseRemoveUseless {
+public:
+  PhaseRenumberLive(PhaseGVN* gvn,
+                    Unique_Node_List* worklist, Unique_Node_List* new_worklist,
+                    PhaseNumber phase_num = Remove_Useless_And_Renumber_Live);
+};
+
 
 //------------------------------PhaseTransform---------------------------------
 // Phases that analyze, then transform.  Constructing the Phase object does any
@@ -162,7 +172,7 @@
 class PhaseTransform : public Phase {
 protected:
   Arena*     _arena;
-  Node_Array _nodes;           // Map old node indices to new nodes.
+  Node_List  _nodes;           // Map old node indices to new nodes.
   Type_Array _types;           // Map old node indices to Types.
 
   // ConNode caches:
@@ -187,7 +197,13 @@
 
   Arena*      arena()   { return _arena; }
   Type_Array& types()   { return _types; }
+  void replace_types(Type_Array new_types) {
+    _types = new_types;
+  }
   // _nodes is used in varying ways by subclasses, which define local accessors
+  uint nodes_size() {
+    return _nodes.size();
+  }
 
 public:
   // Get a previously recorded type for the node n.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/string/TestStringConstruction.java	Tue Dec 01 19:18:56 2015 +0100
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8142303
+ * @summary Tests handling of invalid array indices in C2 intrinsic if explicit range check in Java code is not inlined.
+ * @run main/othervm -XX:CompileCommand=inline,java.lang.String::* -XX:CompileCommand=inline,java.lang.StringUTF16::* -XX:CompileCommand=exclude,java.lang.String::checkBoundsOffCount TestStringConstruction
+ */
+public class TestStringConstruction {
+
+    public static void main(String[] args) {
+        char[] chars = new char[42];
+        for (int i = 0; i < 10_000; ++i) {
+            test(chars);
+        }
+    }
+
+    private static String test(char[] chars) {
+        try {
+            // The constructor calls String::checkBoundsOffCount(-1, 42) to perform
+            // range checks on offset and count. If this method is not inlined, C2
+            // does not know about the explicit range checks and does not cut off the
+            // dead code. As a result, -1 is fed as offset into the StringUTF16.compress
+            // intrinsic which is replaced by TOP and causes a failure in the matcher.
+            return new String(chars, -1 , 42);
+        } catch (Exception e) {
+            return "";
+        }
+    }
+}
+