Merge
author duke
date Wed, 05 Jul 2017 17:22:53 +0200
changeset 6549 c185fa717e00
parent 6548 382d78ea38b0 (current diff)
parent 6479 7f96f194b139 (diff)
child 6556 a318f1d17415
--- a/.hgtags-top-repo	Fri Sep 24 16:41:32 2010 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 17:22:53 2017 +0200
@@ -85,3 +85,4 @@
 140fdef4ddf52244013b6157dc542cd9f677bb6f jdk7-b108
 81dfc728d7bb7e1fff4a4dc6d0f7cea5a3315667 jdk7-b109
 2a02d4a6955c7c078aee9a604cb3be409800d82c jdk7-b110
+9702d6fef68e17533ee7fcf5923b11ead3e912ce jdk7-b111
--- a/hotspot/.hgtags	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 17:22:53 2017 +0200
@@ -120,3 +120,4 @@
 e44a93947ccbfce712b51725f313163606f15486 jdk7-b108
 cc4bb3022b3144dc5db0805b9ef6c7eff2aa3b81 jdk7-b109
 2f25f2b8de2700a1822463b1bd3d02b5e218018f jdk7-b110
+07b042e13dde4f3479ba9ec55120fcd5e8623323 jdk7-b111
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Jul 05 17:22:53 2017 +0200
@@ -1037,7 +1037,7 @@
                             public void prologue(Address start, Address end) {
                             }
                             public void visit(CodeBlob blob) {
-                                fout.println(gen.genHTML(blob.instructionsBegin()));
+                                fout.println(gen.genHTML(blob.contentBegin()));
                             }
                             public void epilogue() {
                             }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/c1/Runtime1.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/c1/Runtime1.java	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,7 @@
 
   /** FIXME: consider making argument "type-safe" in Java port */
   public Address entryFor(int id) {
-    return blobFor(id).instructionsBegin();
+    return blobFor(id).codeBegin();
   }
 
   /** FIXME: consider making argument "type-safe" in Java port */
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,8 @@
   private static CIntegerField sizeField;
   private static CIntegerField headerSizeField;
   private static CIntegerField relocationSizeField;
-  private static CIntegerField instructionsOffsetField;
+  private static CIntegerField contentOffsetField;
+  private static CIntegerField codeOffsetField;
   private static CIntegerField frameCompleteOffsetField;
   private static CIntegerField dataOffsetField;
   private static CIntegerField frameSizeField;
@@ -68,7 +69,8 @@
     headerSizeField          = type.getCIntegerField("_header_size");
     relocationSizeField      = type.getCIntegerField("_relocation_size");
     frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset");
-    instructionsOffsetField  = type.getCIntegerField("_instructions_offset");
+    contentOffsetField       = type.getCIntegerField("_content_offset");
+    codeOffsetField          = type.getCIntegerField("_code_offset");
     dataOffsetField          = type.getCIntegerField("_data_offset");
     frameSizeField           = type.getCIntegerField("_frame_size");
     oopMapsField             = type.getAddressField("_oop_maps");
@@ -111,11 +113,19 @@
   //  public RelocInfo relocationBegin();
   //  public RelocInfo relocationEnd();
 
-  public Address instructionsBegin() {
-    return headerBegin().addOffsetTo(instructionsOffsetField.getValue(addr));
+  public Address contentBegin() {
+    return headerBegin().addOffsetTo(contentOffsetField.getValue(addr));
   }
 
-  public Address instructionsEnd() {
+  public Address contentEnd() {
+    return headerBegin().addOffsetTo(dataOffsetField.getValue(addr));
+  }
+
+  public Address codeBegin() {
+    return headerBegin().addOffsetTo(contentOffsetField.getValue(addr));
+  }
+
+  public Address codeEnd() {
     return headerBegin().addOffsetTo(dataOffsetField.getValue(addr));
   }
 
@@ -128,24 +138,27 @@
   }
 
   // Offsets
-  public int getRelocationOffset()   { return (int) headerSizeField.getValue(addr);         }
-  public int getInstructionsOffset() { return (int) instructionsOffsetField.getValue(addr); }
-  public int getDataOffset()         { return (int) dataOffsetField.getValue(addr);         }
+  public int getRelocationOffset() { return (int) headerSizeField   .getValue(addr); }
+  public int getContentOffset()    { return (int) contentOffsetField.getValue(addr); }
+  public int getCodeOffset()       { return (int) codeOffsetField   .getValue(addr); }
+  public int getDataOffset()       { return (int) dataOffsetField   .getValue(addr); }
 
   // Sizes
-  public int getSize()             { return (int) sizeField.getValue(addr);                     }
-  public int getHeaderSize()       { return (int) headerSizeField.getValue(addr);               }
+  public int getSize()             { return (int) sizeField      .getValue(addr);     }
+  public int getHeaderSize()       { return (int) headerSizeField.getValue(addr);     }
   // FIXME: add getRelocationSize()
-  public int getInstructionsSize() { return (int) instructionsEnd().minus(instructionsBegin()); }
-  public int getDataSize()         { return (int) dataEnd().minus(dataBegin());                 }
+  public int getContentSize()      { return (int) contentEnd().minus(contentBegin()); }
+  public int getCodeSize()         { return (int) codeEnd()   .minus(codeBegin());    }
+  public int getDataSize()         { return (int) dataEnd()   .minus(dataBegin());    }
 
   // Containment
-  public boolean blobContains(Address addr)         { return headerBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr);               }
+  public boolean blobContains(Address addr)    { return headerBegin() .lessThanOrEqual(addr) && dataEnd()   .greaterThan(addr); }
   // FIXME: add relocationContains
-  public boolean instructionsContains(Address addr) { return instructionsBegin().lessThanOrEqual(addr) && instructionsEnd().greaterThan(addr); }
-  public boolean dataContains(Address addr)         { return dataBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr);                 }
-  public boolean contains(Address addr)             { return instructionsContains(addr);                                                       }
-  public boolean isFrameCompleteAt(Address a)       { return instructionsContains(a) && a.minus(instructionsBegin()) >= frameCompleteOffsetField.getValue(addr); }
+  public boolean contentContains(Address addr) { return contentBegin().lessThanOrEqual(addr) && contentEnd().greaterThan(addr); }
+  public boolean codeContains(Address addr)    { return codeBegin()   .lessThanOrEqual(addr) && codeEnd()   .greaterThan(addr); }
+  public boolean dataContains(Address addr)    { return dataBegin()   .lessThanOrEqual(addr) && dataEnd()   .greaterThan(addr); }
+  public boolean contains(Address addr)        { return contentContains(addr);                                                  }
+  public boolean isFrameCompleteAt(Address a)  { return codeContains(a) && a.minus(codeBegin()) >= frameCompleteOffsetField.getValue(addr); }
 
   // Reclamation support (really only used by the nmethods, but in order to get asserts to work
   // in the CodeCache they are defined virtual here)
@@ -168,7 +181,7 @@
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(getOopMaps() != null, "nope");
     }
-    return getOopMaps().findMapAtOffset(pc.minus(instructionsBegin()), debugging);
+    return getOopMaps().findMapAtOffset(pc.minus(codeBegin()), debugging);
   }
 
   //  virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, void f(oop*)) { ShouldNotReachHere(); }
@@ -200,7 +213,8 @@
   }
 
   protected void printComponentsOn(PrintStream tty) {
-    tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
+    tty.println(" content: [" + contentBegin() + ", " + contentEnd() + "), " +
+                " code: [" + codeBegin() + ", " + codeEnd() + "), " +
                 " data: [" + dataBegin() + ", " + dataEnd() + "), " +
                 " frame size: " + getFrameSize());
   }
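
The CodeBlob.java hunk above splits the old "instructions" range into separate "content" and "code" ranges. A minimal standalone Java sketch — hypothetical class and field names, not the SA sources — of how the renamed accessors relate in a plain blob, where both ranges start at the content offset and end at the data offset, exactly as the hunk computes them:

    // Minimal sketch (hypothetical names): in a plain blob, "content" and
    // "code" cover the same [contentOffset, dataOffset) span relative to
    // the header, matching the accessors in the CodeBlob.java hunk above.
    public class BlobLayoutSketch {
        private final long headerBegin;   // base address of the blob header
        private final long contentOffset; // stands in for _content_offset
        private final long dataOffset;    // stands in for _data_offset

        public BlobLayoutSketch(long headerBegin, long contentOffset, long dataOffset) {
            this.headerBegin = headerBegin;
            this.contentOffset = contentOffset;
            this.dataOffset = dataOffset;
        }

        public long contentBegin() { return headerBegin + contentOffset; }
        public long contentEnd()   { return headerBegin + dataOffset; }
        public long codeBegin()    { return contentBegin(); } // same span in the base blob
        public long codeEnd()      { return contentEnd(); }

        public boolean codeContains(long addr) {
            return codeBegin() <= addr && addr < codeEnd();
        }

        public static void main(String[] args) {
            BlobLayoutSketch b = new BlobLayoutSketch(0x1000, 0x80, 0x400);
            System.out.println(b.codeContains(0x1100)); // true: 0x1100 is in [0x1080, 0x1400)
        }
    }

In an nmethod the spans can diverge — constants sit at contentBegin() before the code, per constantsBegin()/constantsEnd() in the next hunk — which is why NMethod.java below renames its instruction-only range to instsBegin()/instsEnd().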
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Wed Jul 05 17:22:53 2017 +0200
@@ -134,10 +134,10 @@
   public boolean isOSRMethod()    { return getEntryBCI() != VM.getVM().getInvocationEntryBCI(); }
 
   /** Boundaries for different parts */
-  public Address constantsBegin()       { return instructionsBegin();                                }
+  public Address constantsBegin()       { return contentBegin();                                     }
   public Address constantsEnd()         { return getEntryPoint();                                    }
-  public Address codeBegin()            { return getEntryPoint();                                    }
-  public Address codeEnd()              { return headerBegin().addOffsetTo(getStubOffset());         }
+  public Address instsBegin()           { return codeBegin();                                       }
+  public Address instsEnd()             { return headerBegin().addOffsetTo(getStubOffset());         }
   public Address exceptionBegin()       { return headerBegin().addOffsetTo(getExceptionOffset());    }
   public Address deoptBegin()           { return headerBegin().addOffsetTo(getDeoptOffset());        }
   public Address stubBegin()            { return headerBegin().addOffsetTo(getStubOffset());         }
@@ -156,7 +156,7 @@
   public Address nulChkTableEnd()       { return headerBegin().addOffsetTo(getNMethodEndOffset());   }
 
   public int constantsSize()            { return (int) constantsEnd()   .minus(constantsBegin());    }
-  public int codeSize()                 { return (int) codeEnd()        .minus(codeBegin());         }
+  public int instsSize()                { return (int) instsEnd()       .minus(instsBegin());        }
   public int stubSize()                 { return (int) stubEnd()        .minus(stubBegin());         }
   public int oopsSize()                 { return (int) oopsEnd()        .minus(oopsBegin());         }
   public int scopesDataSize()           { return (int) scopesDataEnd()  .minus(scopesDataBegin());   }
@@ -169,7 +169,7 @@
   public int totalSize() {
     return
       constantsSize()    +
-      codeSize()         +
+      instsSize()        +
       stubSize()         +
       scopesDataSize()   +
       scopesPCsSize()    +
@@ -179,7 +179,7 @@
   }
 
   public boolean constantsContains   (Address addr) { return constantsBegin()   .lessThanOrEqual(addr) && constantsEnd()   .greaterThan(addr); }
-  public boolean codeContains        (Address addr) { return codeBegin()        .lessThanOrEqual(addr) && codeEnd()        .greaterThan(addr); }
+  public boolean instsContains       (Address addr) { return instsBegin()       .lessThanOrEqual(addr) && instsEnd()       .greaterThan(addr); }
   public boolean stubContains        (Address addr) { return stubBegin()        .lessThanOrEqual(addr) && stubEnd()        .greaterThan(addr); }
   public boolean oopsContains        (Address addr) { return oopsBegin()        .lessThanOrEqual(addr) && oopsEnd()        .greaterThan(addr); }
   public boolean scopesDataContains  (Address addr) { return scopesDataBegin()  .lessThanOrEqual(addr) && scopesDataEnd()  .greaterThan(addr); }
@@ -353,7 +353,8 @@
 
   protected void printComponentsOn(PrintStream tty) {
     // FIXME: add relocation information
-    tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " +
+    tty.println(" content: [" + contentBegin() + ", " + contentEnd() + "), " +
+                " code: [" + codeBegin() + ", " + codeEnd() + "), " +
                 " data: [" + dataBegin() + ", " + dataEnd() + "), " +
                 " oops: [" + oopsBegin() + ", " + oopsEnd() + "), " +
                 " frame size: " + getFrameSize());
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@
   }
 
   public Address getRealPC(NMethod code) {
-    return code.instructionsBegin().addOffsetTo(getPCOffset());
+    return code.codeBegin().addOffsetTo(getPCOffset());
   }
 
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/FindInCodeCachePanel.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/FindInCodeCachePanel.java	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -190,11 +190,11 @@
 
   private void reportResult(StringBuffer result, CodeBlob blob) {
     result.append("<a href='blob:");
-    result.append(blob.instructionsBegin().toString());
+    result.append(blob.contentBegin().toString());
     result.append("'>");
     result.append(blob.getName());
     result.append("@");
-    result.append(blob.instructionsBegin());
+    result.append(blob.contentBegin());
     result.append("</a><br>");
   }
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Wed Jul 05 17:22:53 2017 +0200
@@ -1415,13 +1415,13 @@
          buf.append(genMethodAndKlassLink(nmethod.getMethod()));
 
          buf.h3("Compiled Code");
-         sun.jvm.hotspot.debugger.Address codeBegin = nmethod.codeBegin();
-         sun.jvm.hotspot.debugger.Address codeEnd   = nmethod.codeEnd();
-         final int codeSize = (int)codeEnd.minus(codeBegin);
-         final long startPc = addressToLong(codeBegin);
-         final byte[] code = new byte[codeSize];
+         sun.jvm.hotspot.debugger.Address instsBegin = nmethod.instsBegin();
+         sun.jvm.hotspot.debugger.Address instsEnd   = nmethod.instsEnd();
+         final int instsSize = nmethod.instsSize();
+         final long startPc = addressToLong(instsBegin);
+         final byte[] code = new byte[instsSize];
          for (int i=0; i < code.length; i++)
-            code[i] = codeBegin.getJByteAt(i);
+            code[i] = instsBegin.getJByteAt(i);
 
          final long verifiedEntryPoint = addressToLong(nmethod.getVerifiedEntryPoint());
          final long entryPoint = addressToLong(nmethod.getEntryPoint());
@@ -1499,8 +1499,8 @@
          buf.h3("CodeBlob");
 
          buf.h3("Compiled Code");
-         final sun.jvm.hotspot.debugger.Address codeBegin = blob.instructionsBegin();
-         final int codeSize = blob.getInstructionsSize();
+         final sun.jvm.hotspot.debugger.Address codeBegin = blob.codeBegin();
+         final int codeSize = blob.getCodeSize();
          final long startPc = addressToLong(codeBegin);
          final byte[] code = new byte[codeSize];
          for (int i=0; i < code.length; i++)
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java	Wed Jul 05 17:22:53 2017 +0200
@@ -96,15 +96,15 @@
         if (Assert.ASSERTS_ENABLED) {
           Assert.that(loc.blob != null, "Should have found CodeBlob");
         }
-        loc.inBlobInstructions = loc.blob.instructionsContains(a);
-        loc.inBlobData         = loc.blob.dataContains(a);
+        loc.inBlobCode = loc.blob.codeContains(a);
+        loc.inBlobData = loc.blob.dataContains(a);
 
         if (loc.blob.isNMethod()) {
             NMethod nm = (NMethod) loc.blob;
             loc.inBlobOops = nm.oopsContains(a);
         }
 
-        loc.inBlobUnknownLocation = (!(loc.inBlobInstructions ||
+        loc.inBlobUnknownLocation = (!(loc.inBlobCode ||
                                        loc.inBlobData ||
                                        loc.inBlobOops));
         return loc;
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerLocation.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerLocation.java	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@
   InterpreterCodelet interpreterCodelet;
   CodeBlob blob;
   // FIXME: add more detail about CodeBlob
-  boolean inBlobInstructions;
+  boolean inBlobCode;
   boolean inBlobData;
   boolean inBlobOops;
   boolean inBlobUnknownLocation;
@@ -142,8 +142,8 @@
     return blob;
   }
 
-  public boolean isInBlobInstructions() {
-    return inBlobInstructions;
+  public boolean isInBlobCode() {
+    return inBlobCode;
   }
 
   public boolean isInBlobData() {
@@ -233,8 +233,8 @@
     } else if (isInCodeCache()) {
       CodeBlob b = getCodeBlob();
       tty.print("In ");
-      if (isInBlobInstructions()) {
-        tty.print("instructions");
+      if (isInBlobCode()) {
+        tty.print("code");
       } else if (isInBlobData()) {
         tty.print("data");
       } else if (isInBlobOops()) {
--- a/hotspot/make/hotspot_version	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/hotspot_version	Wed Jul 05 17:22:53 2017 +0200
@@ -33,9 +33,9 @@
 # Don't put quotes (fail windows build).
 HOTSPOT_VM_COPYRIGHT=Copyright 2010
 
-HS_MAJOR_VER=19
+HS_MAJOR_VER=20
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=06
+HS_BUILD_NUMBER=01
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/hotspot/make/jprt.properties	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/jprt.properties	Wed Jul 05 17:22:53 2017 +0200
@@ -47,6 +47,8 @@
 # Define the Solaris platforms we want for the various releases
 
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
+jprt.my.solaris.sparc.jdk7b107=solaris_sparc_5.10
+jprt.my.solaris.sparc.jdk7temp=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk6=solaris_sparc_5.8
 jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8
 jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8
@@ -56,6 +58,8 @@
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
+jprt.my.solaris.sparcv9.jdk7b107=solaris_sparcv9_5.10
+jprt.my.solaris.sparcv9.jdk7temp=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk6=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8
@@ -65,6 +69,8 @@
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
+jprt.my.solaris.i586.jdk7b107=solaris_i586_5.10
+jprt.my.solaris.i586.jdk7temp=solaris_i586_5.10
 jprt.my.solaris.i586.jdk6=solaris_i586_5.8
 jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8
 jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8
@@ -74,6 +80,8 @@
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
+jprt.my.solaris.x64.jdk7b107=solaris_x64_5.10
+jprt.my.solaris.x64.jdk7temp=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10
@@ -83,6 +91,8 @@
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.i586.jdk7=linux_i586_2.6
+jprt.my.linux.i586.jdk7b107=linux_i586_2.6
+jprt.my.linux.i586.jdk7temp=linux_i586_2.6
 jprt.my.linux.i586.jdk6=linux_i586_2.4
 jprt.my.linux.i586.jdk6perf=linux_i586_2.4
 jprt.my.linux.i586.jdk6u10=linux_i586_2.4
@@ -92,6 +102,8 @@
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
 jprt.my.linux.x64.jdk7=linux_x64_2.6
+jprt.my.linux.x64.jdk7b107=linux_x64_2.6
+jprt.my.linux.x64.jdk7temp=linux_x64_2.6
 jprt.my.linux.x64.jdk6=linux_x64_2.4
 jprt.my.linux.x64.jdk6perf=linux_x64_2.4
 jprt.my.linux.x64.jdk6u10=linux_x64_2.4
@@ -100,7 +112,9 @@
 jprt.my.linux.x64.jdk6u20=linux_x64_2.4
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
-jprt.my.windows.i586.jdk7=windows_i586_5.0
+jprt.my.windows.i586.jdk7=windows_i586_5.1
+jprt.my.windows.i586.jdk7b107=windows_i586_5.0
+jprt.my.windows.i586.jdk7temp=windows_i586_5.0
 jprt.my.windows.i586.jdk6=windows_i586_5.0
 jprt.my.windows.i586.jdk6perf=windows_i586_5.0
 jprt.my.windows.i586.jdk6u10=windows_i586_5.0
@@ -110,6 +124,8 @@
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
 jprt.my.windows.x64.jdk7=windows_x64_5.2
+jprt.my.windows.x64.jdk7b107=windows_x64_5.2
+jprt.my.windows.x64.jdk7temp=windows_x64_5.2
 jprt.my.windows.x64.jdk6=windows_x64_5.2
 jprt.my.windows.x64.jdk6perf=windows_x64_5.2
 jprt.my.windows.x64.jdk6u10=windows_x64_5.2
--- a/hotspot/make/linux/Makefile	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/linux/Makefile	Wed Jul 05 17:22:53 2017 +0200
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 # This makefile creates a build tree and lights off a build.
@@ -45,13 +45,13 @@
 #
 #    make REMOTE="rsh -l me myotherlinuxbox"
 
-# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding. 
-# JDI binding on SA produces two binaries: 
+# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
+# JDI binding on SA produces two binaries:
 #  1. sa-jdi.jar       - This is build before building libjvm[_g].so
 #                        Please refer to ./makefiles/sa.make
 #  2. libsa[_g].so     - Native library for SA - This is built after
 #                        libjsig[_g].so (signal interposition library)
-#                        Please refer to ./makefiles/vm.make 
+#                        Please refer to ./makefiles/vm.make
 # If $(GAMMADIR)/agent dir is not present, SA components are not built.
 
 ifeq ($(GAMMADIR),)
@@ -61,11 +61,9 @@
 endif
 include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
 
-ifndef LP64
 ifndef CC_INTERP
 FORCE_TIERED=1
 endif
-endif
 
 ifdef LP64
   ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
--- a/hotspot/make/linux/makefiles/sa.make	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/linux/makefiles/sa.make	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,9 @@
 AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
 AGENT_FILES2 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES2))
 
+AGENT_FILES1_LIST := $(GENERATED)/agent1.classes.list
+AGENT_FILES2_LIST := $(GENERATED)/agent2.classes.list
+
 SA_CLASSDIR = $(GENERATED)/saclasses
 
 SA_BUILD_VERSION_PROP = "sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION)"
@@ -79,10 +82,24 @@
 	$(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
 	  mkdir -p $(SA_CLASSDIR);        \
 	fi
-
-	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1)
-	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2)
-
+	
+# Note: When indented, make tries to execute the '$(shell' comment.
+# In some environments, cmd processors have limited line length.
+# To prevent the javac invocation in the next block from using
+# a very long cmd line, we use javac's @file-list option. We
+# generate the file lists using make's built-in 'foreach' control
+# flow which also avoids cmd processor line length issues. Since
+# the 'foreach' is done as part of make's macro expansion phase,
+# the initialization of the lists is also done in the same phase
+# using '$(shell rm ...' instead of using the more traditional
+# 'rm ...' rule.
+	$(shell rm -rf $(AGENT_FILES1_LIST) $(AGENT_FILES2_LIST))
+	$(foreach file,$(AGENT_FILES1),$(shell echo $(file) >> $(AGENT_FILES1_LIST)))
+	$(foreach file,$(AGENT_FILES2),$(shell echo $(file) >> $(AGENT_FILES2_LIST)))
+	
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST)
+	
 	$(QUIETLY) $(REMOTE) $(COMPILE.RMIC)  -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
 	$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
@@ -101,3 +118,4 @@
 clean:
 	rm -rf $(SA_CLASSDIR)
 	rm -rf $(GENERATED)/sa-jdi.jar
+	rm -rf $(AGENT_FILES1_LIST) $(AGENT_FILES2_LIST)
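
The comment block added to sa.make above explains the @file-list workaround. A minimal sketch of the same pattern in isolation — SRC_FILES, FILES_LIST, and the output directory are hypothetical stand-ins, not names from the patch:

    # Build the list file while make reads the makefile: $(shell ...) and
    # $(foreach ...) run during macro expansion, so the list exists before
    # any recipe executes and no single command line carries every file name.
    SRC_FILES  := $(wildcard src/*.java)
    FILES_LIST := files.list

    $(shell rm -f $(FILES_LIST))
    $(foreach f,$(SRC_FILES),$(shell echo $(f) >> $(FILES_LIST)))

    classes:
    	javac -d classes @$(FILES_LIST)

javac reads one file name per line from @$(FILES_LIST), so the argument list stays short no matter how many sources AGENT_FILES1/2 expand to, and the cmd-processor line-length limit the comment mentions never comes into play.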
--- a/hotspot/make/solaris/Makefile	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/solaris/Makefile	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 # This makefile creates a build tree and lights off a build.
@@ -36,13 +36,13 @@
 # or BOOTDIR has to be set. We do *not* search javac, javah, rmic etc.
 # from the PATH.
 
-# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding. 
-# JDI binding on SA produces two binaries: 
+# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
+# JDI binding on SA produces two binaries:
 #  1. sa-jdi.jar       - This is build before building libjvm[_g].so
 #                        Please refer to ./makefiles/sa.make
 #  2. libsaproc[_g].so - Native library for SA - This is built after
 #                        libjsig[_g].so (signal interposition library)
-#                        Please refer to ./makefiles/vm.make 
+#                        Please refer to ./makefiles/vm.make
 # If $(GAMMADIR)/agent dir is not present, SA components are not built.
 
 ifeq ($(GAMMADIR),)
@@ -52,11 +52,9 @@
 endif
 include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
 
-ifndef LP64
 ifndef CC_INTERP
 FORCE_TIERED=1
 endif
-endif
 
 ifdef LP64
   ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
--- a/hotspot/make/solaris/makefiles/dtrace.make	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/solaris/makefiles/dtrace.make	Wed Jul 05 17:22:53 2017 +0200
@@ -165,7 +165,7 @@
 $(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files)
 	@echo Compiling $(DTRACE).d
 
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -o $@ -s $(DTRACE).d \
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \
      $(DTraced_Files) ||\
   STATUS=$$?;\
 	if [ x"$$STATUS" = x"1" -a \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/make/solaris/makefiles/reorder_TIERED_sparcv9	Wed Jul 05 17:22:53 2017 +0200
@@ -0,0 +1,4477 @@
+data = R0x2000;
+text = LOAD ?RXO;
+
+
+text: .text%__1cCosOjavaTimeMillis6F_x_;
+text: .text%__1cQIndexSetIteratorQadvance_and_next6M_I_;
+text: .text%__1cNinstanceKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cIPhaseIFGIadd_edge6MII_i_;
+text: .text%__1cQIndexSetIterator2t6MpnIIndexSet__v_;
+text: .text%__1cENodeEjvms6kM_pnIJVMState__;
+text: .text%__1cIIndexSetWalloc_block_containing6MI_pn0AIBitBlock__;
+text: .text%__1cETypeDcmp6Fkpk03_i_;
+text: .text%__1cENodeHlatency6MI_I_;
+text: .text%__1cHRegMaskJis_bound16kM_i_;
+text: .text%__1cDff16FI_i_;
+text: .text%__1cHRegMaskESize6kM_I_;
+text: .text%__1cXresource_allocate_bytes6FI_pc_;
+text: .text%__1cENodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJVectorSet2R6MI_rnDSet__;
+text: .text%__1cHRegMaskJis_bound26kM_i_;
+text: .text%__1cNSharedRuntimeElmul6Fxx_x_;
+text: .text%__1cIMachNodeGOpcode6kM_i_;
+text: .text%__1cJiRegIOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cIIndexSetKinitialize6MI_v_;
+text: .text%__1cITypeNodeLbottom_type6kM_pknEType__;
+text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_;
+text: .text%__1cKTypeOopPtrFklass6kM_pnHciKlass__: type.o;
+text: .text%__1cETypeFuhash6Fkpk0_i_;
+text: .text%__1cQIndexSetIteratorEnext6M_I_: chaitin.o;
+text: .text%__1cENodeIout_grow6MI_v_;
+text: .text%__1cOloadConI13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNobjArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cENodeHadd_req6Mp0_v_;
+text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o;
+text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cOloadConI13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cICallNodeKmatch_edge6kMI_I_;
+text: .text%__1cINodeHashQhash_find_insert6MpnENode__2_;
+text: .text%__1cHPhiNodeGOpcode6kM_i_;
+text: .text%__1cKbranchNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
+text: .text%__1cIProjNodeGOpcode6kM_i_;
+text: .text%__1cETypeIhashcons6M_pk0_;
+text: .text%__1cOPhaseIdealLoopUbuild_loop_late_post6MpnENode_pk0_v_;
+text: .text%__1cMPhaseChaitinTinterfere_with_live6MIpnIIndexSet__v_;
+text: .text%__1cWNode_Backward_IteratorEnext6M_pnENode__;
+text: .text%__1cNIdealLoopTreeJis_member6kMpk0_i_;
+text: .text%__1cMMachCallNodeKin_RegMask6kMI_rknHRegMask__;
+text: .text%__1cHCompileNnode_bundling6MpknENode__pnGBundle__;
+text: .text%__1cGIfNodeGOpcode6kM_i_;
+text: .text%__1cOPhaseIdealLoopYsplit_if_with_blocks_pre6MpnENode__2_;
+text: .text%__1cOPhaseIdealLoopZsplit_if_with_blocks_post6MpnENode__v_;
+text: .text%__1cIUniverseMnon_oop_word6F_pv_;
+text: .text%__1cDLRGOcompute_degree6kMr0_i_;
+text: .text%__1cFArenaIArealloc6MpvII_1_;
+text: .text%__1cIConINodeGOpcode6kM_i_;
+text: .text%__1cETypeEmeet6kMpk0_2_;
+text: .text%__1cENode2t6MI_v_;
+text: .text%__1cRMachSpillCopyNodeJideal_reg6kM_I_: ad_sparc.o;
+text: .text%__1cIPipelineXfunctional_unit_latency6kMIpk0_I_;
+text: .text%__1cWPSScavengeRootsClosureGdo_oop6MppnHoopDesc__v_: psTasks.o;
+text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: cfgnode.o;
+text: .text%__1cKIfTrueNodeGOpcode6kM_i_;
+text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_;
+text: .text%__1cIMachNodeKin_RegMask6kMI_rknHRegMask__;
+text: .text%__1cJloadPNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIPhaseIFGQeffective_degree6kMI_i_;
+text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_;
+text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_;
+text: .text%__1cIAddPNodeGOpcode6kM_i_;
+text: .text%__1cIPhaseIFGJre_insert6MI_v_;
+text: .text%__1cIPhaseIFGLremove_node6MI_pnIIndexSet__;
+text: .text%__1cKNode_ArrayGinsert6MIpnENode__v_;
+text: .text%__1cHTypeIntEhash6kM_i_;
+text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cMPhaseIterGVNNtransform_old6MpnENode__2_;
+text: .text%__1cDfh16FI_i_;
+text: .text%__1cNMachIdealNodeErule6kM_I_: ad_sparc.o;
+text: .text%__1cIIndexSetKfree_block6MI_v_;
+text: .text%__1cWShouldNotReachHereNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
+text: .text%__1cLIfFalseNodeGOpcode6kM_i_;
+text: .text%__1cSCallStaticJavaNodeGOpcode6kM_i_;
+text: .text%__1cENodeEhash6kM_I_;
+text: .text%__1cOPhaseIdealLoopEsort6MpnNIdealLoopTree_2_2_;
+text: .text%__1cMMachProjNodeLbottom_type6kM_pknEType__;
+text: .text%JVM_ArrayCopy;
+text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_;
+text: .text%__1cNSharedRuntimeDl2f6Fx_f_;
+text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__;
+text: .text%__1cHConNodeGOpcode6kM_i_;
+text: .text%__1cMPhaseIterGVNWadd_users_to_worklist06MpnENode__v_;
+text: .text%__1cMMachProjNodeGOpcode6kM_i_;
+text: .text%__1cJiRegPOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cXPipeline_Use_Cycle_Mask2L6Mi_r0_: ad_sparc_pipeline.o;
+text: .text%__1cIBoolNodeGOpcode6kM_i_;
+text: .text%__1cYCallStaticJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cENodeEgrow6MI_v_;
+text: .text%__1cIciObjectEhash6M_i_;
+text: .text%__1cKRegionNodeGOpcode6kM_i_;
+text: .text%__1cOPhaseIdealLoopUbuild_loop_tree_impl6MpnENode_i_i_;
+text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o;
+text: .text%__1cRMachSpillCopyNodeLbottom_type6kM_pknEType__: ad_sparc.o;
+text: .text%__1cOPhaseIdealLoopOget_early_ctrl6MpnENode__2_;
+text: .text%__1cIIndexSetKinitialize6MIpnFArena__v_;
+text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cIPhaseGVNJtransform6MpnENode__2_;
+text: .text%__1cOoop_RelocationLunpack_data6M_v_;
+text: .text%__1cRmethodDataOopDescHdata_at6Mi_pnLProfileData__;
+text: .text%__1cPJavaFrameAnchorNmake_walkable6MpnKJavaThread__v_;
+text: .text%__1cENodeNis_block_proj6kM_pk0_;
+text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__;
+text: .text%__1cIProjNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_;
+text: .text%__1cLTypeInstPtrEhash6kM_i_;
+text: .text%__1cYCallStaticJavaDirectNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOPhaseIdealLoopThas_local_phi_input6MpnENode__2_;
+text: .text%__1cJloadINodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRMachSpillCopyNodeLout_RegMask6kM_rknHRegMask__: ad_sparc.o;
+text: .text%__1cKbranchNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMMachProjNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cMMachProjNodeLout_RegMask6kM_rknHRegMask__: classes.o;
+text: .text%__1cRMachSpillCopyNodeKin_RegMask6kMI_rknHRegMask__: ad_sparc.o;
+text: .text%__1cbAfinal_graph_reshaping_impl6FpnENode_rnUFinal_Reshape_Counts__v_: compile.o;
+text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cUParallelScavengeHeapVlarge_typearray_limit6M_I_: parallelScavengeHeap.o;
+text: .text%__1cIPhaseCCPOtransform_once6MpnENode__2_;
+text: .text%__1cGciTypeEmake6FnJBasicType__p0_;
+text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cENodeFclone6kM_p0_;
+text: .text%__1cITypeNodeEhash6kM_I_;
+text: .text%__1cMPipeline_UseMfull_latency6kMIrk0_I_;
+text: .text%__1cRMachSpillCopyNodePoper_input_base6kM_I_: ad_sparc.o;
+text: .text%__1cENodeKmatch_edge6kMI_I_;
+text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cOPhaseIdealLoopZremix_address_expressions6MpnENode__2_;
+text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_;
+text: .text%__1cICallNodeLbottom_type6kM_pknEType__;
+text: .text%__1cOPhaseIdealLoopNget_late_ctrl6MpnENode_2_2_;
+text: .text%JVM_CurrentTimeMillis;
+text: .text%__1cENodeIIdentity6MpnOPhaseTransform__p0_;
+text: .text%__1cIPipelinePoperand_latency6kMIpk0_I_;
+text: .text%__1cKTypeAryPtrEhash6kM_i_;
+text: .text%__1cETypeFxmeet6kMpk0_2_;
+text: .text%__1cILRG_ListGextend6MII_v_;
+text: .text%__1cJVectorSet2F6kMI_i_;
+text: .text%__1cENodeQIdeal_DU_postCCP6MpnIPhaseCCP__p0_;
+text: .text%__1cOtypeArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cIProjNodeEhash6kM_I_;
+text: .text%__1cIAddINodeGOpcode6kM_i_;
+text: .text%__1cIIndexSet2t6Mp0_v_;
+text: .text%__1cRmethodDataOopDescJnext_data6MpnLProfileData__2_;
+text: .text%__1cITypeNodeJideal_reg6kM_I_;
+text: .text%__1cYCallStaticJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cMloadConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cHPhiNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cENodeHsize_of6kM_I_;
+text: .text%__1cICmpPNodeGOpcode6kM_i_;
+text: .text%__1cKNode_ArrayGremove6MI_v_;
+text: .text%__1cHPhiNodeEhash6kM_I_;
+text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__;
+text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__;
+text: .text%__1cKmethodOperJnum_edges6kM_I_: ad_sparc.o;
+text: .text%__1cJStartNodeLbottom_type6kM_pknEType__;
+text: .text%__1cHTypeIntFxmeet6kMpknEType__3_;
+text: .text%__1cIProjNodeLbottom_type6kM_pknEType__;
+text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__;
+text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cICmpINodeGOpcode6kM_i_;
+text: .text%Unsafe_CompareAndSwapLong;
+text: .text%__1cNCatchProjNodeGOpcode6kM_i_;
+text: .text%__1cQUnique_Node_ListGremove6MpnENode__v_;
+text: .text%__1cENode2t6Mp0_v_;
+text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_;
+text: .text%__1cTCreateExceptionNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_;
+text: .text%__1cHRegMaskMSmearToPairs6M_v_;
+text: .text%__1cMPhaseIterGVNVadd_users_to_worklist6MpnENode__v_;
+text: .text%__1cMloadConPNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__;
+text: .text%__1cMPipeline_UseJadd_usage6Mrk0_v_;
+text: .text%__1cIAddPNodeKmatch_edge6kMI_I_;
+text: .text%__1cJiRegIOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cGIfNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cGcmpkey6Fpkv1_i_;
+text: .text%__1cMMergeMemNodeGOpcode6kM_i_;
+text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__;
+text: .text%__1cIParmNodeGOpcode6kM_i_;
+text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_;
+text: .text%__1cHTypeIntEmake6Fiii_pk0_;
+text: .text%__1cENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_;
+text: .text%__1cKSchedulingWAddNodeToAvailableList6MpnENode__v_;
+text: .text%__1cKSchedulingSChooseNodeToBundle6M_pnENode__;
+text: .text%__1cKSchedulingPAddNodeToBundle6MpnENode_pknFBlock__v_;
+text: .text%__1cICallNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__;
+text: .text%__1cJLoadPNodeGOpcode6kM_i_;
+text: .text%__1cMMutableSpaceIallocate6MI_pnIHeapWord__;
+text: .text%__1cJPSPermGenSallocate_permanent6MI_pnIHeapWord__;
+text: .text%__1cUParallelScavengeHeapWpermanent_mem_allocate6MI_pnIHeapWord__;
+text: .text%__1cIMachNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMMutableSpaceMcas_allocate6MI_pnIHeapWord__;
+text: .text%__1cNflagsRegPOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cHPhiNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cMMachTypeNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cJCatchNodeGOpcode6kM_i_;
+text: .text%__1cIJVMStateLdebug_start6kM_I_;
+text: .text%__1cENodeHdel_req6MI_v_;
+text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_;
+text: .text%__1cOAbstractICachePinvalidate_word6FpC_v_;
+text: .text%__1cFBlockIis_Empty6kM_i_;
+text: .text%__1cOThreadCritical2T6M_v_;
+text: .text%__1cOThreadCritical2t6M_v_;
+text: .text%method_compare: methodOop.o;
+text: .text%__1cICodeHeapKfind_start6kMpv_1_;
+text: .text%__1cETypeEhash6kM_i_;
+text: .text%__1cRNativeInstructionLset_long_at6Mii_v_;
+text: .text%__1cIAddPNodeLbottom_type6kM_pknEType__;
+text: .text%__1cJCProjNodeEhash6kM_I_: classes.o;
+text: .text%__1cIHaltNodeGOpcode6kM_i_;
+text: .text%__1cFStateRMachNodeGenerator6MipnHCompile__pnIMachNode__;
+text: .text%__1cHMatcherKReduceInst6MpnFState_irpnENode__pnIMachNode__;
+text: .text%__1cICmpUNodeGOpcode6kM_i_;
+text: .text%__1cOPhaseIdealLoopbIdom_lca_for_get_late_ctrl_internal6MpnENode_22_2_;
+text: .text%__1cXPipeline_Use_Cycle_MaskCOr6Mrk0_v_;
+text: .text%__1cILoadNodeEhash6kM_I_;
+text: .text%__1cKTypeAryPtrKadd_offset6kMi_pknHTypePtr__;
+text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_;
+text: .text%__1cKHandleMark2T6M_v_;
+text: .text%__1cZPhaseConservativeCoalesceIcoalesce6MpnFBlock__v_;
+text: .text%__1cMPhaseIterGVNZremove_globally_dead_node6MpnENode__v_;
+text: .text%__1cWShouldNotReachHereNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cHPhiNodeKin_RegMask6kMI_rknHRegMask__;
+text: .text%__1cILoadNodeLbottom_type6kM_pknEType__;
+text: .text%JVM_ReleaseUTF;
+text: .text%__1cJloadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cJTypeTupleEhash6kM_i_;
+text: .text%__1cMflagsRegOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cObranchConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cMOopMapStreamJfind_next6M_v_;
+text: .text%__1cFDictI2i6M_v_;
+text: .text%__1cKNode_ArrayEgrow6MI_v_;
+text: .text%__1cHTypeIntEmake6Fi_pk0_;
+text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_;
+text: .text%__1cJloadPNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMMergeMemNodeLbottom_type6kM_pknEType__: memnode.o;
+text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_;
+text: .text%__1cOPSPromotionLABKinitialize6MnJMemRegion__v_;
+text: .text%__1cJMultiNodeIproj_out6kMI_pnIProjNode__;
+text: .text%__1cPindOffset13OperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cUcompI_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o;
+text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_;
+text: .text%__1cKCastPPNodeGOpcode6kM_i_;
+text: .text%__1cOoop_RelocationFvalue6M_pC_: relocInfo.o;
+text: .text%__1cOoop_RelocationGoffset6M_i_: relocInfo.o;
+text: .text%__1cPSignatureStreamEnext6M_v_;
+text: .text%__1cLLShiftINodeGOpcode6kM_i_;
+text: .text%__1cMPhaseChaitinSuse_prior_register6MpnENode_I2pnFBlock_rnJNode_List_6_i_;
+text: .text%__1cGIfNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cGBitMapJset_union6M0_v_;
+text: .text%__1cIConPNodeGOpcode6kM_i_;
+text: .text%__1cJLoadINodeGOpcode6kM_i_;
+text: .text%JVM_GetMethodIxExceptionTableLength;
+text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__;
+text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__;
+text: .text%__1cNSharedRuntimeDd2i6Fd_i_;
+text: .text%__1cVcompP_iRegP_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKRegionNodeEhash6kM_I_: classes.o;
+text: .text%__1cNbranchConNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_;
+text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_;
+text: .text%__1cIAddPNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cGBitMap2t6MpII_v_;
+text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_;
+text: .text%__1cMMachCallNodeLbottom_type6kM_pknEType__;
+text: .text%__1cFParsePdo_one_bytecode6M_v_;
+text: .text%__1cFParseNdo_exceptions6M_v_;
+text: .text%__1cHPhiNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cHMatcherKmatch_tree6MpknENode__pnIMachNode__;
+text: .text%__1cMPhaseIterGVNKis_IterGVN6M_p0_: phaseX.o;
+text: .text%__1cKimmI13OperIconstant6kM_i_: ad_sparc_clone.o;
+text: .text%__1cCosVcurrent_stack_pointer6F_pC_;
+text: .text%__1cEDict2F6kMpkv_pv_;
+text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cENodeIdestruct6M_v_;
+text: .text%__1cMCreateExNodeGOpcode6kM_i_;
+text: .text%__1cIBoolNodeEhash6kM_I_;
+text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%__1cLTypeInstPtrFxmeet6kMpknEType__3_;
+text: .text%__1cKNode_ArrayFclear6M_v_;
+text: .text%__1cObranchConPNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIProjNodeHsize_of6kM_I_;
+text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_;
+text: .text%__1cMloadConINodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIHaltNodeKmatch_edge6kMI_I_: classes.o;
+text: .text%__1cJloadBNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cHhashptr6Fpkv_i_;
+text: .text%__1cMMachHaltNodeEjvms6kM_pnIJVMState__;
+text: .text%__1cHhashkey6Fpkv_i_;
+text: .text%__1cMPhaseChaitinHnew_lrg6MpknENode_I_v_;
+text: .text%__1cIJVMStateJdebug_end6kM_I_;
+text: .text%__1cIPhaseIFGMtest_edge_sq6kMII_i_;
+text: .text%__1cJloadPNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cHSubNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_;
+text: .text%__1cSaddP_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIMachNodeHtwo_adr6kM_I_: ad_sparc.o;
+text: .text%__1cNSafePointNodeHsize_of6kM_I_;
+text: .text%__1cLTypeInstPtrKadd_offset6kMi_pknHTypePtr__;
+text: .text%__1cHCmpNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cPcheckCastPPNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cNLoadRangeNodeGOpcode6kM_i_;
+text: .text%__1cNbranchConNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cENode2t6Mp011_v_;
+text: .text%__1cJStoreNodeKmatch_edge6kMI_I_;
+text: .text%__1cOPSPromotionLABFflush6M_v_;
+text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o;
+text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o;
+text: .text%__1cOcompU_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__;
+text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_;
+text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__;
+text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_;
+text: .text%__1cSCountedLoopEndNodeGOpcode6kM_i_;
+text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__;
+text: .text%__1cPcheckCastPPNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNCellTypeStateFmerge6kM0i_0_;
+text: .text%__1cMPhaseIterGVNMsubsume_node6MpnENode_2_v_;
+text: .text%__1cILoadNodeKmatch_edge6kMI_I_;
+text: .text%__1cJloadINodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNExceptionMark2T6M_v_;
+text: .text%__1cNExceptionMark2t6MrpnGThread__v_;
+text: .text%__1cITypeLongEhash6kM_i_;
+text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__;
+text: .text%__1cJiRegLOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__;
+text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__;
+text: .text%__1cOloadConI13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOloadConI13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cObranchConPNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKStoreINodeGOpcode6kM_i_;
+text: .text%__1cJcmpOpOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_;
+text: .text%__1cJiRegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cKRegionNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cKstorePNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cHPhiNodeHsize_of6kM_I_: cfgnode.o;
+text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_;
+text: .text%__1cQaddP_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cSaddI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__;
+text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode_i_i_;
+text: .text%__1cIBoolNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cLCounterDataKcell_count6M_i_: ciMethodData.o;
+text: .text%__1cHRegMaskMClearToPairs6M_v_;
+text: .text%__1cRshlI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode__i_;
+text: .text%__1cNPhaseCoalesceRcombine_these_two6MpnENode_2_v_;
+text: .text%__1cKcmpOpPOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cKTypeRawPtrKadd_offset6kMi_pknHTypePtr__;
+text: .text%__1cMloadConINodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cFArenaEgrow6MI_pv_;
+text: .text%__1cMPhaseChaitinLinsert_proj6MpnFBlock_IpnENode_I_v_;
+text: .text%__1cILoadNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cJStoreNodeLbottom_type6kM_pknEType__;
+text: .text%__1cIBoolNodeLbottom_type6kM_pknEType__: subnode.o;
+text: .text%__1cNSafePointNodeSset_next_exception6Mp0_v_;
+text: .text%__1cQaddP_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIHaltNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cPCheckCastPPNodeGOpcode6kM_i_;
+text: .text%__1cKStorePNodeGOpcode6kM_i_;
+text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o;
+text: .text%__1cNflagsRegUOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__;
+text: .text%__1cPcheckCastPPNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIAddPNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cRInvocationCounterEinit6M_v_;
+text: .text%__1cKNode_Array2t6MpnFArena__v_: block.o;
+text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__;
+text: .text%__1cXPhaseAggressiveCoalesceIcoalesce6MpnFBlock__v_;
+text: .text%__1cFBlockScall_catch_cleanup6MrnLBlock_Array__v_;
+text: .text%__1cObranchConUNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_;
+text: .text%__1cIAddINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cHRetNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
+text: .text%__1cKRegionNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKstorePNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMObjectLocker2T6M_v_;
+text: .text%__1cOcompI_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: callnode.o;
+text: .text%__1cMURShiftINodeGOpcode6kM_i_;
+text: .text%__1cRmethodDataOopDescPinitialize_data6MpnOBytecodeStream_i_i_;
+text: .text%__1cNRelocIteratorKset_limits6MpC1_v_;
+text: .text%__1cIRootNodeGOpcode6kM_i_;
+text: .text%__1cOloadConI13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJloadPNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cILoadNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cTCreateExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cFStateM_sub_Op_ConI6MpknENode__v_;
+text: .text%__1cPcheckCastPPNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cISubINodeGOpcode6kM_i_;
+text: .text%__1cNbranchConNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJTypeTupleEmake6FIppknEType__pk0_;
+text: .text%__1cJTypeTupleGfields6FI_ppknEType__;
+text: .text%__1cENodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__;
+text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__;
+text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_;
+text: .text%__1cKbranchNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cHPhiNodeIadr_type6kM_pknHTypePtr__: cfgnode.o;
+text: .text%__1cHAddNodeEhash6kM_I_;
+text: .text%__1cENodeRdisconnect_inputs6Mp0_i_;
+text: .text%__1cPsplit_flow_path6FpnIPhaseGVN_pnHPhiNode__pnENode__: cfgnode.o;
+text: .text%__1cSaddI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__;
+text: .text%__1cHConNodeEhash6kM_I_;
+text: .text%__1cLLShiftINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cNmethodOopDescIbci_from6kMpC_i_;
+text: .text%__1cOMachReturnNodeIadr_type6kM_pknHTypePtr__;
+text: .text%__1cNidealize_test6FpnIPhaseGVN_pnGIfNode__3_: ifnode.o;
+text: .text%__1cITypeNodeHsize_of6kM_I_;
+text: .text%__1cNSafePointNodeLbottom_type6kM_pknEType__: callnode.o;
+text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__;
+text: .text%__1cJloadINodeIpipeline6kM_pknIPipeline__;
+text: .text%JVM_GetClassModifiers;
+text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__;
+text: .text%__1cNSafePointNodeOnext_exception6kM_p0_;
+text: .text%JVM_GetClassAccessFlags;
+text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_;
+text: .text%__1cIsplit_if6FpnGIfNode_pnMPhaseIterGVN__pnENode__: ifnode.o;
+text: .text%__1cHTypeAryEhash6kM_i_;
+text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_;
+text: .text%__1cJMultiNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__;
+text: .text%__1cJCProjNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPPerfLongVariantGsample6M_v_;
+text: .text%__1cJStoreNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cMPhaseChaitinMyank_if_dead6MpnENode_pnFBlock_pnJNode_List_6_i_;
+text: .text%__1cJCatchNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cIMachOperNconstant_disp6kM_i_;
+text: .text%__1cIMachOperFscale6kM_i_;
+text: .text%__1cENode2t6Mp0111_v_;
+text: .text%__1cFPhase2t6Mn0ALPhaseNumber__v_;
+text: .text%__1cNCompileBrokerLmaybe_block6F_v_;
+text: .text%__1cFBlockOcode_alignment6M_I_;
+text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__;
+text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__;
+text: .text%__1cFStateM_sub_Op_RegP6MpknENode__v_;
+text: .text%JVM_GetCPMethodSignatureUTF;
+text: .text%__1cFChunkJnext_chop6M_v_;
+text: .text%__1cMMergeMemNodeEhash6kM_I_;
+text: .text%__1cKSchedulingbFComputeRegisterAntidependencies6MpnFBlock__v_;
+text: .text%__1cKSchedulingPComputeUseCount6MpknFBlock__v_;
+text: .text%__1cHTypePtrHget_con6kM_i_;
+text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o;
+text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_sparc.o;
+text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_sparc.o;
+text: .text%__1cMPhaseChaitinSget_spillcopy_wide6MpnENode_2I_2_;
+text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__;
+text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_;
+text: .text%__1cQaddI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cObranchConUNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cFBlockJfind_node6kMpknENode__I_;
+text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o;
+text: .text%__1cHCmpNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__;
+text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_;
+text: .text%__1cHTypePtrEhash6kM_i_;
+text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__;
+text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_;
+text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_;
+text: .text%__1cUParallelScavengeHeapRallocate_new_tlab6MI_pnIHeapWord__;
+text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_;
+text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_;
+text: .text%__1cFBlockLfind_remove6MpknENode__v_;
+text: .text%__1cIIndexSetJlrg_union6MIIkIpknIPhaseIFG_rknHRegMask__I_;
+text: .text%__1cKMemBarNodeKmatch_edge6kMI_I_: classes.o;
+text: .text%__1cUcompI_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_;
+text: .text%__1cNObjectMonitorEexit6MpnGThread__v_;
+text: .text%__1cIimmPOperEtype6kM_pknEType__: ad_sparc_clone.o;
+text: .text%__1cMloadConPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cLMachNopNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJloadINodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNloadRangeNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cVCompressedWriteStream2t6Mi_v_;
+text: .text%__1cNObjectMonitorFenter6MpnGThread__v_;
+text: .text%__1cENodeKreplace_by6Mp0_v_;
+text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_;
+text: .text%__1cMMergeMemNodePiteration_setup6Mpk0_v_;
+text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cRMachSpillCopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cKRegionNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cJStoreNodeEhash6kM_I_;
+text: .text%__1cSaddP_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cQaddI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIGraphKitJclone_map6M_pnNSafePointNode__;
+text: .text%__1cKIfTrueNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cRMemBarReleaseNodeGOpcode6kM_i_;
+text: .text%__1cKbranchNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIMachOperIconstant6kM_i_;
+text: .text%__1cWMutableSpaceUsedHelperLtake_sample6M_x_: spaceCounters.o;
+text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_;
+text: .text%__1cRPSOldPromotionLABFflush6M_v_;
+text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__;
+text: .text%__1cPcompP_iRegPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cLBoxLockNodeGOpcode6kM_i_;
+text: .text%__1cIciObjectJset_ident6MI_v_;
+text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__;
+text: .text%__1cKTypeRawPtrEhash6kM_i_;
+text: .text%__1cIBoolNodeKmatch_edge6kMI_I_: subnode.o;
+text: .text%__1cMMergeMemNodePset_base_memory6MpnENode__v_;
+text: .text%__1cLIfFalseNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cCosPelapsed_counter6F_x_;
+text: .text%__1cGBitMapOset_difference6M0_v_;
+text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: callnode.o;
+text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cMMergeMemNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%JVM_GetMethodIxLocalsCount;
+text: .text%__1cNloadRangeNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%JVM_CurrentThread;
+text: .text%__1cENodeHget_ptr6kM_i_;
+text: .text%__1cRcmpFastUnlockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIAndINodeGOpcode6kM_i_;
+text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_;
+text: .text%__1cENodeHins_req6MIp0_v_;
+text: .text%__1cMPhaseChaitinFUnion6MpknENode_3_v_;
+text: .text%__1cMloadConLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cHAddNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cKstoreINodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOFastUnlockNodeGOpcode6kM_i_;
+text: .text%__1cITypeNodeDcmp6kMrknENode__I_;
+text: .text%__1cIHaltNodeLbottom_type6kM_pknEType__;
+text: .text%__1cKstorePNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKcmpOpUOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cLstoreI0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIciObject2t6MnGHandle__v_;
+text: .text%__1cNSafePointNodeKmatch_edge6kMI_I_;
+text: .text%__1cIMachOperOindex_position6kM_i_;
+text: .text%__1cXmembar_release_lockNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJVectorSet2L6MI_rnDSet__;
+text: .text%__1cOcompU_iRegNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMMergeMemNodeJmemory_at6kMI_pnENode__;
+text: .text%__1cSaddP_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPindOffset13OperNconstant_disp6kM_i_: ad_sparc.o;
+text: .text%__1cPindOffset13OperFscale6kM_i_: ad_sparc.o;
+text: .text%__1cPindOffset13OperNbase_position6kM_i_: ad_sparc.o;
+text: .text%__1cWShouldNotReachHereNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__;
+text: .text%__1cUcompI_iReg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cVcompP_iRegP_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQaddP_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQaddP_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_;
+text: .text%__1cIJVMStateIof_depth6kMi_p0_;
+text: .text%__1cNSharedRuntimeElrem6Fxx_x_;
+text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cMciMethodDataLbci_to_data6Mi_pnLProfileData__;
+text: .text%__1cRMemBarAcquireNodeGOpcode6kM_i_;
+text: .text%__1cKo0RegPOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cSaddI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cObranchConUNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJVectorSet2t6MpnFArena__v_;
+text: .text%__1cKTypeAryPtrFxmeet6kMpknEType__3_;
+text: .text%__1cVcompP_iRegP_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_;
+text: .text%__1cICallNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__;
+text: .text%__1cJTraceTime2T6M_v_;
+text: .text%__1cITypeNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cPcheckCastPPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKMemBarNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_;
+text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_;
+text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_;
+text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__;
+text: .text%__1cMMergeMemNodeNset_memory_at6MIpnENode__v_;
+text: .text%__1cLstoreI0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o;
+text: .text%__1cQaddI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cENode2t6Mp01_v_;
+text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__;
+text: .text%__1cKstoreINodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cRshrI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cFStateM_sub_Op_AddP6MpknENode__v_;
+text: .text%__1cTCreateExceptionNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cITypeFuncEhash6kM_i_;
+text: .text%__1cLBoxLockNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cMTypeKlassPtrEhash6kM_i_;
+text: .text%__1cMCallLeafNodeGOpcode6kM_i_;
+text: .text%__1cSCallLeafDirectNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cHPhiNodeEmake6FpnENode_2pknEType_pknHTypePtr__p0_;
+text: .text%__1cIAddPNodeQmach_bottom_type6FpknIMachNode__pknEType__;
+text: .text%__1cOcompU_iRegNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJiRegLOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cNflagsRegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cHOrINodeGOpcode6kM_i_;
+text: .text%__1cXmembar_acquire_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%JVM_GetCPMethodClassNameUTF;
+text: .text%__1cMloadConDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMflagsRegOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cLProfileDataPfollow_contents6M_v_: ciMethodData.o;
+text: .text%__1cLProfileDataPadjust_pointers6M_v_: ciMethodData.o;
+text: .text%__1cFStateM_sub_Op_RegI6MpknENode__v_;
+text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_;
+text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_;
+text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cHCompileYout_preserve_stack_slots6F_I_;
+text: .text%__1cIGraphKitLclean_stack6Mi_v_;
+text: .text%__1cKStoreBNodeGOpcode6kM_i_;
+text: .text%__1cLklassVtableToop_adjust_pointers6M_v_;
+text: .text%__1cLklassVtableToop_follow_contents6M_v_;
+text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_;
+text: .text%__1cJlabelOperFlabel6kM_pnFLabel__: ad_sparc.o;
+text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__;
+text: .text%__1cIMachNodeIadr_type6kM_pknHTypePtr__;
+text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__: ad_sparc.o;
+text: .text%__1cRshlI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%JVM_IsNaN;
+text: .text%__1cNloadRangeNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKbranchNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cJStartNodeGOpcode6kM_i_;
+text: .text%__1cQregF_to_stkINodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cENodeDcmp6kMrk0_I_;
+text: .text%__1cHTypeIntFxdual6kM_pknEType__;
+text: .text%__1cIciObjectIencoding6M_pnI_jobject__;
+text: .text%__1cMmerge_region6FpnKRegionNode_pnIPhaseGVN__pnENode__: cfgnode.o;
+text: .text%__1cJAssemblerOpatched_branch6Fiii_i_;
+text: .text%__1cJAssemblerSbranch_destination6Fii_i_;
+text: .text%__1cRshlI_reg_imm5NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cENodeIadd_prec6Mp0_v_;
+text: .text%__1cLBoxLockNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__;
+text: .text%__1cSaddP_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cWMachCallStaticJavaNodePret_addr_offset6M_i_;
+text: .text%__1cITypeFuncEmake6FpknJTypeTuple_3_pk0_;
+text: .text%__1cMloadConDNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSCallLeafDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKTypeOopPtrHget_con6kM_i_;
+text: .text%__1cQsubI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIRootNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cJloadLNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_;
+text: .text%__1cJLoadBNodeGOpcode6kM_i_;
+text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_;
+text: .text%__1cSvframeStreamCommonEnext6M_v_;
+text: .text%__1cIAddINodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cIRootNodeNis_block_proj6kM_pknENode__: classes.o;
+text: .text%__1cMMergeMemNode2t6MpnENode__v_;
+text: .text%__1cOcompI_iRegNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRMachSafePointNodeKin_RegMask6kMI_rknHRegMask__;
+text: .text%__1cJloadINodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPindOffset13OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cPindOffset13OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cPindOffset13OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cICmpPNodeDsub6kMpknEType_3_3_;
+text: .text%__1cHMemNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__;
+text: .text%__1cIGraphKitQkill_dead_locals6M_v_;
+text: .text%__1cCosMvm_page_size6F_i_;
+text: .text%__1cRlock_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cVcompP_iRegP_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cUcompI_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o;
+text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_;
+text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cLklassItableToop_adjust_pointers6M_v_;
+text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_;
+text: .text%__1cLklassItableToop_follow_contents6M_v_;
+text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_;
+text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cNSafePointNodeGOpcode6kM_i_;
+text: .text%__1cJLoadPNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cMPhaseChaitinPset_was_spilled6MpnENode__v_;
+text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__;
+text: .text%__1cMloadConPNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIGraphKit2t6MpnIJVMState__v_;
+text: .text%__1cPconvI2L_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQPreserveJVMState2T6M_v_;
+text: .text%__1cRshrI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cTCreateExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cXmembar_release_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMloadConLNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLConvI2LNodeGOpcode6kM_i_;
+text: .text%__1cITypeLongFxmeet6kMpknEType__3_;
+text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_;
+text: .text%__1cFParseMmerge_common6Mpn0AFBlock_i_v_;
+text: .text%__1cPciInstanceKlassYunique_concrete_subklass6M_p0_;
+text: .text%__1cLBoxLockNodeHsize_of6kM_I_;
+text: .text%__1cOPhaseIdealLoopIset_idom6MpnENode_2I_v_;
+text: .text%JVM_GetCPFieldClassNameUTF;
+text: .text%__1cSaddI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNLoadKlassNodeGOpcode6kM_i_;
+text: .text%__1cRcmpFastUnlockNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__;
+text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__;
+text: .text%__1cOcompI_iRegNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRshlI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_;
+text: .text%__1cICmpINodeDsub6kMpknEType_3_3_;
+text: .text%__1cLRShiftINodeGOpcode6kM_i_;
+text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cSCallLeafDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPcheckCastPPNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOPhaseIdealLoopQconditional_move6MpnENode__2_;
+text: .text%__1cJStoreNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cITypeFuncEmake6FpnIciMethod__pk0_;
+text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_;
+text: .text%__1cJloadSNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKStoreCNodeGOpcode6kM_i_;
+text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_;
+text: .text%__1cMstringStreamFwrite6MpkcI_v_;
+text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__;
+text: .text%__1cHRetNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cPcmpFastLockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_;
+text: .text%__1cIBoolNodeJideal_reg6kM_I_: subnode.o;
+text: .text%__1cHCmpNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cRloadConP_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cETypeFwiden6kMpk0_2_: type.o;
+text: .text%__1cLstoreI0NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__;
+text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__;
+text: .text%__1cMloadConPNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__;
+text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__;
+text: .text%__1cPcompP_iRegPNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cNloadRangeNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNCatchProjNodeLbottom_type6kM_pknEType__: cfgnode.o;
+text: .text%__1cNCatchProjNodeHsize_of6kM_I_: cfgnode.o;
+text: .text%__1cFStateK_sub_Op_If6MpknENode__v_;
+text: .text%__1cTciConstantPoolCacheDget6Mi_pv_;
+text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_;
+text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_;
+text: .text%__1cQsubI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cXmembar_acquire_lockNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQaddP_reg_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cPCountedLoopNodeGOpcode6kM_i_;
+text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_;
+text: .text%__1cIAndLNodeGOpcode6kM_i_;
+text: .text%__1cIGraphKitOset_all_memory6MpnENode__v_;
+text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_;
+text: .text%__1cFParseFBlockKinit_graph6Mp0_v_;
+text: .text%__1cMTypeKlassPtrEmake6FnHTypePtrDPTR_pnHciKlass_i_pk0_;
+text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__;
+text: .text%__1cCosGmalloc6FI_pv_;
+text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_;
+text: .text%__1cIGraphKitTadd_exception_state6MpnNSafePointNode__v_;
+text: .text%__1cIimmPOperIconstant6kM_i_: ad_sparc_clone.o;
+text: .text%__1cIregDOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cKstoreINodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cICodeHeapLheader_size6F_I_;
+text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_;
+text: .text%__1cNloadRangeNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cFParseMdo_one_block6M_v_;
+text: .text%__1cOPhaseIdealLoopRregister_new_node6MpnENode_2_v_;
+text: .text%__1cLstoreB0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIAddINodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cIJVMStateLdebug_depth6kM_I_;
+text: .text%__1cENodeNadd_req_batch6Mp0I_v_;
+text: .text%__1cKciTypeFlowLStateVectorOpush_translate6MpnGciType__v_;
+text: .text%__1cJloadFNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPVirtualCallDataKcell_count6M_i_: ciMethodData.o;
+text: .text%__1cIMachNodeOpipeline_class6F_pknIPipeline__;
+text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cIPhaseGVNUtransform_no_reclaim6MpnENode__2_;
+text: .text%__1cIAddLNodeGOpcode6kM_i_;
+text: .text%__1cLLShiftINodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cOMethodLivenessKBasicBlockJpropagate6Mp0_v_;
+text: .text%__1cKciTypeFlowGJsrSet2t6MpnFArena_i_v_;
+text: .text%__1cHMatcherKmatch_sfpt6MpnNSafePointNode__pnIMachNode__;
+text: .text%__1cMFastLockNodeGOpcode6kM_i_;
+text: .text%__1cLConvL2INodeGOpcode6kM_i_;
+text: .text%__1cIXorINodeGOpcode6kM_i_;
+text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_;
+text: .text%__1cOcompU_iRegNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKTypeAryPtrFklass6kM_pnHciKlass__;
+text: .text%__1cIGraphKitbDtransfer_exceptions_into_jvms6M_pnIJVMState__;
+text: .text%__1cLTypeInstPtrFxdual6kM_pknEType__;
+text: .text%__1cNLoadRangeNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cFBlockKsched_call6MrnHMatcher_rnLBlock_Array_IrnJNode_List_pipnMMachCallNode_rnJVectorSet__I_;
+text: .text%__1cSsafePoint_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cILoadNodeHsize_of6kM_I_;
+text: .text%__1cRInterpretedRFrameKtop_method6kM_nMmethodHandle__: rframe.o;
+text: .text%__1cIGraphKitJsync_jvms6kM_pnIJVMState__;
+text: .text%__1cICmpUNodeDsub6kMpknEType_3_3_;
+text: .text%__1cEUTF8Hstrrchr6FpWiW_1_;
+text: .text%__1cPcompP_iRegPNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPsp_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_;
+text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_;
+text: .text%__1cRshrP_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cLBoxLockNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cITypeLongEmake6Fxxi_pk0_;
+text: .text%__1cNloadKlassNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%JVM_GetCPMethodNameUTF;
+text: .text%__1cMtlsLoadPNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLstoreB0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIimmIOperIconstant6kM_i_: ad_sparc_clone.o;
+text: .text%__1cNSharedRuntimeEldiv6Fxx_x_;
+text: .text%__1cHBitDataKcell_count6M_i_: ciMethodData.o;
+text: .text%__1cURethrowExceptionNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
+text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__;
+text: .text%__1cQsubI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJloadBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_;
+text: .text%__1cQaddP_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKmethodOperGmethod6kM_i_: ad_sparc.o;
+text: .text%__1cFKlassIsubklass6kM_p0_;
+text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__;
+text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o;
+text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__;
+text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_;
+text: .text%__1cIMulLNodeGOpcode6kM_i_;
+text: .text%__1cKReturnNodeKmatch_edge6kMI_I_;
+text: .text%__1cGOopMap2t6Mii_v_;
+text: .text%__1cNloadConP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cJLoadSNodeGOpcode6kM_i_;
+text: .text%__1cLPCTableNodeLbottom_type6kM_pknEType__;
+text: .text%__1cKBranchDataKcell_count6M_i_: ciMethodData.o;
+text: .text%__1cMCreateExNodeKmatch_edge6kMI_I_: classes.o;
+text: .text%__1cRloadConP_pollNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_;
+text: .text%__1cRcmpFastUnlockNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJLoadLNodeGOpcode6kM_i_;
+text: .text%__1cMciMethodDataLhas_trap_at6MpnLProfileData_i_i_;
+text: .text%__1cPThreadLocalNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cKReturnNodeGOpcode6kM_i_;
+text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__;
+text: .text%__1cNflagsRegUOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cMloadConINodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cNCatchProjNodeEhash6kM_I_;
+text: .text%__1cEUTF8Ounicode_length6Fpkci_i_;
+text: .text%__1cHCompileTProcess_OopMap_Node6MpnIMachNode_i_v_;
+text: .text%__1cNCallGenerator2t6MpnIciMethod__v_;
+text: .text%__1cKCompiledIC2t6MpnKRelocation__v_;
+text: .text%__1cKCompiledICOic_destination6kM_pC_;
+text: .text%__1cHTypeAryFxmeet6kMpknEType__3_;
+text: .text%__1cICallNodeJideal_reg6kM_I_: callnode.o;
+text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__;
+text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_;
+text: .text%__1cPmethodDataKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cKciTypeFlowQadd_to_work_list6Mpn0AFBlock__v_;
+text: .text%__1cKciTypeFlowKflow_block6Mpn0AFBlock_pn0ALStateVector_pn0AGJsrSet__v_;
+text: .text%__1cEUTF8Enext6FpkcpH_pc_;
+text: .text%__1cJVectorSetFClear6M_v_;
+text: .text%__1cHCompileSflatten_alias_type6kMpknHTypePtr__3_;
+text: .text%__1cCosEfree6Fpv_v_;
+text: .text%__1cRshrI_reg_imm5NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPcmpFastLockNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cYciExceptionHandlerStreamFcount6M_i_;
+text: .text%__1cKciTypeFlowFBlockScompute_exceptions6M_v_;
+text: .text%__1cIPhaseIFGFUnion6MII_v_;
+text: .text%__1cYCallStaticJavaDirectNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cILoopNodeGOpcode6kM_i_;
+text: .text%__1cICmpLNodeGOpcode6kM_i_;
+text: .text%__1cOPhaseIdealLoopGspinup6MpnENode_2222pnLsmall_cache__2_;
+text: .text%__1cQaddI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cMindIndexOperJnum_edges6kM_I_: ad_sparc.o;
+text: .text%__1cIConLNodeGOpcode6kM_i_;
+text: .text%JVM_GetCPFieldSignatureUTF;
+text: .text%__1cENodeLnonnull_req6kM_p0_;
+text: .text%__1cYCallStaticJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cYCallStaticJavaDirectNodeKmethod_set6Mi_v_;
+text: .text%__1cMelapsedTimerFstart6M_v_;
+text: .text%__1cMelapsedTimerEstop6M_v_;
+text: .text%__1cMURShiftINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cSaddP_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOPhaseIdealLoopOfind_use_block6MpnENode_22222_2_;
+text: .text%__1cOPhaseIdealLoopKhandle_use6MpnENode_2pnLsmall_cache_22222_v_;
+text: .text%jni_DeleteLocalRef: jni.o;
+text: .text%__1cIGraphKit2t6M_v_;
+text: .text%__1cMoutputStreamDput6Mc_v_;
+text: .text%__1cIGraphKitNset_map_clone6MpnNSafePointNode__v_;
+text: .text%__1cRInterpretedRFrameEinit6M_v_;
+text: .text%__1cHMulNodeEhash6kM_I_;
+text: .text%__1cENodeJset_req_X6MIp0pnMPhaseIterGVN__v_;
+text: .text%__1cJLoadINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cINodeHashLhash_insert6MpnENode__v_;
+text: .text%__1cKstoreCNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cENodeLbottom_type6kM_pknEType__;
+text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__;
+text: .text%__1cKstoreCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIAddPNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__;
+text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cKMemBarNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cGvframe2t6MpknFframe_pknLRegisterMap_pnKJavaThread__v_;
+text: .text%__1cLRegisterMap2t6Mpk0_v_;
+text: .text%__1cXmembar_acquire_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOcompI_iRegNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIciSymbolEmake6Fpkc_p0_;
+text: .text%__1cPorI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cGPcDesc2t6Miii_v_;
+text: .text%__1cHCompileKalias_type6MpnHciField__pn0AJAliasType__;
+text: .text%__1cGvframeKnew_vframe6FpknFframe_pknLRegisterMap_pnKJavaThread__p0_;
+text: .text%__1cPconvI2L_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMtlsLoadPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cYcompareAndSwapL_boolNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIAddINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cIciMethodRget_flow_analysis6M_pnKciTypeFlow__;
+text: .text%__1cWCallLeafNoFPDirectNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cSmembar_acquireNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKbranchNodeJlabel_set6MrnFLabel_I_v_;
+text: .text%__1cKbranchNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOloadConI13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSaddL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%jni_GetObjectField: jni.o;
+text: .text%__1cSMemBarCPUOrderNodeGOpcode6kM_i_;
+text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__;
+text: .text%__1cQandL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cQaddL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNloadKlassNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPmethodDataKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cPmethodDataKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cJloadBNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cYcompareAndSwapL_boolNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRMachNullCheckNodeKin_RegMask6kMI_rknHRegMask__;
+text: .text%__1cOPhaseIdealLoopIsink_use6MpnENode_2_v_;
+text: .text%__1cIGraphKitOreplace_in_map6MpnENode_2_v_;
+text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%__1cHCompileKTracePhase2T6M_v_;
+text: .text%__1cMPhaseChaitinLclone_projs6MpnFBlock_IpnENode_4rI_i_;
+text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__;
+text: .text%__1cIJVMState2t6MpnIciMethod_p0_v_;
+text: .text%__1cIHaltNode2t6MpnENode_2_v_;
+text: .text%__1cLOptoRuntimeSuncommon_trap_Type6F_pknITypeFunc__;
+text: .text%__1cJloadLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cSsafePoint_pollNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cINodeHashJhash_find6MpknENode__p1_;
+text: .text%__1cQmulL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cSaddP_reg_imm13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cOMethodLivenessKBasicBlock2t6Mp0ii_v_;
+text: .text%__1cOMethodLivenessKBasicBlockQcompute_gen_kill6MpnIciMethod__v_;
+text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_;
+text: .text%__1cJTypeTupleKmake_range6FpnLciSignature__pk0_;
+text: .text%__1cJTypeTupleLmake_domain6FpnPciInstanceKlass_pnLciSignature__pk0_;
+text: .text%__1cSmembar_acquireNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMWarmCallInfoKalways_hot6F_p0_;
+text: .text%__1cTCreateExceptionNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLstoreB0NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMtlsLoadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cLBoxLockNodeKin_RegMask6kMI_rknHRegMask__;
+text: .text%__1cITypeLongEmake6Fx_pk0_;
+text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_;
+text: .text%__1cKimmI13OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cJloadBNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIGraphKitZadd_exception_states_from6MpnIJVMState__v_;
+text: .text%__1cMPhaseChaitinNFind_compress6MpknENode__I_;
+text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cHPhiNodeEmake6FpnENode_2_p0_;
+text: .text%__1cNCatchProjNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cWCallLeafNoFPDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIciMethodTcall_profile_at_bci6Mi_nNciCallProfile__;
+text: .text%__1cIProjNodeDcmp6kMrknENode__I_;
+text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_;
+text: .text%__1cFParseMprofile_call6MpnENode__v_;
+text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__;
+text: .text%__1cIGraphKitWround_double_arguments6MpnIciMethod__v_;
+text: .text%__1cIGraphKitTround_double_result6MpnIciMethod__v_;
+text: .text%__1cFParseHdo_call6M_v_;
+text: .text%__1cNloadConP0NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cHMulNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cMPhaseIterGVNJtransform6MpnENode__2_;
+text: .text%__1cHTypeIntFwiden6kMpknEType__3_;
+text: .text%__1cSsafePoint_pollNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJloadSNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKarrayKlassLobject_size6kMi_i_;
+text: .text%__1cKMemBarNodeEhash6kM_I_;
+text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o;
+text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_;
+text: .text%__1cMURShiftLNodeGOpcode6kM_i_;
+text: .text%__1cIGraphKitUmake_exception_state6MpnENode__pnNSafePointNode__;
+text: .text%__1cLProfileDataOtranslate_from6Mp0_v_: ciMethodData.o;
+text: .text%__1cRsarI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cLBuildCutout2t6MpnIGraphKit_pnENode_ff_v_;
+text: .text%__1cTCompareAndSwapLNodeGOpcode6kM_i_;
+text: .text%__1cQxorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMMergeMemNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLLShiftINodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cNflagsRegLOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cQsubI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__;
+text: .text%__1cRshrI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQCallLeafNoFPNodeGOpcode6kM_i_;
+text: .text%__1cMURShiftINodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cFStateM_sub_Op_ConP6MpknENode__v_;
+text: .text%__1cIGraphKitMsaved_ex_oop6FpnNSafePointNode__pnENode__;
+text: .text%__1cISubINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cPciInstanceKlassFsuper6M_p0_;
+text: .text%__1cIBoolNodeHsize_of6kM_I_;
+text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o;
+text: .text%__1cPcompP_iRegPNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cJloadPNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__;
+text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__;
+text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o;
+text: .text%__1cOstackSlotLOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cKInlineTreeMok_to_inline6MpnIciMethod_pnIJVMState_rnNciCallProfile_pnMWarmCallInfo__8_;
+text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__;
+text: .text%__1cICodeHeapSallocated_capacity6kM_I_;
+text: .text%__1cICHeapObj2n6FI_pv_;
+text: .text%__1cQsubL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cWCallLeafNoFPDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cFTypeDEhash6kM_i_;
+text: .text%__1cKTypeRawPtrHget_con6kM_i_;
+text: .text%__1cJStartNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__;
+text: .text%jni_ExceptionOccurred: jni.o;
+text: .text%__1cKciTypeFlowLStateVectorStype_meet_internal6FpnGciType_3p0_3_;
+text: .text%__1cMloadConINodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cGIfNodeHsize_of6kM_I_: classes.o;
+text: .text%__1cPconvL2I_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIimmLOperJconstantL6kM_x_: ad_sparc_clone.o;
+text: .text%__1cTStackWalkCompPolicyRcompilation_level6MnMmethodHandle_i_i_;
+text: .text%jni_GetByteArrayRegion: jni.o;
+text: .text%__1cIGraphKitTset_all_memory_call6MpnENode__v_;
+text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o;
+text: .text%__1cHCompileFstart6kM_pnJStartNode__;
+text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o;
+text: .text%__1cMPeriodicTaskOreal_time_tick6FI_v_;
+text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__;
+text: .text%__1cIParmNodeJideal_reg6kM_I_;
+text: .text%__1cQandL_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIMachNodeRget_base_and_disp6kMrirpknHTypePtr__pknENode__;
+text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cIregFOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cRbranchLoopEndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cGRFrame2t6MnFframe_pnKJavaThread_kp0_v_;
+text: .text%jni_GetArrayLength: jni.o;
+text: .text%__1cPciInstanceKlassUget_canonical_holder6Mi_p0_;
+text: .text%__1cJloadLNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOClearArrayNodeGOpcode6kM_i_;
+text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_;
+text: .text%__1cVCompressedWriteStreamEgrow6M_v_;
+text: .text%JVM_Write;
+text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_;
+text: .text%__1cIciMethod2t6MnMmethodHandle__v_;
+text: .text%__1cIHaltNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cWShouldNotReachHereNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLOpaque1NodeGOpcode6kM_i_;
+text: .text%__1cSbranchCon_longNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKstoreCNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cHAddNodePadd_of_identity6kMpknEType_3_3_;
+text: .text%__1cUcompU_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%JVM_RawMonitorEnter;
+text: .text%JVM_RawMonitorExit;
+text: .text%__1cOMachReturnNodeKin_RegMask6kMI_rknHRegMask__;
+text: .text%__1cMTypeKlassPtrKadd_offset6kMi_pknHTypePtr__;
+text: .text%__1cWShouldNotReachHereNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPcmpFastLockNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cETypeRget_typeflow_type6FpnGciType__pk0_;
+text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_;
+text: .text%__1cRcmpFastUnlockNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o;
+text: .text%__1cURethrowExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_;
+text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o;
+text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cSandI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIAddINodeIadd_ring6kMpknEType_3_3_;
+text: .text%__1cLTypeInstPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__;
+text: .text%__1cMloadConLNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cFParseFmerge6Mi_v_;
+text: .text%__1cNSafePointNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cJTypeTupleFxdual6kM_pknEType__;
+text: .text%__1cNLoadKlassNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cPorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLRethrowNodeGOpcode6kM_i_;
+text: .text%__1cJloadSNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cICodeHeapIcapacity6kM_I_;
+text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o;
+text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o;
+text: .text%__1cPcmpFastLockNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__;
+text: .text%__1cFArena2T6M_v_;
+text: .text%__1cKMemBarNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__;
+text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o;
+text: .text%__1cHoopDescSslow_identity_hash6M_i_;
+text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_;
+text: .text%__1cLPCTableNodeEhash6kM_I_;
+text: .text%__1cHConNodeLout_RegMask6kM_rknHRegMask__: classes.o;
+text: .text%__1cXPhaseAggressiveCoalesceYinsert_copy_with_overlap6MpnFBlock_pnENode_II_v_;
+text: .text%__1cOloadConI13NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJloadBNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cMtlsLoadPNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMPhaseChaitinNFind_compress6MI_I_;
+text: .text%__1cMindIndexOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cFStateN_sub_Op_LoadP6MpknENode__v_;
+text: .text%__1cFframeVinterpreter_frame_bci6kM_i_;
+text: .text%__1cNGCTaskManagerIget_task6MI_pnGGCTask__;
+text: .text%__1cLGCTaskQdDueueGremove6M_pnGGCTask__;
+text: .text%__1cLGCTaskQdDueueHenqueue6MpnGGCTask__v_;
+text: .text%__1cNGCTaskManagerPnote_completion6MI_v_;
+text: .text%__1cQandI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIGraphKitHjava_bc6kM_nJBytecodesECode__;
+text: .text%__1cIGraphKitNbuiltin_throw6MnODeoptimizationLDeoptReason_pnENode__v_;
+text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__;
+text: .text%__1cRinterpretedVFrameGmethod6kM_pnNmethodOopDesc__;
+text: .text%jni_GetSuperclass: jni.o;
+text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_;
+text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_;
+text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_;
+text: .text%__1cIMulINodeGOpcode6kM_i_;
+text: .text%__1cRcompL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNloadKlassNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJloadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cGGCTask2t6M_v_;
+text: .text%__1cJloadSNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIJumpDataKcell_count6M_i_: ciMethodData.o;
+text: .text%__1cObranchConPNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cITypeFuncFxdual6kM_pknEType__;
+text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_;
+text: .text%__1cFStateM_sub_Op_CmpI6MpknENode__v_;
+text: .text%__1cJcmpOpOperFccode6kM_i_: ad_sparc_clone.o;
+text: .text%__1cGciType2t6MnLKlassHandle__v_;
+text: .text%__1cHciKlass2t6MnLKlassHandle__v_;
+text: .text%__1cMindirectOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cSPSPromotionManagerbBgc_thread_promotion_manager6Fi_p0_;
+text: .text%__1cQxorI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJloadLNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIregFOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cKcmpOpPOperFccode6kM_i_: ad_sparc_clone.o;
+text: .text%__1cNloadKlassNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cHPhiNodeMslice_memory6kMpknHTypePtr__p0_;
+text: .text%__1cPCheckCastPPNodeJideal_reg6kM_I_: connode.o;
+text: .text%__1cObranchConPNodeJlabel_set6MrnFLabel_I_v_;
+text: .text%__1cObranchConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cICHeapObj2k6Fpv_v_;
+text: .text%__1cSaddL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cRmethodDataOopDescJbci_to_dp6Mi_pC_;
+text: .text%__1cMloadConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cRInvocationCounterJset_carry6M_v_;
+text: .text%__1cFArena2t6M_v_;
+text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_;
+text: .text%__1cRInterpreterOopMap2T6M_v_;
+text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_;
+text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_;
+text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_;
+text: .text%__1cRInterpreterOopMap2t6M_v_;
+text: .text%__1cISubINodeDsub6kMpknEType_3_3_;
+text: .text%__1cFParseOreturn_current6MpnENode__v_;
+text: .text%__1cRsarI_reg_imm5NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cMloadConLNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJStoreNodeSIdeal_masked_input6MpnIPhaseGVN_I_pnENode__;
+text: .text%jni_GetPrimitiveArrayCritical: jni.o;
+text: .text%jni_ReleasePrimitiveArrayCritical: jni.o;
+text: .text%__1cPconvI2L_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_;
+text: .text%__1cSmembar_releaseNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJimmU5OperIconstant6kM_i_: ad_sparc_clone.o;
+text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_;
+text: .text%__1cLOpaque1NodeEhash6kM_I_;
+text: .text%__1cJStoreNodeZIdeal_sign_extended_input6MpnIPhaseGVN_i_pnENode__;
+text: .text%__1cSbranchCon_longNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_;
+text: .text%__1cSmembar_releaseNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNbranchConNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_;
+text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__;
+text: .text%__1cZCallDynamicJavaDirectNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJMultiNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKStoreLNodeGOpcode6kM_i_;
+text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__;
+text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_;
+text: .text%__1cNloadKlassNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_;
+text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_;
+text: .text%__1cRsarI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRbranchLoopEndNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQmulL_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLstoreP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cLRethrowNodeKmatch_edge6kMI_I_;
+text: .text%__1cFTypeFEhash6kM_i_;
+text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_;
+text: .text%__1cFStateM_sub_Op_AddI6MpknENode__v_;
+text: .text%__1cOParseGeneratorIgenerate6MpnIJVMState__2_;
+text: .text%__1cFParseQcreate_entry_map6M_pnNSafePointNode__;
+text: .text%__1cFArenaEused6kM_I_;
+text: .text%__1cFParseLbuild_exits6M_v_;
+text: .text%__1cFParseIdo_exits6M_v_;
+text: .text%__1cFParse2t6MpnIJVMState_pnIciMethod_f_v_;
+text: .text%__1cIBoolNodeDcmp6kMrknENode__I_;
+text: .text%__1cFParsePdo_method_entry6M_v_;
+text: .text%__1cNCallGeneratorKfor_inline6FpnIciMethod_f_p0_;
+text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_;
+text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_;
+text: .text%__1cQconstMethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cRciVirtualCallDataOtranslate_from6MpnLProfileData__v_;
+text: .text%jni_IsSameObject: jni.o;
+text: .text%__1cMloadConINodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cNbranchConNodeJlabel_set6MrnFLabel_I_v_;
+text: .text%__1cNbranchConNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQandL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLmethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cLsymbolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cIciObjectFklass6M_pnHciKlass__;
+text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__;
+text: .text%__1cPThreadLocalNodeGOpcode6kM_i_;
+text: .text%__1cZPhaseConservativeCoalesceKupdate_ifg6MIIpnIIndexSet_2_v_;
+text: .text%__1cZPhaseConservativeCoalesceMunion_helper6MpnENode_2II222pnFBlock_I_v_;
+text: .text%__1cOMethodLivenessKBasicBlockJstore_one6Mi_v_;
+text: .text%__1cIIndexSetEswap6Mp0_v_;
+text: .text%__1cHTypeAryEmake6FpknEType_pknHTypeInt__pk0_;
+text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_;
+text: .text%__1cKTypeAryPtrFxdual6kM_pknEType__;
+text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_;
+text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o;
+text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_;
+text: .text%__1cKstoreBNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKCastPPNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__;
+text: .text%__1cKstorePNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cOPhaseIdealLoopOsplit_thru_phi6MpnENode_2i_2_;
+text: .text%__1cENodeGOpcode6kM_i_;
+text: .text%__1cRshrP_reg_imm5NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQandI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIciMethodbBinterpreter_call_site_count6Mi_i_;
+text: .text%__1cGBitMapIset_from6M0_v_;
+text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__;
+text: .text%__1cTconstantPoolOopDescbDresolve_string_constants_impl6FnSconstantPoolHandle_pnGThread__v_;
+text: .text%__1cHSubNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cFChunk2n6FII_pv_;
+text: .text%__1cTCallDynamicJavaNodeGOpcode6kM_i_;
+text: .text%__1cKstoreBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cILoadNodeDcmp6kMrknENode__I_;
+text: .text%__1cIciObject2t6M_v_;
+text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_;
+text: .text%__1cRcompL_reg_conNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cHCompileXin_preserve_stack_slots6M_I_;
+text: .text%__1cPciObjectFactoryUget_empty_methodData6M_pnMciMethodData__;
+text: .text%__1cMciMethodData2t6M_v_;
+text: .text%__1cHOrINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cFframeLreal_sender6kMpnLRegisterMap__0_;
+text: .text%__1cGRFrameGcaller6M_p0_;
+text: .text%__1cPCheckCastPPNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cRshrP_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_;
+text: .text%__1cXmembar_release_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cJloadINodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o;
+text: .text%__1cMloadConFNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIMulINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cMCreateExNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cQaddL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMCreateExNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cISubINodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_;
+text: .text%__1cQdivD_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNCatchProjNodeDcmp6kMrknENode__I_;
+text: .text%__1cKTypeOopPtrEhash6kM_i_;
+text: .text%__1cIMinINodeGOpcode6kM_i_;
+text: .text%__1cMURShiftINodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_;
+text: .text%__1cKTypeRawPtrFxmeet6kMpknEType__3_;
+text: .text%JVM_GetMethodIxModifiers;
+text: .text%__1cIMulLNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cPconvI2L_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLLShiftINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cTCreateExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%JVM_IsInterface;
+text: .text%__1cPorI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIDivINodeGOpcode6kM_i_;
+text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_;
+text: .text%__1cICodeHeapIallocate6MI_pv_;
+text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__;
+text: .text%__1cLOpaque1NodeLbottom_type6kM_pknEType__: connode.o;
+text: .text%__1cNloadRangeNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cLRShiftLNodeGOpcode6kM_i_;
+text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__;
+text: .text%__1cSCountedLoopEndNodeKstride_con6kM_i_;
+text: .text%__1cUPipeline_Use_Element2t6M_v_: output.o;
+text: .text%__1cRshrL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cHCompileSregister_intrinsic6MpnNCallGenerator__v_;
+text: .text%__1cNSCMemProjNodeGOpcode6kM_i_;
+text: .text%__1cNimmP_pollOperEtype6kM_pknEType__: ad_sparc_clone.o;
+text: .text%__1cRloadConP_pollNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_;
+text: .text%__1cPciInstanceKlassLfind_method6MpnIciSymbol_2_pnIciMethod__;
+text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_;
+text: .text%__1cFciEnvRfind_system_klass6MpnIciSymbol__pnHciKlass__;
+text: .text%__1cLRegisterMapIpd_clear6M_v_;
+text: .text%__1cHUNICODEHas_utf86FpHi_pc_;
+text: .text%__1cLstoreP0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cIGraphKitYcombine_exception_states6MpnNSafePointNode_2_v_;
+text: .text%__1cQmulL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRshrP_reg_imm5NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__;
+text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_;
+text: .text%__1cKstoreLNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRbranchLoopEndNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cSconvI2D_helperNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_;
+text: .text%__1cUcompI_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOMacroAssemblerNverify_thread6M_v_;
+text: .text%__1cIGraphKitZset_results_for_java_call6MpnMCallJavaNode__pnENode__;
+text: .text%__1cSbranchCon_longNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cHnmethodVcleanup_inline_caches6M_v_;
+text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_;
+text: .text%__1cIAddLNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cFStateO_sub_Op_StoreI6MpknENode__v_;
+text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_;
+text: .text%__1cHciField2t6MpnPfieldDescriptor__v_;
+text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_;
+text: .text%__1cMloadConLNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cYcompareAndSwapL_boolNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cFKlassMnext_sibling6kM_p0_;
+text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_;
+text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__;
+text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cJimmU5OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cLBlock_ArrayEgrow6MI_v_;
+text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_;
+text: .text%__1cKcmpOpPOperGnegate6M_v_: ad_sparc_clone.o;
+text: .text%__1cObranchConPNodeGnegate6M_v_: ad_sparc_misc.o;
+text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_;
+text: .text%__1cLBoxLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPcmpFastLockNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cHRetNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPconvL2I_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cbDcatch_cleanup_find_cloned_def6FpnFBlock_pnENode_1rnLBlock_Array_i_3_: lcm.o;
+text: .text%__1cQxorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKstoreLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__;
+text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__;
+text: .text%__1cOClearArrayNodeKmatch_edge6kMI_I_;
+text: .text%__1cPconvL2I_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJloadSNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_;
+text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o;
+text: .text%__1cFStateP_sub_Op_LShiftI6MpknENode__v_;
+text: .text%__1cQandL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPSignatureStreamRas_symbol_or_null6M_pnNsymbolOopDesc__;
+text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cIGraphKitbBset_arguments_for_java_call6MpnMCallJavaNode__v_;
+text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_: callGenerator.o;
+text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o;
+text: .text%__1cJcmpOpOperGnegate6M_v_: ad_sparc_clone.o;
+text: .text%__1cMloadConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%jni_SetObjectArrayElement: jni.o;
+text: .text%__1cSandI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPThreadLocalNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cNSafePointNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cObranchConUNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRshlL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQandI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSandI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_;
+text: .text%__1cZresource_reallocate_bytes6FpcII_0_;
+text: .text%__1cLConvL2INodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cOAbstractICacheQinvalidate_range6FpCi_v_;
+text: .text%__1cKstorePNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIMaxINodeGOpcode6kM_i_;
+text: .text%__1cTDirectCallGeneratorIgenerate6MpnIJVMState__2_;
+text: .text%__1cNCallGeneratorPfor_direct_call6FpnIciMethod__p0_;
+text: .text%__1cMWarmCallInfoLalways_cold6F_p0_;
+text: .text%__1cIimmDOperJconstantD6kM_d_: ad_sparc_clone.o;
+text: .text%__1cIPhaseIFGEinit6MI_v_;
+text: .text%__1cJPhaseLiveHcompute6MI_v_;
+text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__;
+text: .text%__1cSaddI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cJloadLNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cFTypeDEmake6Fd_pk0_;
+text: .text%__1cPThreadRootsTaskEname6M_pc_: psTasks.o;
+text: .text%__1cPThreadRootsTaskFdo_it6MpnNGCTaskManager_I_v_;
+text: .text%__1cRshlI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQaddL_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMloadConDNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cFStateN_sub_Op_LoadI6MpknENode__v_;
+text: .text%__1cIMachOperEtype6kM_pknEType__;
+text: .text%JVM_GetCPClassNameUTF;
+text: .text%__1cKBufferBlobGcreate6Fpkci_p0_;
+text: .text%__1cKcmpOpUOperFccode6kM_i_: ad_sparc_clone.o;
+text: .text%__1cObranchConUNodeJlabel_set6MrnFLabel_I_v_;
+text: .text%__1cObranchConUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%jni_GetStringLength: jni.o;
+text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__;
+text: .text%__1cLConvI2LNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cJloadPNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cMoutputStream2t6Mi_v_;
+text: .text%__1cMstringStreamJas_string6M_pc_;
+text: .text%__1cMstringStream2T6M_v_;
+text: .text%__1cMstringStream2t6MI_v_;
+text: .text%__1cIGraphKitMreset_memory6M_pnENode__;
+text: .text%__1cZCallDynamicJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKstorePNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cENodeMsetup_is_top6M_v_;
+text: .text%__1cIGotoNodeGOpcode6kM_i_;
+text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_;
+text: .text%__1cNbranchConNodeGnegate6M_v_: ad_sparc_misc.o;
+text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_;
+text: .text%__1cKcmpOpPOperFequal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_;
+text: .text%__1cKReturnNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_;
+text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_;
+text: .text%__1cJloadINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSmembar_releaseNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cSaddL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLRShiftINodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cEDict2t6MpFpkv2_ipF2_i_v_;
+text: .text%__1cEDict2T6M_v_;
+text: .text%__1cKBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_;
+text: .text%__1cLOopRecorder2t6MpnFArena__v_;
+text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__;
+text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__;
+text: .text%__1cIModINodeGOpcode6kM_i_;
+text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_nMmethodHandle__v_;
+text: .text%__1cKJavaThreadQlast_java_vframe6MpnLRegisterMap__pnKjavaVFrame__;
+text: .text%__1cTStackWalkCompPolicyVfindTopInlinableFrame6MpnNGrowableArray4CpnGRFrame____2_;
+text: .text%__1cTStackWalkCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_;
+text: .text%__1cISubLNodeGOpcode6kM_i_;
+text: .text%__1cKciTypeFlow2t6MpnFciEnv_pnIciMethod_i_v_;
+text: .text%__1cKciTypeFlowPget_start_state6M_pkn0ALStateVector__;
+text: .text%__1cKciTypeFlowHdo_flow6M_v_;
+text: .text%__1cKciTypeFlowKflow_types6M_v_;
+text: .text%__1cKciTypeFlowKmap_blocks6M_v_;
+text: .text%__1cMloadConPNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_;
+text: .text%__1cIciMethodJload_code6M_v_;
+text: .text%__1cMciMethodDataJload_data6M_v_;
+text: .text%__1cIGraphKitTuse_exception_state6MpnNSafePointNode__pnENode__;
+text: .text%__1cOcompU_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIGraphKitGmemory6MI_pnENode__;
+text: .text%__1cIHaltNodeEhash6kM_I_: classes.o;
+text: .text%__1cFKlassQup_cast_abstract6M_p0_;
+text: .text%__1cKReturnNodeEhash6kM_I_: classes.o;
+text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_;
+text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__;
+text: .text%__1cIAndINodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cMciMethodData2t6MnQmethodDataHandle__v_;
+text: .text%__1cIAndINodeImul_ring6kMpknEType_3_3_;
+text: .text%__1cLOpaque2NodeGOpcode6kM_i_;
+text: .text%__1cOClearArrayNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_;
+text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_;
+text: .text%__1cOPhaseIdealLoopPis_counted_loop6MpnENode_pnNIdealLoopTree__2_;
+text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o;
+text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_;
+text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_;
+text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__;
+text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_;
+text: .text%jni_GetStringUTFLength: jni.o;
+text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_;
+text: .text%jni_GetStringUTFRegion: jni.o;
+text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_;
+text: .text%__1cHUNICODELutf8_length6FpHi_i_;
+text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_;
+text: .text%__1cKstoreBNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKstoreINodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__;
+text: .text%__1cRsarI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cUcompU_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQmulL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cHAddNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cUcompU_iReg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKPerfStringKset_string6Mpkc_v_;
+text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_;
+text: .text%JVM_InternString;
+text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_;
+text: .text%__1cCosGrandom6F_l_;
+text: .text%__1cKimmP13OperIconstant6kM_i_: ad_sparc_clone.o;
+text: .text%__1cVcompP_iRegP_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cRcompL_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRMachNullCheckNodeLout_RegMask6kM_rknHRegMask__: machnode.o;
+text: .text%__1cSTailCalljmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
+text: .text%__1cIGraphKitPpush_pair_local6Mi_v_: parse2.o;
+text: .text%__1cICodeHeapKdeallocate6Mpv_v_;
+text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_;
+text: .text%__1cKTypeRawPtrEmake6FpC_pk0_;
+text: .text%jni_SetIntField: jni.o;
+text: .text%__1cNIdealLoopTreeMcounted_loop6MpnOPhaseIdealLoop__v_;
+text: .text%__1cKBufferBlobEfree6Fp0_v_;
+text: .text%__1cPconvL2I_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__;
+text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_;
+text: .text%__1cVshrL_reg_imm6_L2INodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cZCallDynamicJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: graphKit.o;
+text: .text%__1cWImplicitExceptionTableGappend6MII_v_;
+text: .text%__1cRMachNullCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: ciMethodData.o;
+text: .text%__1cQxorI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNIdealLoopTreeVadjust_loop_exit_prob6MpnOPhaseIdealLoop__v_;
+text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_;
+text: .text%__1cSandI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIPhaseIFGISquareUp6M_v_;
+text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_;
+text: .text%__1cKCodeBuffer2T6M_v_;
+text: .text%__1cQPSGenerationPoolQget_memory_usage6M_nLMemoryUsage__;
+text: .text%__1cLOpaque1NodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cMURShiftINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cRcompL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_;
+text: .text%__1cFKlassWappend_to_sibling_list6M_v_;
+text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__;
+text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_;
+text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_;
+text: .text%__1cMTypeKlassPtrFxdual6kM_pknEType__;
+text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__;
+text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_;
+text: .text%__1cNinstanceKlassQinit_implementor6M_v_;
+text: .text%__1cPClassFileStream2t6MpCipc_v_;
+text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_;
+text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_;
+text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__;
+text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__;
+text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__;
+text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_;
+text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_;
+text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_;
+text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_;
+text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_;
+text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cKcmpOpPOperJnot_equal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cMPhaseIterGVNIoptimize6M_v_;
+text: .text%__1cOPhaseTransform2t6MnFPhaseLPhaseNumber__v_;
+text: .text%__1cISubINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o;
+text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o;
+text: .text%__1cHMemNodeHsize_of6kM_I_;
+text: .text%__1cFVTuneQstart_class_load6F_v_;
+text: .text%__1cSThreadProfilerMark2T6M_v_;
+text: .text%__1cFVTuneOend_class_load6F_v_;
+text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__;
+text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o;
+text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_;
+text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_;
+text: .text%__1cKklassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cQmodI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cLRShiftINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cKCMoveINodeGOpcode6kM_i_;
+text: .text%__1cLLShiftLNodeGOpcode6kM_i_;
+text: .text%__1cYcompareAndSwapL_boolNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSinstanceKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cNinstanceKlassScopy_static_fields6MpnSPSPromotionManager__v_;
+text: .text%__1cMtlsLoadPNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cFStateQ_sub_Op_URShiftI6MpknENode__v_;
+text: .text%__1cKcmpOpUOperGnegate6M_v_: ad_sparc_clone.o;
+text: .text%__1cObranchConUNodeGnegate6M_v_: ad_sparc_misc.o;
+text: .text%__1cQaddP_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_;
+text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_;
+text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_;
+text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_;
+text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_;
+text: .text%JVM_GetMethodIxSignatureUTF;
+text: .text%JVM_GetMethodIxMaxStack;
+text: .text%JVM_GetMethodIxArgsSize;
+text: .text%JVM_GetMethodIxByteCodeLength;
+text: .text%JVM_GetMethodIxExceptionIndexes;
+text: .text%JVM_GetMethodIxByteCode;
+text: .text%JVM_GetMethodIxExceptionsCount;
+text: .text%__1cLstoreP0NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cHCmpNodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cMPhaseChaitinSbuild_ifg_physical6MpnMResourceArea__I_;
+text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o;
+text: .text%__1cQmulD_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__;
+text: .text%__1cNPhaseCoalescePcoalesce_driver6M_v_;
+text: .text%__1cLBuildCutout2T6M_v_;
+text: .text%__1cNloadConL0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cJloadFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNloadConP0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cJimmP0OperEtype6kM_pknEType__: ad_sparc_clone.o;
+text: .text%__1cLstoreI0NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPCheckCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_;
+text: .text%__1cHNTarjanICOMPRESS6M_v_;
+text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_;
+text: .text%__1cQsubL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOcompI_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_;
+text: .text%__1cQandI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIXorINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cRmethodDataOopDescLbci_to_data6Mi_pnLProfileData__;
+text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_;
+text: .text%__1cMnegF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cLstoreI0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_;
+text: .text%__1cSaddL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQshrL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKstoreLNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_;
+text: .text%__1cILoopNodeHsize_of6kM_I_: classes.o;
+text: .text%__1cHMatcherLfind_shared6MpnENode__v_;
+text: .text%__1cJStartNodeHsize_of6kM_I_;
+text: .text%__1cHMatcherFxform6MpnENode_i_2_;
+text: .text%__1cEDict2t6MpFpkv2_ipF2_ipnFArena_i_v_;
+text: .text%__1cRInterpretedRFrameKtop_vframe6kM_pnKjavaVFrame__: rframe.o;
+text: .text%__1cQmodI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRinterpretedVFrameDbci6kM_i_;
+text: .text%__1cIAndINodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cIAndINodeGmul_id6kM_pknEType__: classes.o;
+text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cNloadRangeNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cRcompL_reg_conNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
+text: .text%__1cMnegF_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_;
+text: .text%__1cTStackWalkCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_;
+text: .text%__1cRCompilationPolicybJreset_counter_for_back_branch_event6MnMmethodHandle__v_;
+text: .text%__1cOMethodLivenessQcompute_liveness6M_v_;
+text: .text%__1cOMethodLiveness2t6MpnFArena_pnIciMethod__v_;
+text: .text%__1cOMethodLivenessNinit_gen_kill6M_v_;
+text: .text%__1cOMethodLivenessSpropagate_liveness6M_v_;
+text: .text%__1cOMethodLivenessRinit_basic_blocks6M_v_;
+text: .text%__1cIGraphKitHopt_iff6MpnENode_2_2_;
+text: .text%__1cLRShiftINodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cJTimeStampGupdate6M_v_;
+text: .text%__1cRmethodDataOopDescKmileage_of6FpnNmethodOopDesc__i_;
+text: .text%__1cWconstantPoolCacheKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cMloadConDNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cFParseQarray_addressing6MnJBasicType_ippknEType__pnENode__;
+text: .text%__1cNloadConP0NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQaddL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPCountedLoopNodeHsize_of6kM_I_: classes.o;
+text: .text%__1cIProjNodeJideal_reg6kM_I_;
+text: .text%__1cQaddI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQcmovI_reg_ltNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRsubI_zero_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cJcmpOpOperFequal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cHCompilebAvarargs_C_out_slots_killed6kM_I_;
+text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o;
+text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_;
+text: .text%__1cOPhaseIdealLoopQset_subtree_ctrl6MpnENode__v_;
+text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cNflagsRegLOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cYciExceptionHandlerStreamPcount_remaining6M_i_;
+text: .text%__1cFParseXcatch_inline_exceptions6MpnNSafePointNode__v_;
+text: .text%__1cRconstantPoolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_;
+text: .text%__1cKcmpOpUOperNgreater_equal6kM_i_: ad_sparc_clone.o;
+text: .text%JVM_GetFieldIxModifiers;
+text: .text%__1cRScavengeRootsTaskFdo_it6MpnNGCTaskManager_I_v_;
+text: .text%__1cRScavengeRootsTaskEname6M_pc_: psTasks.o;
+text: .text%JVM_IsConstructorIx;
+text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_;
+text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_;
+text: .text%__1cSaddP_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__;
+text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_;
+text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_;
+text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__;
+text: .text%__1cKg1RegIOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_;
+text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o;
+text: .text%__1cIAndINodeKmul_opcode6kM_i_: classes.o;
+text: .text%__1cIAndINodeKadd_opcode6kM_i_: classes.o;
+text: .text%__1cTMachCallRuntimeNodePret_addr_offset6M_i_;
+text: .text%__1cLConvL2INodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cKo0RegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cIregDOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_;
+text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__;
+text: .text%__1cPBytecode_invokeFindex6kM_i_;
+text: .text%__1cLRethrowNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cSPSKeepAliveClosureGdo_oop6MppnHoopDesc__v_: psScavenge.o;
+text: .text%__1cFParseFBlockRsuccessor_for_bci6Mi_p1_;
+text: .text%__1cVPreserveExceptionMark2T6M_v_;
+text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_;
+text: .text%__1cHRetNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIRootNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o;
+text: .text%__1cMoutputStreamFprint6MpkcE_v_;
+text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_;
+text: .text%__1cHCompileQsync_stack_slots6kM_i_;
+text: .text%__1cHMulNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cJLoadFNodeGOpcode6kM_i_;
+text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o;
+text: .text%__1cHPhiNodeDcmp6kMrknENode__I_;
+text: .text%__1cHOrINodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cSTailCalljmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_;
+text: .text%__1cKstoreINodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRloadConP_pollNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_;
+text: .text%__1cLMachNopNodeMideal_Opcode6kM_i_: ad_sparc.o;
+text: .text%__1cLMachNopNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOPhaseIdealLoopNreorg_offsets6MpnNIdealLoopTree__v_;
+text: .text%__1cRshrL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_;
+text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_;
+text: .text%__1cPcompP_iRegPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSxorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOPhaseIdealLoopRsplit_thru_region6MpnENode_2_2_;
+text: .text%__1cIAndLNodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cbAPSEvacuateFollowersClosureHdo_void6M_v_: psScavenge.o;
+text: .text%jni_ExceptionCheck: jni.o;
+text: .text%__1cIAndLNodeImul_ring6kMpknEType_3_3_;
+text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__;
+text: .text%__1cOPhaseIdealLoopMdominated_by6MpnENode_2_v_;
+text: .text%__1cQshlI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cFParseNthrow_to_exit6MpnNSafePointNode__v_;
+text: .text%__1cQinstanceRefKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cJVectorSetGslamin6Mrk0_v_;
+text: .text%JVM_Clone;
+text: .text%__1cRAbstractAssemblerFflush6M_v_;
+text: .text%__1cITypeLongFxdual6kM_pknEType__;
+text: .text%__1cIJumpDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_;
+text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_;
+text: .text%__1cOstackSlotLOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cURethrowExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRshrL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLOpaque2NodeEhash6kM_I_;
+text: .text%__1cJloadFNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cUcompU_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKstoreINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cUEdenMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__;
+text: .text%__1cYSurvivorMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__;
+text: .text%__1cLOptoRuntimeJstub_name6FpC_pkc_;
+text: .text%__1cHOrINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cICmpLNodeDsub6kMpknEType_3_3_;
+text: .text%__1cHPhiNodeKmake_blank6FpnENode_2_p0_;
+text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o;
+text: .text%__1cIMulINodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cOMachEpilogNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cFStateM_sub_Op_SubI6MpknENode__v_;
+text: .text%__1cFframeRretrieve_receiver6MpnLRegisterMap__pnHoopDesc__;
+text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__;
+text: .text%__1cNloadKlassNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cMTailCallNodeKmatch_edge6kMI_I_;
+text: .text%jni_NewObject: jni.o;
+text: .text%__1cIPhaseIFGYCompute_Effective_Degree6M_v_;
+text: .text%__1cHMemNodeIadr_type6kM_pknHTypePtr__;
+text: .text%__1cXmembar_release_lockNodeIadr_type6kM_pknHTypePtr__;
+text: .text%__1cJNode_ListEyank6MpnENode__v_;
+text: .text%__1cMPhaseChaitinISimplify6M_v_;
+text: .text%__1cNIdealLoopTreeIset_nest6MI_i_;
+text: .text%__1cSCallLeafDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cIMulLNodeImul_ring6kMpknEType_3_3_;
+text: .text%__1cMStartOSRNodeGOpcode6kM_i_;
+text: .text%__1cSCallLeafDirectNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIMulLNodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cLcmpD_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cJcmpOpOperEless6kM_i_: ad_sparc_clone.o;
+text: .text%__1cKciTypeFlowPflow_exceptions6MpnNGrowableArray4Cpn0AFBlock___pnNGrowableArray4CpnPciInstanceKlass___pn0ALStateVector__v_;
+text: .text%__1cKType_ArrayEgrow6MI_v_;
+text: .text%__1cNloadConP0NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cXmembar_release_lockNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPconvF2D_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRshrL_reg_imm6NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMURShiftLNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cVshrL_reg_imm6_L2INodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cSMemBarVolatileNodeGOpcode6kM_i_;
+text: .text%__1cLstoreB0NodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cRshrI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_;
+text: .text%__1cRcmpFastUnlockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cNSafePointNodeLpop_monitor6M_v_;
+text: .text%__1cMPhaseChaitinVfind_base_for_derived6MppnENode_2rI_2_;
+text: .text%__1cLOptoRuntimebAcomplete_monitor_exit_Type6F_pknITypeFunc__;
+text: .text%__1cOstackSlotIOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cIGraphKitNshared_unlock6MpnENode_2_v_;
+text: .text%__1cFStateT_sub_Op_CheckCastPP6MpknENode__v_;
+text: .text%__1cQsubI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cFKlassDLCA6Mp0_1_;
+text: .text%__1cKTypeRawPtrEmake6FnHTypePtrDPTR__pk0_;
+text: .text%__1cHciKlassVleast_common_ancestor6Mp0_1_;
+text: .text%__1cOPhaseIdealLoopPbuild_loop_tree6M_v_;
+text: .text%__1cRcompL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRshlL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cRloadConP_pollNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQshlL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cMindirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cMindirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cMindirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cNSafePointNodeMpush_monitor6MpknMFastLockNode__v_;
+text: .text%__1cSCallLeafDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSCallLeafDirectNodeKmethod_set6Mi_v_;
+text: .text%__1cIDivINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cJLoadBNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cJloadBNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: cfgnode.o;
+text: .text%__1cRcompL_reg_conNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPcheckCastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_;
+text: .text%__1cJCMoveNodeLis_cmove_id6FpnOPhaseTransform_pnENode_44pnIBoolNode__4_;
+text: .text%__1cKTypeAryPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__;
+text: .text%__1cOPhaseIdealLoopKDominators6M_v_;
+text: .text%__1cOPhaseIdealLoopPbuild_loop_late6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_;
+text: .text%__1cOPhaseIdealLoopQbuild_loop_early6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_;
+text: .text%jni_NewGlobalRef: jni.o;
+text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_;
+text: .text%__1cIAndINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cYcompareAndSwapL_boolNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cMPhaseChaitinFSplit6MI_I_;
+text: .text%__1cMPhaseChaitinHcompact6M_v_;
+text: .text%__1cZPhaseConservativeCoalesce2t6MrnMPhaseChaitin__v_;
+text: .text%__1cMPhaseChaitinZcompress_uf_map_for_nodes6M_v_;
+text: .text%__1cZPhaseConservativeCoalesceGverify6M_v_;
+text: .text%__1cRcmpFastUnlockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQshlI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cXmembar_release_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cKPSYoungGenNused_in_bytes6kM_I_;
+text: .text%__1cOMachEpilogNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_;
+text: .text%__1cJloadFNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIRootNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o;
+text: .text%__1cJLoadLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_;
+text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_;
+text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_;
+text: .text%JVM_FillInStackTrace;
+text: .text%__1cKJavaThreadGactive6F_p0_;
+text: .text%__1cKstoreFNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_;
+text: .text%__1cMVirtualSpaceNreserved_size6kM_I_;
+text: .text%__1cICodeHeapMmax_capacity6kM_I_;
+text: .text%__1cRsubI_zero_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cHTypePtrFxmeet6kMpknEType__3_;
+text: .text%__1cNflagsRegFOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cIMinINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cFParseWensure_phis_everywhere6M_v_;
+text: .text%__1cLRethrowNodeEhash6kM_I_: classes.o;
+text: .text%__1cIDivLNodeGOpcode6kM_i_;
+text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_;
+text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_;
+text: .text%__1cNmethodOopDescVclear_native_function6M_v_;
+text: .text%__1cOloadConL13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cQsubL_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%jio_snprintf;
+text: .text%__1cMloadConINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o;
+text: .text%__1cSmulI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%jni_NewLocalRef: jni.o;
+text: .text%__1cIMulDNodeGOpcode6kM_i_;
+text: .text%__1cLStrCompNodeGOpcode6kM_i_;
+text: .text%__1cQcmovI_reg_gtNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_;
+text: .text%__1cKStoreFNodeGOpcode6kM_i_;
+text: .text%__1cLConvD2INodeGOpcode6kM_i_;
+text: .text%__1cIAddLNodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cMURShiftLNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cKReturnNodeJideal_reg6kM_I_: classes.o;
+text: .text%jni_DeleteGlobalRef: jni.o;
+text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_;
+text: .text%__1cVPatchingRelocIteratorHprepass6M_v_;
+text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_;
+text: .text%__1cIAndLNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cIAndLNodeGmul_id6kM_pknEType__: classes.o;
+text: .text%__1cJOopMapSet2t6M_v_;
+text: .text%__1cNSCMemProjNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%JVM_GetCPMethodModifiers;
+text: .text%jni_GetObjectArrayElement: jni.o;
+text: .text%__1cFParseKarray_load6MnJBasicType__v_;
+text: .text%jni_SetLongField: jni.o;
+text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_;
+text: .text%__1cJOopMapSetHcopy_to6MpC_v_;
+text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_;
+text: .text%__1cJOopMapSetJheap_size6kM_i_;
+text: .text%__1cNSafePointNodeKgrow_stack6MpnIJVMState_I_v_;
+text: .text%__1cIJVMState2t6Mi_v_;
+text: .text%__1cIAndLNodeKadd_opcode6kM_i_: classes.o;
+text: .text%__1cIAndLNodeKmul_opcode6kM_i_: classes.o;
+text: .text%__1cJLoadSNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cMMachProjNodeHsize_of6kM_I_: classes.o;
+text: .text%__1cOPhaseIdealLoopUsplit_if_with_blocks6MrnJVectorSet_rnKNode_Stack__v_;
+text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_;
+text: .text%__1cLOopRecorderIoop_size6M_i_;
+text: .text%__1cYDebugInformationRecorderJdata_size6M_i_;
+text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_;
+text: .text%__1cOPhaseIdealLoopOset_early_ctrl6MpnENode__v_;
+text: .text%__1cHnmethodKtotal_size6kM_i_;
+text: .text%__1cbFunnecessary_membar_volatileNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMloadConLNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cFParseNadd_safepoint6M_v_;
+text: .text%__1cOPhaseTransform2t6Mp0nFPhaseLPhaseNumber__v_;
+text: .text%__1cLPhaseValues2t6Mp0_v_;
+text: .text%__1cQcmovI_reg_ltNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cXPhaseAggressiveCoalesceGverify6M_v_: coalesce.o;
+text: .text%__1cHCompilebBregister_library_intrinsics6M_v_;
+text: .text%__1cXPhaseAggressiveCoalesceNinsert_copies6MrnHMatcher__v_;
+text: .text%__1cNPhaseRegAlloc2t6MIrnIPhaseCFG_rnHMatcher_pF_v_v_;
+text: .text%__1cIPhaseCFGJbuild_cfg6M_I_;
+text: .text%__1cHCompileEInit6Mi_v_;
+text: .text%__1cVExceptionHandlerTable2t6Mi_v_;
+text: .text%__1cMPhaseChaitin2t6MIrnIPhaseCFG_rnHMatcher__v_;
+text: .text%__1cMPhaseChaitinRRegister_Allocate6M_v_;
+text: .text%__1cHCompileTset_cached_top_node6MpnENode__v_;
+text: .text%__1cHMatcherZnumber_of_saved_registers6F_i_;
+text: .text%__1cNPhaseRegAllocTpd_preallocate_hook6M_v_;
+text: .text%__1cLBlock_Array2t6MpnFArena__v_: block.o;
+text: .text%__1cMPhaseChaitinMreset_uf_map6MI_v_;
+text: .text%__1cMPhaseChaitinRbuild_ifg_virtual6M_v_;
+text: .text%__1cIPhaseCFGQGlobalCodeMotion6MrnHMatcher_IrnJNode_List__v_;
+text: .text%__1cHMatcherTFixup_Save_On_Entry6M_v_;
+text: .text%__1cHMatcherPinit_spill_mask6MpnENode__v_;
+text: .text%__1cHCompileICode_Gen6M_v_;
+text: .text%__1cFArena2t6MI_v_;
+text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_;
+text: .text%__1cHMatcherVinit_first_stack_mask6M_v_;
+text: .text%__1cFArenaNmove_contents6Mp0_1_;
+text: .text%__1cFArenaRdestruct_contents6M_v_;
+text: .text%__1cIPhaseIFG2t6MpnFArena__v_;
+text: .text%__1cFDictIFreset6MpknEDict__v_;
+text: .text%__1cHMatcherFmatch6M_v_;
+text: .text%__1cHMatcher2t6MrnJNode_List__v_;
+text: .text%__1cIPhaseCFGVschedule_pinned_nodes6MrnJVectorSet__v_;
+text: .text%__1cETypeKInitialize6FpnHCompile__v_;
+text: .text%__1cIPhaseCFGYEstimate_Block_Frequency6M_v_;
+text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_;
+text: .text%__1cOCompileWrapper2t6MpnHCompile__v_;
+text: .text%__1cIPhaseCFGKDominators6M_v_;
+text: .text%__1cIPhaseCFG2t6MpnFArena_pnIRootNode_rnHMatcher__v_;
+text: .text%__1cJPhaseLive2t6MrknIPhaseCFG_rnILRG_List_pnFArena__v_;
+text: .text%__1cHCompileYinit_scratch_buffer_blob6M_v_;
+text: .text%__1cOMachPrologNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cHCompileTFillExceptionTables6MIpI1pnFLabel__v_;
+text: .text%__1cMPhaseChaitinbApost_allocate_copy_removal6M_v_;
+text: .text%__1cHCompileGOutput6M_v_;
+text: .text%__1cWImplicitExceptionTableIset_size6MI_v_;
+text: .text%__1cHCompileMBuildOopMaps6M_v_;
+text: .text%__1cLdo_liveness6FpnNPhaseRegAlloc_pnIPhaseCFG_pnKBlock_List_ipnFArena_pnEDict__v_: buildOopMap.o;
+text: .text%__1cMPhaseChaitinMfixup_spills6M_v_;
+text: .text%__1cNPhaseRegAllocPalloc_node_regs6Mi_v_;
+text: .text%__1cHCompileLFill_buffer6M_v_;
+text: .text%__1cVCallRuntimeDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o;
+text: .text%__1cENodeHrm_prec6MI_v_;
+text: .text%__1cHRetNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKstoreFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_;
+text: .text%JVM_DoPrivileged;
+text: .text%__1cRsubI_zero_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cHRetNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIConDNodeGOpcode6kM_i_;
+text: .text%__1cObranchConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cTresource_free_bytes6FpcI_v_;
+text: .text%__1cNmethodOopDescbDbuild_interpreter_method_data6FnMmethodHandle_pnGThread__v_;
+text: .text%__1cRcompL_reg_conNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_;
+text: .text%__1cPconvL2I_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cFciEnvKcompile_id6M_I_;
+text: .text%__1cPmethodDataKlassIallocate6MnMmethodHandle_pnGThread__pnRmethodDataOopDesc__;
+text: .text%__1cKoopFactoryOnew_methodData6FnMmethodHandle_pnGThread__pnRmethodDataOopDesc__;
+text: .text%__1cIAndLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cKCodeBuffer2t6MpCi_v_;
+text: .text%__1cVshrL_reg_imm6_L2INodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLConvL2INodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cIciMethodRinstructions_size6M_i_;
+text: .text%__1cSmulI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cCosXthread_local_storage_at6Fi_pv_;
+text: .text%__1cMindIndexOperNconstant_disp6kM_i_: ad_sparc.o;
+text: .text%__1cMindIndexOperOindex_position6kM_i_: ad_sparc.o;
+text: .text%__1cMindIndexOperFscale6kM_i_: ad_sparc.o;
+text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: assembler_sparc.o;
+text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_;
+text: .text%__1cMindIndexOperNbase_position6kM_i_: ad_sparc.o;
+text: .text%__1cNloadKlassNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cFStateR_sub_Op_LoadKlass6MpknENode__v_;
+text: .text%__1cGTarjanICOMPRESS6M_v_;
+text: .text%__1cKstoreCNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cICmpDNodeGOpcode6kM_i_;
+text: .text%__1cNloadConL0NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIMulLNodeGmul_id6kM_pknEType__: classes.o;
+text: .text%__1cOPhaseIdealLoopOplace_near_use6kMpnENode__2_;
+text: .text%__1cVCallRuntimeDirectNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLstoreB0NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSInterpreterRuntimeOprofile_method6FpnKJavaThread_pC_i_;
+text: .text%__1cMURShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cJloadPNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_;
+text: .text%__1cLRShiftINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cIMachNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cLOpaque2NodeLbottom_type6kM_pknEType__: connode.o;
+text: .text%__1cSconvI2D_helperNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cUPSGenerationCountersKupdate_all6M_v_: psGenerationCounters.o;
+text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o;
+text: .text%__1cKTypeOopPtrSmake_from_constant6FpnIciObject__pk0_;
+text: .text%__1cQregP_to_stkPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_;
+text: .text%__1cJTimeStampSticks_since_update6kM_x_;
+text: .text%__1cQmodI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIMulINodeImul_ring6kMpknEType_3_3_;
+text: .text%__1cURethrowExceptionNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIAddLNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cQcmovI_reg_ltNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLstoreB0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSaddI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cIModINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cJcmpOpOperHgreater6kM_i_: ad_sparc_clone.o;
+text: .text%__1cJimmL0OperJconstantL6kM_x_: ad_sparc_clone.o;
+text: .text%__1cJimmI0OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cFStateM_sub_Op_ConL6MpknENode__v_;
+text: .text%__1cOloadConL13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNObjectMonitorHis_busy6kM_i_;
+text: .text%JVM_GetClassNameUTF;
+text: .text%__1cKloadUBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIXorINodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cFStateM_sub_Op_AndI6MpknENode__v_;
+text: .text%__1cVshrL_reg_imm6_L2INodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKcmpOpFOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cTmembar_volatileNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJloadFNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cFStateL_sub_Op_OrI6MpknENode__v_;
+text: .text%__1cJCmpL3NodeGOpcode6kM_i_;
+text: .text%JVM_FindLoadedClass;
+text: .text%__1cIMulLNodeKadd_opcode6kM_i_: classes.o;
+text: .text%__1cIMulLNodeKmul_opcode6kM_i_: classes.o;
+text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_;
+text: .text%__1cIConFNodeGOpcode6kM_i_;
+text: .text%__1cSmembar_acquireNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cQmulD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIModLNodeGOpcode6kM_i_;
+text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_;
+text: .text%__1cQLibraryIntrinsicIgenerate6MpnIJVMState__2_;
+text: .text%__1cLRShiftLNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cKTypeRawPtrFxdual6kM_pknEType__;
+text: .text%__1cNloadConL0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cFTypeFEmake6Ff_pk0_;
+text: .text%__1cIimmFOperJconstantF6kM_f_: ad_sparc_clone.o;
+text: .text%__1cEUTF8Ounicode_length6Fpkc_i_;
+text: .text%__1cCosRcurrent_thread_id6F_i_;
+text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_;
+text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_;
+text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_;
+text: .text%__1cSmembar_acquireNodeIadr_type6kM_pknHTypePtr__;
+text: .text%__1cPorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIPhaseCFGOinsert_goto_at6MII_v_;
+text: .text%__1cITypeLongFwiden6kMpknEType__3_;
+text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__;
+text: .text%__1cPCallRuntimeNodeGOpcode6kM_i_;
+text: .text%__1cJcmpOpOperNgreater_equal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cMindIndexOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cMindIndexOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cMindIndexOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%JVM_FindClassFromClass;
+text: .text%__1cRshrP_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cObranchConFNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQshrI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_;
+text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_;
+text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_;
+text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_;
+text: .text%JVM_NewInstanceFromConstructor;
+text: .text%__1cFParseFBlockMadd_new_path6M_i_;
+text: .text%__1cIimmPOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cQsubL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cJloadBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLConvF2DNodeGOpcode6kM_i_;
+text: .text%__1cLConvI2DNodeGOpcode6kM_i_;
+text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__;
+text: .text%__1cMloadConFNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cKcmpOpPOperNgreater_equal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cLRShiftLNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cKimmL13OperJconstantL6kM_x_: ad_sparc_clone.o;
+text: .text%__1cSTailCalljmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKstoreLNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cGIfNodeMdominated_by6MpnENode_pnMPhaseIterGVN__v_;
+text: .text%__1cOcompiledVFrame2t6MpknFframe_pknLRegisterMap_pnKJavaThread_pnJScopeDesc__v_;
+text: .text%__1cJScopeDesc2t6MpknHnmethod_i_v_;
+text: .text%__1cQshlI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_;
+text: .text%__1cbFunnecessary_membar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cULinearLeastSquareFitGupdate6Mdd_v_;
+text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__;
+text: .text%__1cKstoreCNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKstoreCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cJcmpOpOperKless_equal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cXmembar_acquire_lockNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__;
+text: .text%__1cMloadConLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIMaxINodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cMloadConDNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cMindirectOperNconstant_disp6kM_i_: ad_sparc.o;
+text: .text%__1cMindirectOperNbase_position6kM_i_: ad_sparc.o;
+text: .text%__1cIAddLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cMindirectOperFscale6kM_i_: ad_sparc.o;
+text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cSsubL_reg_reg_2NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%jni_NewString: jni.o;
+text: .text%__1cLConvL2INodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__;
+text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cOcompiledVFrameEcode6kM_pnHnmethod__;
+text: .text%__1cIGraphKitMnext_monitor6M_i_;
+text: .text%__1cLBoxLockNode2t6Mi_v_;
+text: .text%__1cPconvF2D_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cLOptoRuntimebBcomplete_monitor_enter_Type6F_pknITypeFunc__;
+text: .text%__1cIGraphKitLshared_lock6MpnENode__pnMFastLockNode__;
+text: .text%__1cPcmpFastLockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cNloadConP0NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKcmpOpUOperEless6kM_i_: ad_sparc_clone.o;
+text: .text%__1cQaddF_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_;
+text: .text%lwp_mutex_init: os_solaris.o;
+text: .text%__1cRsubI_zero_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cFframeLnmethods_do6M_v_;
+text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__;
+text: .text%__1cQnotemp_iRegIOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__;
+text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o;
+text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o;
+text: .text%__1cCosPhint_no_preempt6F_v_;
+text: .text%__1cOcmovII_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIMulLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cIMulINodeGmul_id6kM_pknEType__: classes.o;
+text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_;
+text: .text%__1cRsarL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__;
+text: .text%__1cSstring_compareNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cFciEnv2T6M_v_;
+text: .text%__1cIGraphKitNgen_checkcast6MpnENode_2p2_2_;
+text: .text%__1cMMergeMemNodeIadr_type6kM_pknHTypePtr__: memnode.o;
+text: .text%__1cJcmpOpOperJnot_equal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cGvframeDtop6kM_p0_;
+text: .text%__1cOCompiledRFrameEinit6M_v_;
+text: .text%__1cXmembar_acquire_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cJloadSNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cVCallRuntimeDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPcmpFastLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_;
+text: .text%__1cIXorINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cIGraphKitRgen_subtype_check6MpnENode_2_2_;
+text: .text%__1cOMacroAssemblerLsave_thread6MkpnMRegisterImpl__v_;
+text: .text%__1cOcmovII_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMloadConINodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRshlL_reg_imm6NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cFParseGdo_new6M_v_;
+text: .text%__1cIimmIOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cQmodI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLConvI2LNodeJideal_reg6kM_I_: classes.o;
+text: .text%jni_GetObjectClass: jni.o;
+text: .text%__1cSxorI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOMacroAssemblerFalign6Mi_v_;
+text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_;
+text: .text%__1cKManagementJtimestamp6F_x_;
+text: .text%__1cIPSOldGenPupdate_counters6M_v_;
+text: .text%__1cQshrI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cFForteNregister_stub6FpkcpC3_v_;
+text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_;
+text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%__1cTloadL_unalignedNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cJloadLNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cOMacroAssemblerVreset_last_Java_frame6M_v_;
+text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_2_v_;
+text: .text%__1cSstring_compareNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOstackSlotIOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cQregF_to_stkINodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cINodeHash2t6MpnFArena_I_v_;
+text: .text%__1cPconvI2L_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOPhaseTransform2t6MpnFArena_nFPhaseLPhaseNumber__v_;
+text: .text%__1cLPhaseValues2t6MpnFArena_I_v_;
+text: .text%__1cJStubQdDueueGcommit6Mi_v_;
+text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__;
+text: .text%__1cOcmovII_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKstoreFNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOMacroAssemblerKsave_frame6Mi_v_;
+text: .text%__1cSmulI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLstoreC0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOPhaseIdealLoopVclone_up_backedge_goo6MpnENode_22_2_;
+text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_;
+text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_;
+text: .text%JVM_FindClassFromClassLoader;
+text: .text%JVM_FindClassFromBootLoader;
+text: .text%signalHandler;
+text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o;
+text: .text%JVM_handle_solaris_signal;
+text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__;
+text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o;
+text: .text%__1cFStateM_sub_Op_AndL6MpknENode__v_;
+text: .text%__1cKConv2BNodeGOpcode6kM_i_;
+text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_;
+text: .text%JVM_IHashCode;
+text: .text%__1cSconvI2D_helperNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cJStartNodeJideal_reg6kM_I_: callnode.o;
+text: .text%__1cOMacroAssemblerbBcheck_and_forward_exception6MpnMRegisterImpl__v_;
+text: .text%__1cQcmovI_reg_ltNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQandL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLPhaseValuesKis_IterGVN6M_pnMPhaseIterGVN__: phaseX.o;
+text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cKC2CompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_;
+text: .text%JVM_GetClassLoader;
+text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_;
+text: .text%__1cCosRelapsed_frequency6F_x_;
+text: .text%__1cFStateP_sub_Op_ConvL2I6MpknENode__v_;
+text: .text%__1cOPhaseIdealLoopLdo_split_if6MpnENode__v_;
+text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_;
+text: .text%__1cKScheduling2t6MpnFArena_rnHCompile__v_;
+text: .text%__1cKSchedulingMDoScheduling6M_v_;
+text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_;
+text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_;
+text: .text%__1cSCardTableExtensionbAscavenge_contents_parallel6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager_I_v_;
+text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o;
+text: .text%__1cFframeMpd_gc_epilog6M_v_;
+text: .text%__1cMelapsedTimerHseconds6kM_d_;
+text: .text%__1cJStealTaskEname6M_pc_: psTasks.o;
+text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o;
+text: .text%__1cFframeLgc_epilogue6M_v_;
+text: .text%__1cFframeLgc_prologue6M_v_;
+text: .text%__1cTOldToYoungRootsTaskEname6M_pc_: psTasks.o;
+text: .text%__1cJStealTaskFdo_it6MpnNGCTaskManager_I_v_;
+text: .text%__1cTOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_;
+text: .text%__1cNGCTaskManagerMnote_release6MI_v_;
+text: .text%__1cMciMethodDataStrap_recompiled_at6MpnLProfileData__i_;
+text: .text%__1cJloadLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSmembar_acquireNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSmembar_acquireNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_;
+text: .text%__1cVExceptionHandlerTableHcopy_to6MpnHnmethod__v_;
+text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_;
+text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_;
+text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_;
+text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_;
+text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_;
+text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_;
+text: .text%__1cIciMethodRbuild_method_data6M_v_;
+text: .text%__1cHCompileIOptimize6M_v_;
+text: .text%__1cHCompileLFinish_Warm6M_v_;
+text: .text%__1cbAfinal_graph_reshaping_walk6FrnKNode_Stack_pnENode_rnUFinal_Reshape_Counts__v_: compile.o;
+text: .text%__1cHCompileLInline_Warm6M_i_;
+text: .text%__1cSPhaseRemoveUseless2t6MpnIPhaseGVN_pnQUnique_Node_List__v_;
+text: .text%__1cJStartNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cKInlineTreeWbuild_inline_tree_root6F_p0_;
+text: .text%__1cHCompileRbuild_start_state6MpnJStartNode_pknITypeFunc__pnIJVMState__;
+text: .text%__1cIPhaseCCPHanalyze6M_v_;
+text: .text%__1cIPhaseCCPMdo_transform6M_v_;
+text: .text%__1cIPhaseCCPJtransform6MpnENode__2_;
+text: .text%__1cIPhaseCCP2t6MpnMPhaseIterGVN__v_;
+text: .text%__1cHCompileVidentify_useful_nodes6MrnQUnique_Node_List__v_;
+text: .text%__1cHCompileUremove_useless_nodes6MrnQUnique_Node_List__v_;
+text: .text%__1cQUnique_Node_ListUremove_useless_nodes6MrnJVectorSet__v_;
+text: .text%__1cMPhaseIterGVN2t6MpnIPhaseGVN__v_;
+text: .text%__1cMPhaseIterGVN2t6Mp0_v_;
+text: .text%__1cQmulI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_;
+text: .text%__1cHCompileNreturn_values6MpnIJVMState__v_;
+text: .text%__1cOcmovII_immNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOMachEpilogNodeQsafepoint_offset6kM_i_;
+text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_;
+text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_;
+text: .text%__1cIModINodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cFStateP_sub_Op_RShiftI6MpknENode__v_;
+text: .text%__1cRsarI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%lwp_cond_init: os_solaris.o;
+text: .text%__1cTmembar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNloadConL0NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o;
+text: .text%__1cXmembar_acquire_lockNodeIadr_type6kM_pknHTypePtr__;
+text: .text%__1cKPSYoungGenRcapacity_in_bytes6kM_I_;
+text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o;
+text: .text%__1cOloadConI13NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cJloadSNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIAddFNodeGOpcode6kM_i_;
+text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_;
+text: .text%__1cFStateO_sub_Op_Binary6MpknENode__v_;
+text: .text%__1cKBinaryNodeGOpcode6kM_i_;
+text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o;
+text: .text%__1cLBoxLockNodeDcmp6kMrknENode__I_;
+text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_;
+text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_;
+text: .text%__1cRNativeMovConstRegIset_data6Mi_v_;
+text: .text%__1cFParsebLincrement_and_test_invocation_counter6Mi_v_;
+text: .text%__1cSsafePoint_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cMTailCallNodeGOpcode6kM_i_;
+text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_;
+text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_;
+text: .text%__1cSsafePoint_pollNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_;
+text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_;
+text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_;
+text: .text%__1cIMulFNodeGOpcode6kM_i_;
+text: .text%__1cISubLNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cQmulD_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cNSCMemProjNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cSThreadLocalStorageGthread6F_pnGThread__: assembler_sparc.o;
+text: .text%jni_SetByteArrayRegion: jni.o;
+text: .text%__1cQregI_to_stkINodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__;
+text: .text%__1cSdivL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cFStateM_sub_Op_XorI6MpknENode__v_;
+text: .text%__1cHTypePtrEmake6FnETypeFTYPES_n0ADPTR_i_pk0_;
+text: .text%__1cCosLelapsedTime6F_d_;
+text: .text%__1cKScopeValueJread_from6FpnTDebugInfoReadStream__p0_;
+text: .text%__1cKPerfMemoryMmark_updated6F_v_;
+text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__;
+text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_;
+text: .text%__1cKPerfMemoryFalloc6FI_pc_;
+text: .text%__1cLStrCompNodeKmatch_edge6kMI_I_;
+text: .text%__1cQmulL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cILocation2t6MpnTDebugInfoReadStream__v_;
+text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__;
+text: .text%jni_ReleaseStringUTFChars;
+text: .text%jni_GetStringUTFChars: jni.o;
+text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__;
+text: .text%__1cFParseLarray_store6MnJBasicType__v_;
+text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_;
+text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_;
+text: .text%JVM_IsInterrupted;
+text: .text%__1cLLShiftLNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o;
+text: .text%JVM_FindLibraryEntry;
+text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_;
+text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_;
+text: .text%__1cRshlL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQshlL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cPconvF2D_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLRShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cSstring_compareNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOMacroAssemblerEstop6Mpkc_v_;
+text: .text%__1cObranchConFNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKloadUBNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQaddP_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cLcmpD_ccNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cTloadL_unalignedNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLLShiftLNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_;
+text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o;
+text: .text%__1cRbranchLoopEndNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQaddF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKcmpOpUOperHgreater6kM_i_: ad_sparc_clone.o;
+text: .text%__1cUParallelScavengeHeapEused6kM_I_;
+text: .text%__1cIDivINodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cQmulF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cQxorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cWCallLeafNoFPDirectNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLcmpD_ccNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cWCallLeafNoFPDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cJloadINodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_;
+text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_;
+text: .text%__1cIMinINodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__;
+text: .text%__1cOClearArrayNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cRbranchLoopEndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cRbranchLoopEndNodeJlabel_set6MrnFLabel_I_v_;
+text: .text%__1cLMachUEPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cCosTnative_java_library6F_pv_;
+text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_;
+text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_;
+text: .text%__1cSxorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cMPhaseChaitinGSelect6M_I_;
+text: .text%__1cFParseSjump_switch_ranges6MpnENode_pnLSwitchRange_4i_v_;
+text: .text%__1cSbranchCon_longNodeJlabel_set6MrnFLabel_I_v_;
+text: .text%__1cSbranchCon_longNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSbranchCon_longNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_;
+text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_;
+text: .text%__1cLstoreP0NodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cFParseTprofile_switch_case6Mi_v_;
+text: .text%__1cSandI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIimmLOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cFParseOmerge_new_path6Mi_v_;
+text: .text%__1cQregP_to_stkPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_;
+text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__;
+text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__;
+text: .text%__1cSmembar_releaseNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%jni_NewByteArray: jni.o;
+text: .text%__1cQdivL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_;
+text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_;
+text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_;
+text: .text%__1cFParseLdo_newarray6MnJBasicType__v_;
+text: .text%__1cPmethodDataKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__;
+text: .text%__1cSconvI2D_helperNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLstoreP0NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_;
+text: .text%__1cSmembar_releaseNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_;
+text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__;
+text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_;
+text: .text%__1cSTailCalljmpIndNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQcmovI_reg_gtNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%JVM_GetMethodIxExceptionTableEntry;
+text: .text%__1cIDivINodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cLstoreP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQstkI_to_regFNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLRethrowNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cKloadUBNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cHCompileSrethrow_exceptions6MpnIJVMState__v_;
+text: .text%__1cURethrowExceptionNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLRethrowNode2t6MpnENode_22222_v_;
+text: .text%__1cTLoadL_unalignedNodeGOpcode6kM_i_;
+text: .text%__1cSmulI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cZInterpreterMacroAssemblerZget_2_byte_integer_at_bcp6MipnMRegisterImpl_2n0ALsignedOrNot_n0AKsetCCOrNot__v_;
+text: .text%__1cQcmovI_reg_gtNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cURethrowExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_;
+text: .text%__1cISubLNodeDsub6kMpknEType_3_3_;
+text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_;
+text: .text%__1cJLoadINodeMstore_Opcode6kM_i_: classes.o;
+text: .text%__1cQandI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cQmulI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cFParsePmerge_exception6Mi_v_;
+text: .text%__1cLStrCompNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o;
+text: .text%__1cNloadConP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%jni_ReleaseStringCritical: jni.o;
+text: .text%__1cJCMoveNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%jni_GetStringCritical: jni.o;
+text: .text%__1cHciKlassSsuper_check_offset6M_I_;
+text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o;
+text: .text%__1cWCallLeafNoFPDirectNodeKmethod_set6Mi_v_;
+text: .text%__1cWCallLeafNoFPDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIDivLNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o;
+text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o;
+text: .text%__1cMloadConFNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMadjust_check6FpnENode_11iipnMPhaseIterGVN__v_: ifnode.o;
+text: .text%__1cJScopeDescGsender6kM_p0_;
+text: .text%__1cSxorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOcompiledVFrameGsender6kM_pnGvframe__;
+text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_;
+text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_;
+text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_;
+text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_;
+text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_;
+text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_;
+text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_;
+text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_;
+text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_;
+text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_;
+text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_;
+text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_;
+text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_;
+text: .text%__1cOPhaseIdealLoopOadd_constraint6MiipnENode_22p23_v_;
+text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_;
+text: .text%__1cKstoreBNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cSaddL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_;
+text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o;
+text: .text%__1cLstoreI0NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_;
+text: .text%__1cRtestI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__;
+text: .text%__1cFParseMdo_checkcast6M_v_;
+text: .text%__1cOCompiledRFrameKtop_method6kM_nMmethodHandle__: rframe.o;
+text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__;
+text: .text%__1cRtestI_reg_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOcmovIL_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cJimmU6OperIconstant6kM_i_: ad_sparc_clone.o;
+text: .text%__1cHRegMask2t6M_v_: matcher.o;
+text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_;
+text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_;
+text: .text%__1cSmulL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPstoreI_FregNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLcmpD_ccNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cXMachCallDynamicJavaNodePret_addr_offset6M_i_;
+text: .text%__1cNflagsRegFOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_;
+text: .text%__1cIModLNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cMloadConFNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_;
+text: .text%__1cbAPSGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_;
+text: .text%__1cXTraceMemoryManagerStats2T6M_v_;
+text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_;
+text: .text%__1cQLRUMaxHeapPolicy2t6M_v_;
+text: .text%__1cUParallelScavengeHeapQresize_all_tlabs6M_v_;
+text: .text%__1cUParallelScavengeHeapPupdate_counters6M_v_;
+text: .text%__1cUParallelScavengeHeapbFaccumulate_statistics_all_tlabs6M_v_;
+text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_;
+text: .text%__1cTDerivedPointerTablePupdate_pointers6F_v_;
+text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_;
+text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_;
+text: .text%__1cMTypeKlassPtrFxmeet6kMpknEType__3_;
+text: .text%__1cKPSYoungGenPupdate_counters6M_v_;
+text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_;
+text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_;
+text: .text%__1cPGCMemoryManagerIgc_begin6M_v_;
+text: .text%__1cPGCMemoryManagerGgc_end6M_v_;
+text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_;
+text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_;
+text: .text%__1cbAPSGCAdaptivePolicyCountersPupdate_counters6M_v_;
+text: .text%__1cTDerivedPointerTableFclear6F_v_;
+text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_;
+text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_;
+text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_;
+text: .text%__1cMCounterDecayFdecay6F_v_;
+text: .text%__1cCosbCmake_polling_page_unreadable6F_v_;
+text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_;
+text: .text%__1cLConvI2FNodeGOpcode6kM_i_;
+text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_;
+text: .text%__1cQaddF_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cUSafepointSynchronizeFbegin6F_v_;
+text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cONMethodSweeperFsweep6F_v_;
+text: .text%__1cCosbAmake_polling_page_readable6F_v_;
+text: .text%__1cUSafepointSynchronizeDend6F_v_;
+text: .text%__1cOcmovII_immNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_;
+text: .text%__1cKimmU13OperIconstant6kM_i_: ad_sparc_clone.o;
+text: .text%__1cQshlL_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cUcompU_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%JVM_GetCallerClass;
+text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o;
+text: .text%__1cOcmovPP_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKstoreBNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSobjArrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cLstoreC0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cTloadL_unalignedNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cICmpFNodeGOpcode6kM_i_;
+text: .text%__1cOstackSlotPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cQregF_to_stkINodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cJLoadDNodeGOpcode6kM_i_;
+text: .text%__1cQmulD_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%jni_IsAssignableFrom: jni.o;
+text: .text%jni_GetFieldID: jni.o;
+text: .text%__1cJLoadPNodeMstore_Opcode6kM_i_: classes.o;
+text: .text%__1cLstoreB0NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_;
+text: .text%__1cHTypeAryFxdual6kM_pknEType__;
+text: .text%__1cMtlsLoadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_;
+text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__;
+text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__;
+text: .text%__1cMVM_OperationIevaluate6M_v_;
+text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_;
+text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__;
+text: .text%__1cMnegD_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cQcmovI_reg_gtNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cGGCTask2t6Mn0AEKindEkind__v_;
+text: .text%__1cNGCTaskManagerVrelease_all_resources6M_v_;
+text: .text%__1cLGCTaskQdDueueHenqueue6Mp0_v_;
+text: .text%__1cSCardTableExtensionRscavenge_contents6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager__v_;
+text: .text%__1cUWaitForBarrierGCTaskFdo_it6MpnNGCTaskManager_I_v_;
+text: .text%__1cNGCTaskManagerIadd_list6MpnLGCTaskQdDueue__v_;
+text: .text%__1cHThreadsZcreate_thread_roots_tasks6FpnLGCTaskQdDueue__v_;
+text: .text%__1cUWaitForBarrierGCTaskGcreate6F_p0_;
+text: .text%__1cUWaitForBarrierGCTaskIdestruct6M_v_;
+text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_;
+text: .text%__1cSPSPromotionManagerNpost_scavenge6F_v_;
+text: .text%__1cNBarrierGCTaskOdo_it_internal6MpnNGCTaskManager_I_v_;
+text: .text%__1cNJvmtiGCMarker2T6M_v_;
+text: .text%__1cUWaitForBarrierGCTaskHdestroy6Fp0_v_;
+text: .text%__1cLGCTaskQdDueueGcreate6F_p0_;
+text: .text%__1cSPSPromotionManagerMpre_scavenge6F_v_;
+text: .text%__1cZSerialOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_;
+text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_;
+text: .text%__1cZSerialOldToYoungRootsTaskEname6M_pc_: psTasks.o;
+text: .text%__1cKPSYoungGenLswap_spaces6M_v_;
+text: .text%__1cUParallelScavengeHeapQresize_young_gen6MII_v_;
+text: .text%__1cKPSYoungGenGresize6MII_v_;
+text: .text%__1cKPSYoungGenNresize_spaces6MII_v_;
+text: .text%__1cSPSPromotionManagerbBvm_thread_promotion_manager6F_p0_;
+text: .text%__1cUWaitForBarrierGCTaskIwait_for6M_v_;
+text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_;
+text: .text%__1cNMonitorSupplyHreserve6F_pnHMonitor__;
+text: .text%__1cNMonitorSupplyHrelease6FpnHMonitor__v_;
+text: .text%__1cUWaitForBarrierGCTaskEname6M_pc_: gcTaskManager.o;
+text: .text%__1cTmembar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__;
+text: .text%__1cTloadL_unalignedNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovII_immNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o;
+text: .text%__1cMURShiftLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_;
+text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cKstoreBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_;
+text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_;
+text: .text%__1cFframeZinterpreter_frame_set_mdp6MpC_v_;
+text: .text%__1cZInterpreterMacroAssemblerIpush_ptr6MpnMRegisterImpl__v_;
+text: .text%__1cISubLNodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cIciMethodRinterpreter_entry6M_pC_;
+text: .text%__1cQmulF_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPconvF2D_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRcompL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cJloadBNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%jni_SetBooleanField: jni.o;
+text: .text%__1cKimmL13OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cLcmpF_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cLRuntimeStubbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o;
+text: .text%__1cRorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRsarL_reg_imm6NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQmulI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_;
+text: .text%__1cZInterpreterMacroAssemblerGpush_i6MpnMRegisterImpl__v_;
+text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cLLShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o;
+text: .text%__1cJloadSNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRloadConP_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNObjectMonitorHRecycle6M_v_;
+text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__;
+text: .text%__1cMloadConLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cJloadDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__;
+text: .text%__1cQstkI_to_regFNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQregP_to_stkPNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cZInterpreterMacroAssemblerFpop_i6MpnMRegisterImpl__v_;
+text: .text%__1cIMaxINodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__;
+text: .text%__1cYcompareAndSwapL_boolNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNSCMemProjNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cYcompareAndSwapL_boolNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cIProjNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIPSOldGenMmax_gen_size6M_I_: psOldGen.o;
+text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_;
+text: .text%__1cSdivL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cbDVM_ParallelGCFailedAllocationEdoit6M_v_;
+text: .text%__1cQaddL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPadd_derived_oop6FppnHoopDesc_2_v_: oopMap.o;
+text: .text%__1cMregD_lowOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cHOrINodeIadd_ring6kMpknEType_3_3_;
+text: .text%__1cOMethodLivenessKBasicBlockFsplit6Mi_p1_;
+text: .text%__1cOcmovII_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cENodeEgetd6kM_d_;
+text: .text%__1cOcmovIL_immNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_;
+text: .text%__1cZInterpreterMacroAssemblerGpush_f6MpnRFloatRegisterImpl__v_;
+text: .text%__1cIciObject2t6MpnHciKlass__v_;
+text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__;
+text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_;
+text: .text%__1cPstoreI_FregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKCMovePNodeGOpcode6kM_i_;
+text: .text%__1cLstoreC0NodeIpipeline6kM_pknIPipeline__;
+text: .text%JVM_MonitorWait;
+text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_;
+text: .text%__1cIAddLNodeIadd_ring6kMpknEType_3_3_;
+text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_;
+text: .text%__1cGciType2t6MpnHciKlass__v_;
+text: .text%__1cQshlI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQdivD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cFParseSjump_if_false_fork6MpnGIfNode_ii_v_;
+text: .text%__1cNloadConL0NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRshrL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cUciInstanceKlassKlassEmake6F_p0_;
+text: .text%__1cENode2t6Mp0111111_v_;
+text: .text%__1cIDivLNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cZInterpreterMacroAssemblerGpush_d6MpnRFloatRegisterImpl__v_;
+text: .text%__1cFParseRarray_store_check6M_v_;
+text: .text%__1cQsubF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIciSymbolHbyte_at6Mi_i_;
+text: .text%__1cKCompiledICSset_ic_destination6MpC_v_;
+text: .text%__1cQaddD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__;
+text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__;
+text: .text%__1cQset_lwp_priority6Fiii_i_;
+text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__;
+text: .text%jni_NewStringUTF: jni.o;
+text: .text%__1cZInterpreterMacroAssemblerGpush_l6MpnMRegisterImpl__v_;
+text: .text%__1cQsubI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cZInterpreterMacroAssemblerXget_constant_pool_cache6MpnMRegisterImpl__v_;
+text: .text%__1cSbranchCon_longNodeGnegate6M_v_: ad_sparc_misc.o;
+text: .text%__1cKcmpOpUOperKless_equal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__;
+text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__;
+text: .text%__1cUParallelScavengeHeapIcapacity6kM_I_;
+text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__;
+text: .text%__1cSsubL_reg_reg_2NodeIpipeline6kM_pknIPipeline__;
+text: .text%JVM_DefineClassWithSource;
+text: .text%__1cLstoreF0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%JVM_SetClassSigners;
+text: .text%__1cKCompiledICMset_to_clean6M_v_;
+text: .text%__1cSandL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cRbranchLoopEndNodeGnegate6M_v_: ad_sparc_misc.o;
+text: .text%__1cLRShiftLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o;
+text: .text%__1cFParseWcheck_interpreter_type6MpnENode_pknEType_rpnNSafePointNode__2_;
+text: .text%__1cOcmovPP_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_;
+text: .text%get_thread;
+text: .text%__1cKstoreCNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__;
+text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_;
+text: .text%jni_CallIntMethod: jni.o;
+text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_;
+text: .text%__1cKloadUBNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSconvD2I_helperNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIMulDNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cSaddP_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cIAddDNodeGOpcode6kM_i_;
+text: .text%__1cOloadI_fregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOloadI_fregNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cCosJyield_all6Fi_v_;
+text: .text%__1cKstoreLNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKstoreLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPstoreI_FregNodeIpipeline6kM_pknIPipeline__;
+text: .text%JVM_GetClassMethodsCount;
+text: .text%__1cKstoreINodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%JVM_GetClassFieldsCount;
+text: .text%__1cLconvI2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%JVM_GetClassCPEntriesCount;
+text: .text%JVM_GetClassCPTypes;
+text: .text%__1cQmulI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_;
+text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__;
+text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_;
+text: .text%__1cIAddFNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cQregI_to_stkINodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQmulF_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_;
+text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_;
+text: .text%JVM_IsPrimitiveClass;
+text: .text%__1cJimmU6OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cOPhaseIdealLoopUpeeled_dom_test_elim6MpnNIdealLoopTree_rnJNode_List__v_;
+text: .text%__1cIDivDNodeGOpcode6kM_i_;
+text: .text%__1cQsubI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__;
+text: .text%__1cIModLNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%jni_FindClass: jni.o;
+text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_;
+text: .text%__1cOMacroAssemblerOstore_argument6MpnMRegisterImpl_rnIArgument__v_: interpreterRT_sparc.o;
+text: .text%__1cFParseHdo_irem6M_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__;
+text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__;
+text: .text%__1cOcmovII_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSdivL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cTloadL_unalignedNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNSharedRuntimeDd2l6Fd_x_;
+text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__;
+text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_;
+text: .text%__1cFStateP_sub_Op_RShiftL6MpknENode__v_;
+text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o;
+text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o;
+text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_;
+text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_;
+text: .text%__1cQregI_to_stkINodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRorI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__;
+text: .text%__1cMregD_lowOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cLConvD2INodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cSconvI2F_helperNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cHMonitor2T6M_v_;
+text: .text%__1cFTypeDFxmeet6kMpknEType__3_;
+text: .text%__1cFMutex2T6M_v_;
+text: .text%lwp_cond_destroy: os_solaris.o;
+text: .text%lwp_mutex_destroy: os_solaris.o;
+text: .text%__1cQdivI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cQregP_to_stkPNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQstkI_to_regFNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQregI_to_stkINodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQRelocationHolderEplus6kMi_0_;
+text: .text%__1cUPSMarkSweepDecoratorPadjust_pointers6M_v_;
+text: .text%__1cUPSMarkSweepDecoratorKprecompact6M_v_;
+text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_;
+text: .text%__1cHCompileQgrow_alias_types6M_v_;
+text: .text%__1cISubLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cOcmovII_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_;
+text: .text%__1cKloadUBNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cTloadD_unalignedNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cJLoadFNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cOloadConL13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRcompL_reg_conNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQaddF_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cICmpDNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cKsplit_once6FpnMPhaseIterGVN_pnENode_333_v_: cfgnode.o;
+text: .text%__1cLLShiftLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cJloadFNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cJCMoveNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cOPhaseIdealLoopOdo_range_check6MpnNIdealLoopTree_rnJNode_List__v_;
+text: .text%__1cSconvD2I_helperNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIGraphKitPdstore_rounding6MpnENode__2_;
+text: .text%__1cJloadINodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cSdivL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRloadConP_pollNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIModINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cZCallDynamicJavaDirectNodeKmethod_set6Mi_v_;
+text: .text%__1cZCallDynamicJavaDirectNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSconvD2I_helperNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cZCallDynamicJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cUmulL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQdivL_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cUdivL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cUmulL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cUVirtualCallGeneratorIgenerate6MpnIJVMState__2_;
+text: .text%__1cNObjectMonitor2t6M_v_;
+text: .text%__1cIMulINodeKadd_opcode6kM_i_: classes.o;
+text: .text%__1cIMulINodeKmul_opcode6kM_i_: classes.o;
+text: .text%__1cQdivD_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJCmpD3NodeGOpcode6kM_i_;
+text: .text%__1cJloadDNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIMinINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__;
+text: .text%__1cQmulF_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%JVM_MonitorNotify;
+text: .text%__1cFBlockNset_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_;
+text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_;
+text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_sparc.o;
+text: .text%__1cKstoreFNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cSstring_compareNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRtestI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cVshrL_reg_imm6_L2INodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_;
+text: .text%__1cOloadConL13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__;
+text: .text%__1cINegDNodeGOpcode6kM_i_;
+text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_;
+text: .text%__1cOimmI_32_63OperIconstant6kM_i_: ad_sparc_clone.o;
+text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_;
+text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cSsubL_reg_reg_2NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOcmovII_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOstackSlotPOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%jni_GetMethodID: jni.o;
+text: .text%__1cQshlL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIMulINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cNminI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cRshlI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cOloadConL13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_;
+text: .text%__1cOMacroAssemblerDjmp6MpnMRegisterImpl_ipkci_v_;
+text: .text%__1cIDivLNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%JVM_GetClassDeclaredConstructors;
+text: .text%__1cUdivL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_;
+text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_;
+text: .text%__1cUmulL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQsubD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cUdivL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQregP_to_stkPNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSconvI2F_helperNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_;
+text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_;
+text: .text%__1cOcmovIF_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQcmovI_reg_ltNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNloadConL0NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKo1RegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cSsubL_reg_reg_1NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_;
+text: .text%__1cQshrL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cRsarL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJloadFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRT_sparc.o;
+text: .text%__1cRorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQshrL_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQshrI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOimmI_32_63OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cOloadI_fregNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLConvI2DNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_;
+text: .text%__1cQdivL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKCompiledICKcached_oop6kM_pnHoopDesc__;
+text: .text%__1cISubFNodeGOpcode6kM_i_;
+text: .text%JVM_IsThreadAlive;
+text: .text%__1cXPartialSubtypeCheckNodeGOpcode6kM_i_;
+text: .text%__1cLconvI2BNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOcmovIF_immNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRsarL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQaddI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cRtestI_reg_immNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRtestI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRsubI_zero_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSmulL_reg_reg_1NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cQaddD_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOcmovPI_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKConv2BNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cSstring_compareNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQregL_to_stkLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cQjava_lang_SystemTout_offset_in_bytes6F_i_;
+text: .text%__1cQjava_lang_SystemSin_offset_in_bytes6F_i_;
+text: .text%__1cWPredictedCallGeneratorIgenerate6MpnIJVMState__2_;
+text: .text%__1cSconvI2F_helperNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNCallGeneratorRfor_uncommon_trap6FpnIciMethod_nODeoptimizationLDeoptReason_n0CLDeoptAction__p0_;
+text: .text%__1cOcmovPP_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cZUncommonTrapCallGeneratorIgenerate6MpnIJVMState__2_;
+text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIMulFNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cGThread2t6M_v_;
+text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_;
+text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_;
+text: .text%__1cFTypeFFxmeet6kMpknEType__3_;
+text: .text%__1cCosScurrent_stack_size6F_I_;
+text: .text%__1cIOSThreadNpd_initialize6M_v_;
+text: .text%__1cCosScurrent_stack_base6F_pC_;
+text: .text%__1cIOSThread2t6MpFpv_i1_v_;
+text: .text%__1cIMulDNodeImul_ring6kMpknEType_3_3_;
+text: .text%__1cCosRinitialize_thread6F_v_;
+text: .text%__1cSdivL_reg_reg_1NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cCosPpd_start_thread6FpnGThread__v_;
+text: .text%__1cLConvI2FNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_;
+text: .text%jni_NewObjectArray: jni.o;
+text: .text%__1cSsubL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovIF_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%JVM_SetThreadPriority;
+text: .text%__1cCosMstart_thread6FpnGThread__v_;
+text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_;
+text: .text%JVM_GetStackAccessControlContext;
+text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cFStateM_sub_Op_ModI6MpknENode__v_;
+text: .text%JVM_Read;
+text: .text%__1cOcmovPI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_;
+text: .text%__1cFStateM_sub_Op_SubL6MpknENode__v_;
+text: .text%__1cKCompiledICMstub_address6kM_pC_;
+text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__;
+text: .text%__1cQsubL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQmodI_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cISubDNodeGOpcode6kM_i_;
+text: .text%__1cQmodI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__;
+text: .text%__1cRsarI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: callGenerator.o;
+text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__;
+text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_;
+text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_;
+text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__;
+text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_;
+text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_;
+text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_;
+text: .text%jni_GetStaticFieldID: jni.o;
+text: .text%__1cNloadKlassNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cLstoreF0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_;
+text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__;
+text: .text%__1cFStateO_sub_Op_CMoveI6MpknENode__v_;
+text: .text%__1cENodeEgetf6kM_f_;
+text: .text%JVM_DesiredAssertionStatus;
+text: .text%__1cKJavaThreadKinitialize6M_v_;
+text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_;
+text: .text%__1cLConvL2DNodeGOpcode6kM_i_;
+text: .text%__1cQThreadStatistics2t6M_v_;
+text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_;
+text: .text%__1cQshrL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQsubD_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cGThreadFstart6Fp0_v_;
+text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: interp_masm_sparc.o;
+text: .text%__1cPconvI2D_memNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%jni_GetFloatArrayRegion: jni.o;
+text: .text%__1cJMarkSweepMfollow_stack6F_v_;
+text: .text%__1cNimmP_pollOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cRtestI_reg_immNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cJMemRegionMintersection6kMk0_0_;
+text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_;
+text: .text%__1cKJavaThreadDrun6M_v_;
+text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__;
+text: .text%JVM_IsArrayClass;
+text: .text%jni_CallStaticVoidMethod: jni.o;
+text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__;
+text: .text%__1cLConvF2DNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_;
+text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_;
+text: .text%__1cKstoreBNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cFKlassNexternal_name6kM_pkc_;
+text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_;
+text: .text%__1cKstoreLNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cOGenerateOopMapKinterp_all6M_v_;
+text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_;
+text: .text%__1cTloadD_unalignedNodeIpipeline6kM_pknIPipeline__;
+text: .text%JVM_GetClassName;
+text: .text%__1cOloadI_fregNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_;
+text: .text%__1cOGenerateOopMapKinit_state6M_v_;
+text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_;
+text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_;
+text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_;
+text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__;
+text: .text%__1cOcmovIF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__;
+text: .text%__1cLConvD2INodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cOcmovIL_immNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_sparc.o;
+text: .text%__1cINodeHashEgrow6M_v_;
+text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_;
+text: .text%__1cOcmovPP_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cMloadConDNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cIMaxINodeIadd_ring6kMpknEType_3_3_;
+text: .text%__1cJloadSNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_;
+text: .text%__1cLConvF2DNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%JVM_Open;
+text: .text%__1cRInvocationCounterFreset6M_v_;
+text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_;
+text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_;
+text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_;
+text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_;
+text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_;
+text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_;
+text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_;
+text: .text%__1cSmulL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSdivL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_;
+text: .text%JVM_StartThread;
+text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o;
+text: .text%jni_GetStaticObjectField: jni.o;
+text: .text%__1cJArrayDataKcell_count6M_i_: ciMethodData.o;
+text: .text%__1cIGraphKitSprecision_rounding6MpnENode__2_;
+text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_;
+text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: generateOptoStub.o;
+text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__;
+text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_;
+text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_;
+text: .text%__1cTLoadD_unalignedNodeGOpcode6kM_i_;
+text: .text%__1cQshrI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%JVM_FreeMemory;
+text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%JVM_TotalMemory;
+text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cMloadConDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQdivL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOcmovIL_immNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPconvI2D_memNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSandL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cHThreadsGremove6FpnKJavaThread__v_;
+text: .text%__1cIOSThread2T6M_v_;
+text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_;
+text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_;
+text: .text%__1cQandI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_;
+text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_;
+text: .text%__1cLConvD2INodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cIOSThreadKpd_destroy6M_v_;
+text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_;
+text: .text%__1cKJavaThread2T6M_v_;
+text: .text%__1cGThread2T5B6M_v_;
+text: .text%__1cCosLfree_thread6FpnIOSThread__v_;
+text: .text%__1cFStateM_sub_Op_MulI6MpknENode__v_;
+text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_;
+text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o;
+text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_;
+text: .text%__1cSTailCalljmpIndNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_;
+text: .text%__1cSTailCalljmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQsubF_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cRNativeMovConstRegEdata6kM_i_;
+text: .text%__1cbFunnecessary_membar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cLcmpF_ccNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_;
+text: .text%jni_CallObjectMethod: jni.o;
+text: .text%__1cQaddD_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPconvD2F_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__;
+text: .text%__1cQdivD_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cZInterpreterMacroAssemblerbEset_method_data_pointer_offset6MpnMRegisterImpl__v_;
+text: .text%__1cIMaxINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o;
+text: .text%__1cHTypeInt2t6Miii_v_;
+text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_;
+text: .text%__1cOcmovIL_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKConv2BNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cSandL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNloadRangeNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRshlI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_;
+text: .text%__1cQregL_to_stkLNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_;
+text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRT_sparc.o;
+text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_;
+text: .text%__1cLOptoRuntimeYcurrent_time_millis_Type6F_pknITypeFunc__;
+text: .text%__1cHTypePtrFxdual6kM_pknEType__;
+text: .text%__1cOstackSlotIOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cOstackSlotIOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%JVM_MonitorNotifyAll;
+text: .text%__1cJloadDNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOstackSlotIOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cKCMoveLNodeGOpcode6kM_i_;
+text: .text%__1cRshlL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cMnegD_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cODeoptimizationVtrap_state_has_reason6Fii_i_;
+text: .text%__1cTloadD_unalignedNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJloadDNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNiRegIsafeOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cNloadConP0NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o;
+text: .text%__1cIAddDNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cMnegD_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSandL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o;
+text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o;
+text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_;
+text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o;
+text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o;
+text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o;
+text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o;
+text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o;
+text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o;
+text: .text%__1cSsubL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSmulL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSdivL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o;
+text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cOloadI_fregNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRtestI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_;
+text: .text%__1cJLoadSNodeMstore_Opcode6kM_i_: classes.o;
+text: .text%__1cLstoreF0NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIMinINodeIadd_ring6kMpknEType_3_3_;
+text: .text%JVM_GetInheritedAccessControlContext;
+text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__;
+text: .text%__1cNmaxI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%JVM_NativePath;
+text: .text%__1cOMacroAssemblerNflush_windows6M_v_;
+text: .text%__1cSsubD_regD_regDNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cVCallRuntimeDirectNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_;
+text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_;
+text: .text%__1cHCompileKinit_start6MpnJStartNode__v_;
+text: .text%__1cKg3RegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cVinline_cache_regPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cKstorePNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cQObjectStartArrayFreset6M_v_;
+text: .text%__1cPconvI2D_memNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_;
+text: .text%__1cQaddD_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLConvF2INodeGOpcode6kM_i_;
+text: .text%__1cVCallRuntimeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_;
+text: .text%__1cIPSOldGenPadjust_pointers6M_v_;
+text: .text%__1cVCallRuntimeDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cOcmovPI_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIPSOldGenHcompact6M_v_;
+text: .text%__1cMtlsLoadPNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cLcmpF_ccNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cVCallRuntimeDirectNodeKmethod_set6Mi_v_;
+text: .text%__1cKimmI11OperIconstant6kM_i_: ad_sparc_clone.o;
+text: .text%__1cQcmovI_reg_gtNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLstoreP0NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cOcmovIF_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovLL_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%jni_GetStaticMethodID: jni.o;
+text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MipnMRegisterImpl__v_;
+text: .text%__1cRtestI_reg_immNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_;
+text: .text%__1cPconvF2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOPhaseIdealLoopKdo_peeling6MpnNIdealLoopTree_rnJNode_List__v_;
+text: .text%__1cOcmovLL_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%jint_cmp: parse2.o;
+text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__;
+text: .text%__1cNloadConL0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_;
+text: .text%__1cVMoveL2D_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cIMulDNodeGmul_id6kM_pknEType__: classes.o;
+text: .text%__1cIGraphKitTdprecision_rounding6MpnENode__2_;
+text: .text%__1cOcmovLL_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLConvD2FNodeGOpcode6kM_i_;
+text: .text%__1cIMulFNodeImul_ring6kMpknEType_3_3_;
+text: .text%__1cWloadConI_x41f00000NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKcmpOpFOperFccode6kM_i_: ad_sparc_clone.o;
+text: .text%__1cLstoreC0NodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cQregL_to_stkLNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cZregDHi_regDLo_to_regDNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOcmovIF_immNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovDF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cQaddL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cZregDHi_regDLo_to_regDNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%JVM_Close;
+text: .text%__1cSmulD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQsubL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIMulDNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cKstoreFNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSsubD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSaddD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSaddP_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cXconvI2D_regDHi_regDNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKstoreFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__;
+text: .text%__1cOMacroAssemblerNget_vm_result6MpnMRegisterImpl__v_;
+text: .text%__1cQsubF_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cZInterpreterMacroAssemblerbIcompute_extra_locals_size_in_bytes6MpnMRegisterImpl_22_v_;
+text: .text%__1cLcmpF_ccNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPMultiBranchDataScompute_cell_count6FpnOBytecodeStream__i_;
+text: .text%__1cPorI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cSxorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPconvI2D_memNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQdivI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLconvI2BNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cISubFNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cWloadConI_x43300000NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cWloadConI_x41f00000NodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSmulI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOtailjmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
+text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_;
+text: .text%__1cbFunnecessary_membar_volatileNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cJSubFPNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cFParseNdo_instanceof6M_v_;
+text: .text%__1cLconvI2BNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cIGraphKitOgen_instanceof6MpnENode_2_2_;
+text: .text%__1cbFunnecessary_membar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cRshrL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cJloadBNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cQdivI_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIDivLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cLConvI2DNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cSmulD_regD_regDNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOstackSlotLOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cKConv2BNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cQshlI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_;
+text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_;
+text: .text%__1cJloadDNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOcmovPP_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQsubF_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%jni_NewObjectV: jni.o;
+text: .text%__1cOcmovLI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__;
+text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_;
+text: .text%__1cXPartialSubtypeCheckNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%jni_EnsureLocalCapacity;
+text: .text%__1cLstoreI0NodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cIAddFNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o;
+text: .text%__1cLConvD2INodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__;
+text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_;
+text: .text%__1cIDivDNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__;
+text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSaddD_regD_regDNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_;
+text: .text%__1cQsubD_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovPP_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNSafePointNodeQpeek_monitor_obj6kM_pnENode__;
+text: .text%__1cJloadFNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSaddI_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cFParsePdo_monitor_exit6M_v_;
+text: .text%__1cObranchConFNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cObranchConFNodeJlabel_set6MrnFLabel_I_v_;
+text: .text%__1cSconvF2I_helperNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cSmembar_releaseNodeIadr_type6kM_pknHTypePtr__;
+text: .text%__1cObranchConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLcmpD_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cJloadLNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cISubDNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_;
+text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_;
+text: .text%__1cNSafePointNodeQpeek_monitor_box6kM_pnENode__;
+text: .text%__1cFTypeFFxdual6kM_pknEType__;
+text: .text%__1cICmpFNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cKVtableStubRpd_code_alignment6F_i_;
+text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cKloadUBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cTloadL_unalignedNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cINegDNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cLConvI2FNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cOcmovLL_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cTloadL_unalignedNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cTloadL_unalignedNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cKloadUBNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cXconvI2D_regDHi_regDNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cZInterpreterMacroAssemblerbFtest_invocation_counter_for_mdp6MpnMRegisterImpl_22rnFLabel__v_;
+text: .text%__1cXconvI2D_regDHi_regDNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSvframeArrayElementHfill_in6MpnOcompiledVFrame__v_;
+text: .text%__1cFTypeDFxdual6kM_pknEType__;
+text: .text%__1cSaddD_regD_regDNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cZInterpreterMacroAssemblerbAincrement_backedge_counter6MpnMRegisterImpl_2_v_;
+text: .text%__1cZInterpreterMacroAssemblerbBtest_backedge_count_for_osr6MpnMRegisterImpl_22_v_;
+text: .text%__1cSmulL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cOcmovPI_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKEntryPoint2t6M_v_;
+text: .text%__1cTloadD_unalignedNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cZregDHi_regDLo_to_regDNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOcompiledVFrameImonitors6kM_pnNGrowableArray4CpnLMonitorInfo____;
+text: .text%__1cOcompiledVFrameLexpressions6kM_pnUStackValueCollection__;
+text: .text%__1cHciKlassOsuper_of_depth6MI_p0_;
+text: .text%__1cOcompiledVFrameGlocals6kM_pnUStackValueCollection__;
+text: .text%__1cOcompiledVFrameGmethod6kM_pnNmethodOopDesc__;
+text: .text%__1cJimmP0OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cOcompiledVFrameHraw_bci6kM_i_;
+text: .text%__1cQshrI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cWloadConI_x43300000NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_;
+text: .text%__1cMTailJumpNodeKmatch_edge6kMI_I_;
+text: .text%__1cWloadConI_x41f00000NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cODeoptimizationbJupdate_method_data_from_interpreter6FnQmethodDataHandle_ii_v_;
+text: .text%__1cIimmDOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cFframeZinterpreter_frame_set_mdx6Mi_v_;
+text: .text%__1cOstackSlotLOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cOstackSlotLOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cTloadD_unalignedNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIModLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cOtailjmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cSmulD_regD_regDNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cINegFNodeGOpcode6kM_i_;
+text: .text%__1cSsubD_regD_regDNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cJScopeDescImonitors6M_pnNGrowableArray4CpnMMonitorValue____;
+text: .text%__1cJScopeDescLexpressions6M_pnNGrowableArray4CpnKScopeValue____;
+text: .text%__1cJScopeDescGlocals6M_pnNGrowableArray4CpnKScopeValue____;
+text: .text%JVM_GetComponentType;
+text: .text%__1cQdivI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%Unsafe_DefineClass1;
+text: .text%__1cOcmovII_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLvframeArrayPunpack_to_stack6MrnFframe_i_v_;
+text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_;
+text: .text%__1cLConvF2DNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cSvframeArrayElementDbci6kM_i_;
+text: .text%__1cVMoveF2I_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%JVM_GetCPFieldModifiers;
+text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_;
+text: .text%__1cNnmethodLocker2t6MpC_v_;
+text: .text%__1cNSharedRuntimebJcontinuation_for_implicit_exception6FpnKJavaThread_pCn0AVImplicitExceptionKind__3_;
+text: .text%__1cODeoptimizationNuncommon_trap6FpnKJavaThread_i_pn0ALUnrollBlock__;
+text: .text%__1cODeoptimizationTuncommon_trap_inner6FpnKJavaThread_i_v_;
+text: .text%__1cODeoptimizationNunpack_frames6FpnKJavaThread_i_nJBasicType__;
+text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__;
+text: .text%__1cZInterpreterMacroAssemblerXindex_check_without_pop6MpnMRegisterImpl_2i22_v_;
+text: .text%__1cRSignatureIteratorKparse_type6M_i_;
+text: .text%__1cPconvD2F_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__;
+text: .text%__1cODeoptimizationRlast_frame_adjust6Fii_i_;
+text: .text%__1cQsubD_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%JVM_DefineClass;
+text: .text%JVM_InvokeMethod;
+text: .text%__1cOcmovPP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%jni_NewDirectByteBuffer;
+text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o;
+text: .text%jni_AllocObject: jni.o;
+text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_;
+text: .text%__1cTmembar_volatileNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cMnegD_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%Unsafe_AllocateInstance;
+text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o;
+text: .text%__1cQstkI_to_regINodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_;
+text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_;
+text: .text%__1cYinternal_word_RelocationGtarget6M_pC_;
+text: .text%__1cJStubQdDueueKremove_all6M_v_;
+text: .text%__1cMloadConFNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cPconvI2D_memNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPorL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cZInterpreterMacroAssemblerLindex_check6MpnMRegisterImpl_2i22_v_;
+text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_;
+text: .text%__1cSaddL_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovPI_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
+text: .text%__1cKstfSSFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cMloadConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_;
+text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_;
+text: .text%__1cTmembar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPconvI2L_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovII_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_;
+text: .text%__1cJCmpF3NodeGOpcode6kM_i_;
+text: .text%__1cLMoveL2DNodeGOpcode6kM_i_;
+text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_;
+text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__;
+text: .text%__1cOcmovII_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIAddFNodeJideal_reg6kM_I_: classes.o;
+text: .text%JVM_NewArray;
+text: .text%__1cHOrLNodeGOpcode6kM_i_;
+text: .text%__1cLStrCompNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cLOopMapCache2t6M_v_;
+text: .text%__1cNTemplateTableHconvert6F_v_;
+text: .text%__1cOcmovDF_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cZInterpreterMacroAssemblerFpop_l6MpnMRegisterImpl__v_;
+text: .text%__1cOcmovLI_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cSMachBreakpointNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSInterpreterRuntimeQcreate_exception6FpnKJavaThread_pc3_v_;
+text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o;
+text: .text%__1cKPSYoungGenKprecompact6M_v_;
+text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_;
+text: .text%__1cSconvD2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cMnegF_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cHThreadsLgc_prologue6F_v_;
+text: .text%__1cHThreadsLgc_epilogue6F_v_;
+text: .text%__1cPconvI2L_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cPconvD2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_;
+text: .text%__1cUParallelScavengeHeapHcollect6MnHGCCauseFCause__v_;
+text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_;
+text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_;
+text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_;
+text: .text%__1cLconvP2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
+text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_;
+text: .text%__1cIciMethodVget_osr_flow_analysis6Mi_pnKciTypeFlow__;
+text: .text%__1cLMoveF2INodeGOpcode6kM_i_;
+text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_;
+text: .text%__1cMStartOSRNodeKosr_domain6F_pknJTypeTuple__;
+text: .text%__1cVVM_ParallelGCSystemGCEdoit6M_v_;
+text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_;
+text: .text%__1cOMacroAssemblerPbreakpoint_trap6M_v_;
+text: .text%__1cJBasicLockHmove_to6MpnHoopDesc_p0_v_;
+text: .text%__1cJMarkSweepNrestore_marks6F_v_;
+text: .text%__1cJMarkSweepMadjust_marks6F_v_;
+text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_;
+text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_;
+text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_;
+text: .text%__1cMStubCodeMark2T6M_v_;
+text: .text%__1cNCallGeneratorHfor_osr6FpnIciMethod_i_p0_;
+text: .text%__1cLClassLoaderSget_system_package6FpkcpnGThread__pnHoopDesc__;
+text: .text%__1cJPSPermGenKprecompact6M_v_;
+text: .text%JVM_GC;
+text: .text%__1cIPSOldGenKprecompact6M_v_;
+text: .text%__1cUPSMarkSweepDecoratorbIset_destination_decorator_perm_gen6F_v_;
+text: .text%__1cUPSMarkSweepDecoratorbHset_destination_decorator_tenured6F_v_;
+text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_;
+text: .text%__1cQmulL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cUPSAdaptiveSizePolicyUmajor_collection_end6MInHGCCauseFCause__v_;
+text: .text%__1cUPSAdaptiveSizePolicyWmajor_collection_begin6M_v_;
+text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_;
+text: .text%__1cJPSPermGenQcompute_new_size6MI_v_;
+text: .text%__1cKPSYoungGenHcompact6M_v_;
+text: .text%JVM_GetSystemPackage;
+text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_;
+text: .text%__1cKPSYoungGenPadjust_pointers6M_v_;
+text: .text%__1cQUncommonTrapBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cNExceptionBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_;
+text: .text%__1cJCodeCacheLgc_prologue6F_v_;
+text: .text%__1cJCodeCacheLgc_epilogue6F_v_;
+text: .text%__1cIXorINodeIadd_ring6kMpknEType_3_3_;
+text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cQregL_to_stkLNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cKcmpOpFOperKless_equal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cOcmovPI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSmulL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cOcmovIF_immNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cKCMoveDNodeGOpcode6kM_i_;
+text: .text%__1cJLoadDNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cIMulFNodeGmul_id6kM_pknEType__: classes.o;
+text: .text%__1cNStubGeneratorLstub_prolog6MpnMStubCodeDesc__v_: stubGenerator_sparc.o;
+text: .text%__1cQaddL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%jni_GetStringRegion: jni.o;
+text: .text%JVM_RawMonitorCreate;
+text: .text%__1cJloadLNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cIMulFNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_;
+text: .text%__1cOstackSlotPOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cOstackSlotPOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cOstackSlotPOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_;
+text: .text%JVM_Sleep;
+text: .text%__1cLConvL2DNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cQstkI_to_regFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o;
+text: .text%__1cRorI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%Unsafe_CompareAndSwapInt;
+text: .text%JVM_Lseek;
+text: .text%__1cNloadRangeNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cPconvD2F_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o;
+text: .text%__1cPconvF2D_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQmulI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQmulF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cMnegF_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cSconvF2I_helperNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQmulD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOcmovLI_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cPMultiBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_;
+text: .text%__1cQregP_to_stkPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cZInterpreterMacroAssemblerQtest_mdp_data_at6MipnMRegisterImpl_rnFLabel_2_v_;
+text: .text%__1cQstkI_to_regINodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovLI_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cGciType2t6MnJBasicType__v_;
+text: .text%__1cJLoadBNodeMstore_Opcode6kM_i_: classes.o;
+text: .text%__1cQaddF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cETypeEmake6Fn0AFTYPES__pk0_;
+text: .text%__1cSconvF2I_helperNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cRsarL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSstring_compareNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%jni_GetEnv;
+text: .text%__1cJloadDNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cQstkI_to_regINodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cSstring_compareNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_sparc.o;
+text: .text%Unsafe_GetNativeByte;
+text: .text%JVM_NanoTime;
+text: .text%__1cCosNjavaTimeNanos6F_x_;
+text: .text%__1cOMacroAssemblerOrestore_thread6MkpnMRegisterImpl__v_;
+text: .text%__1cVcompiledICHolderKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cQandL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cIimmFOperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cHThreadsLnmethods_do6F_v_;
+text: .text%__1cKcmpOpFOperGnegate6M_v_: ad_sparc_clone.o;
+text: .text%__1cICodeBlobFflush6M_v_;
+text: .text%__1cFParseMdo_anewarray6M_v_;
+text: .text%__1cSdivL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%jni_CallVoidMethod: jni.o;
+text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__;
+text: .text%__1cObranchConFNodeGnegate6M_v_: ad_sparc_misc.o;
+text: .text%__1cFParseOdo_tableswitch6M_v_;
+text: .text%__1cOcmovIF_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLConvI2FNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cSaddL_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cLstoreC0NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%Unsafe_GetNativeFloat;
+text: .text%__1cOstackSlotFOperEtype6kM_pknEType__: ad_sparc.o;
+text: .text%__1cHnmethodFflush6M_v_;
+text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_;
+text: .text%__1cKo2RegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cQregI_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_;
+text: .text%__1cParrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
+text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_;
+text: .text%__1cWloadConI_x43300000NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cFParseQdo_monitor_enter6M_v_;
+text: .text%__1cPorL_reg_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cLstoreC0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%JVM_FindPrimitiveClass;
+text: .text%__1cVMoveL2D_stack_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_;
+text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_;
+text: .text%__1cSmodL_reg_imm13NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cRshrI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
+text: .text%__1cSsubL_reg_reg_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cUmulL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cIDivDNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cPconvI2F_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__;
+text: .text%__1cOstackSlotFOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cUdivL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cRSignatureIteratorHiterate6M_v_;
+text: .text%__1cOcmovLL_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJname2type6Fpkc_nJBasicType__;
+text: .text%__1cSmulL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__;
+text: .text%__1cOloadConL13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cKcmpOpFOperHgreater6kM_i_: ad_sparc_clone.o;
+text: .text%__1cIDivDNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cOMacroAssemblerKget_thread6M_v_;
+text: .text%__1cOcmovDF_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovIF_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cSconvI2F_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cKVtableStub2n6FIi_pv_;
+text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_;
+text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o;
+text: .text%__1cPconvD2F_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cIciMethodMnative_entry6M_pC_;
+text: .text%__1cVMoveF2I_stack_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__;
+text: .text%__1cPorL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cPconvD2F_regNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cIciSymbolHas_utf86M_pkc_;
+text: .text%__1cQandI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_;
+text: .text%__1cMnegD_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cFStateO_sub_Op_CMoveP6MpknENode__v_;
+text: .text%__1cQmulD_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOMacroAssemblerZtotal_frame_size_in_bytes6Mi_i_;
+text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_;
+text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_;
+text: .text%Unsafe_StaticFieldOffset;
+text: .text%__1cQmulI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_;
+text: .text%__1cQaddI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOcmovLI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%JVM_GetClassContext;
+text: .text%Unsafe_StaticFieldBaseFromField;
+text: .text%Unsafe_EnsureClassInitialized;
+text: .text%__1cOcmovIF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pCi_v_;
+text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_;
+text: .text%Unsafe_GetObjectVolatile;
+text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_;
+text: .text%__1cKstoreFNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cVMoveL2D_stack_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cJLoadLNodeMstore_Opcode6kM_i_: classes.o;
+text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__;
+text: .text%__1cOloadConL13NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_;
+text: .text%__1cLstoreF0NodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cPconvI2D_memNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cETypeFxdual6kM_pk0_;
+text: .text%__1cJOopMapSetQsingular_oop_map6M_pnGOopMap__;
+text: .text%__1cKimmU13OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_;
+text: .text%__1cZInterpreterMacroAssemblerbCincrement_invocation_counter6MpnMRegisterImpl_2_v_;
+text: .text%__1cZInterpreterMacroAssemblerQaccess_local_int6MpnMRegisterImpl_2_v_;
+text: .text%__1cZInterpreterMacroAssemblerWempty_expression_stack6M_v_;
+text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_;
+text: .text%__1cOcmovIL_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_;
+text: .text%__1cOtailjmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_;
+text: .text%__1cCosEstat6FpkcpnEstat__i_;
+text: .text%__1cQregF_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o;
+text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o;
+text: .text%__1cMMonitorChunk2t6Mi_v_;
+text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__;
+text: .text%__1cOPhaseIdealLoopJclone_iff6MpnHPhiNode_pnNIdealLoopTree__pnIBoolNode__;
+text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o;
+text: .text%__1cMMonitorValue2t6MpnTDebugInfoReadStream__v_;
+text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_;
+text: .text%__1cPorL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cLOptoRuntimeMrethrow_Type6F_pknITypeFunc__;
+text: .text%jni_SetStaticObjectField: jni.o;
+text: .text%jni_RegisterNatives: jni.o;
+text: .text%__1cFframebLprevious_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_;
+text: .text%__1cQshlL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%JVM_GetClassDeclaredFields;
+text: .text%__1cCosMuser_handler6F_pv_;
+text: .text%JVM_IsSameClassPackage;
+text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_;
+text: .text%__1cKJavaThreadRadd_monitor_chunk6MpnMMonitorChunk__v_;
+text: .text%__1cKJavaThreadUremove_monitor_chunk6MpnMMonitorChunk__v_;
+text: .text%__1cVMoveL2D_stack_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNTemplateTableGiconst6Fi_v_;
+text: .text%__1cLConvF2INodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%JVM_LoadLibrary;
+text: .text%JVM_IsSupportedJNIVersion;
+text: .text%Unsafe_ObjectFieldOffset;
+text: .text%__1cZInterpreterMacroAssemblerYtest_method_data_pointer6MrnFLabel__v_;
+text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_;
+text: .text%__1cZInterpreterMacroAssemblerSget_cpool_and_tags6MpnMRegisterImpl_2_v_;
+text: .text%__1cIAddDNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o;
+text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_;
+text: .text%__1cNTemplateTableH_return6FnITosState__v_;
+text: .text%__1cHOrLNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cKimmP13OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cLConvD2FNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_;
+text: .text%__1cHnmethodbJcontinuation_for_implicit_exception6MpC_1_;
+text: .text%__1cNSharedRuntimeEdrem6Fdd_d_;
+text: .text%__1cPstoreI_FregNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cTloadD_unalignedNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cOloadI_fregNodeOmemory_operand6kM_pknIMachOper__;
+text: .text%__1cLconvP2BNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cCosZvm_allocation_granularity6F_i_;
+text: .text%__1cMTailJumpNodeGOpcode6kM_i_;
+text: .text%__1cTloadD_unalignedNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_;
+text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o;
+text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_;
+text: .text%__1cNRegisterSaverWrestore_live_registers6FpnOMacroAssembler__v_;
+text: .text%__1cLTypeInstPtrOxmeet_unloaded6kMpk0_2_;
+text: .text%__1cRtestI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_;
+text: .text%__1cWImplicitExceptionTable2t6MpknHnmethod__v_;
+text: .text%__1cWImplicitExceptionTableCat6kMI_I_;
+text: .text%__1cFParseVcatch_call_exceptions6MrnYciExceptionHandlerStream__v_;
+text: .text%jni_GetJavaVM;
+text: .text%__1cOcmovDF_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%jni_MonitorEnter: jni.o;
+text: .text%jni_MonitorExit: jni.o;
+text: .text%__1cLConvL2DNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cULinearLeastSquareFit2t6MI_v_;
+text: .text%__1cQdivL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__;
+text: .text%__1cNReservedSpace2t6MI_v_;
+text: .text%__1cSCardTableExtensionVresize_covered_region6MnJMemRegion__v_;
+text: .text%__1cOloadI_fregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_;
+text: .text%__1cIAddDNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cJloadFNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cKConv2BNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cLConvI2DNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cSconvD2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%jni_Throw: jni.o;
+text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_;
+text: .text%__1cLMoveL2DNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cIDivINodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cISubDNodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cPstoreI_FregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cINegFNodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cOLibraryCallKitXgenerate_current_thread6MrpnENode__2_;
+text: .text%__1cOMacroAssemblerEfneg6MnRFloatRegisterImplFWidth_p13_v_;
+text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRT_sparc.o;
+text: .text%__1cRtestI_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNSpaceCounters2t6MpkciIpnMMutableSpace_pnSGenerationCounters__v_;
+text: .text%__1cLcmpF_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_;
+text: .text%jni_SetObjectField: jni.o;
+text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__;
+text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_;
+text: .text%bootstrap_flush_windows;
+text: .text%__1cSdivL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cZInterpreterMacroAssemblerbCverify_oop_or_return_address6MpnMRegisterImpl_2_v_;
+text: .text%__1cFStateO_sub_Op_Conv2B6MpknENode__v_;
+text: .text%__1cNRegisterSaverTsave_live_registers6FpnOMacroAssembler_ipi_pnGOopMap__;
+text: .text%__1cSmulL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_;
+text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_;
+text: .text%__1cSsubL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_double6M_v_;
+text: .text%__1cQmulD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%Unsafe_AllocateMemory;
+text: .text%__1cSandL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%JVM_GetLastErrorString;
+text: .text%__1cQmodL_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_;
+text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_;
+text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_;
+text: .text%__1cPstoreI_FregNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_;
+text: .text%__1cSandI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cMnegD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNciMethodKlassEmake6F_p0_;
+text: .text%__1cNTemplateTableGlstore6Fi_v_;
+text: .text%__1cLConvF2INodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_;
+text: .text%__1cRcompL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLconvI2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cLConvD2FNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cSconvD2I_helperNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRsubI_zero_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKstfSSFNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cOClassPathEntry2t6M_v_;
+text: .text%__1cZInterpreterMacroAssemblerQaccess_local_ptr6MpnMRegisterImpl_2_v_;
+text: .text%__1cNTemplateTableGistore6Fi_v_;
+text: .text%__1cIRetTableUfind_jsrs_for_target6Mi_pnNRetTableEntry__;
+text: .text%__1cPconvL2I_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cUcompI_iReg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRsarI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNTemplateTableGastore6Fi_v_;
+text: .text%__1cIRetTableHadd_jsr6Mii_v_;
+text: .text%__1cMnegF_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQregF_to_stkINodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o;
+text: .text%__1cNTemplateTableGdstore6Fi_v_;
+text: .text%__1cNTemplateTableGfstore6Fi_v_;
+text: .text%jni_CallStaticObjectMethod: jni.o;
+text: .text%__1cOcmovLL_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cLconvI2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cODeoptimizationLUnrollBlockOsize_of_frames6kM_i_;
+text: .text%__1cCosGsignal6Fipv_1_;
+text: .text%__1cQaddD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cISubDNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cISubFNodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cISubFNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cNTemplateTableFlload6Fi_v_;
+text: .text%__1cNTemplateTableFiload6Fi_v_;
+text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_;
+text: .text%__1cLconvP2BNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cVMoveF2I_stack_regNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC22_v_;
+text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC2_v_;
+text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_;
+text: .text%__1cOGenerateOopMapTret_jump_targets_do6MpnOBytecodeStream_pFp0ipi_vi4_v_;
+text: .text%__1cPconvI2D_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%Unsafe_SetMemory;
+text: .text%__1cKstfSSFNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_x6MnJAssemblerJCondition_pCpnMRegisterImpl__v_;
+text: .text%__1cVMoveF2I_stack_regNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cHTypePtrKadd_offset6kMi_pk0_;
+text: .text%__1cOcmovLI_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
+text: .text%__1cNloadConL0NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cOcmovPI_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cOcmovDF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
+text: .text%__1cQsubF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cFParseRjump_if_true_fork6MpnGIfNode_ii_v_;
+text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_icc6MnJAssemblerJCondition_pCpnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableFfload6Fi_v_;
+text: .text%__1cFParsePdo_lookupswitch6M_v_;
+text: .text%__1cNTemplateTableFdload6Fi_v_;
+text: .text%__1cKstfSSFNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cINegDNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cNTemplateTableFaload6Fi_v_;
+text: .text%__1cRMachSpillCopyNodeHsize_of6kM_I_: ad_sparc.o;
+text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_;
+text: .text%__1cOGenerateOopMapRdo_multianewarray6Mii_v_;
+text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_;
+text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%jni_CallStaticObjectMethodV: jni.o;
+text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_;
+text: .text%__1cJMemRegionFminus6kMk0_0_;
+text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__;
+text: .text%__1cSInterpreterRuntimebKthrow_ArrayIndexOutOfBoundsException6FpnKJavaThread_pci_v_;
+text: .text%__1cNMemoryManager2t6M_v_;
+text: .text%__1cFStatebB_sub_Op_PartialSubtypeCheck6MpknENode__v_;
+text: .text%__1cFStateM_sub_Op_DivI6MpknENode__v_;
+text: .text%__1cUPSGenerationCounters2t6MpkciipnOPSVirtualSpace__v_;
+text: .text%__1cCosFyield6F_v_;
+text: .text%__1cQsubD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRT_sparc.o;
+text: .text%__1cIDivDNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cFParseRdo_multianewarray6M_v_;
+text: .text%__1cLOptoRuntimeTmultianewarray_Type6Fi_pknITypeFunc__;
+text: .text%__1cZInterpreterMacroAssemblerRget_constant_pool6MpnMRegisterImpl__v_;
+text: .text%__1cXPartialSubtypeCheckNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cOcmovIF_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cLMoveF2INodeLbottom_type6kM_pknEType__: classes.o;
+text: .text%__1cSconvI2D_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLstoreF0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl_2_v_;
+text: .text%__1cPstoreI_FregNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cOcmovLL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_i2_v_;
+text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_;
+text: .text%__1cMciArrayKlassRbase_element_type6M_pnGciType__;
+text: .text%JVM_GetInterfaceVersion;
+text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_2222rnFLabel__v_;
+text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNTemplateTableGfconst6Fi_v_;
+text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_;
+text: .text%__1cOcmovPI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_;
+text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_;
+text: .text%JVM_RegisterSignal;
+text: .text%JVM_FindSignal;
+text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o;
+text: .text%jio_vsnprintf;
+text: .text%__1cQshrL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_222_v_;
+text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_;
+text: .text%__1cOPSVirtualSpace2t6MnNReservedSpace_I_v_;
+text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o;
+text: .text%__1cNIdealLoopTreeUmerge_many_backedges6MpnOPhaseIdealLoop__v_;
+text: .text%__1cODeoptimizationLUnrollBlock2T6M_v_;
+text: .text%jni_GetDoubleArrayRegion: jni.o;
+text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
+text: .text%__1cLconvP2BNodeErule6kM_I_: ad_sparc_misc.o;
+text: .text%__1cKfix_parent6FpnNIdealLoopTree_1_v_: loopnode.o;
+text: .text%JVM_Available;
+text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_;
+text: .text%__1cQshlL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cZInterpreterMacroAssemblerQtop_most_monitor6M_nHAddress__;
+text: .text%__1cLstoreF0NodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_;
+text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_;
+text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_;
+text: .text%__1cSconvF2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_;
+text: .text%__1cbAconvL2D_reg_slow_fxtofNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cOstackSlotFOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cOstackSlotFOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cOstackSlotFOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
+text: .text%__1cPconvF2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cNTemplateTableGlconst6Fi_v_;
+text: .text%__1cLstoreC0NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cMPeriodicTaskGenroll6M_v_;
+text: .text%__1cMPeriodicTask2t6MI_v_;
+text: .text%__1cNTemplateTableHcastore6F_v_;
+text: .text%Unsafe_CompareAndSwapObject;
+text: .text%__1cLNamedThread2t6M_v_;
+text: .text%__1cLNamedThreadIset_name6MpkcE_v_;
+text: .text%__1cJloadDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQdivD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cWloadConI_x43300000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNTemplateTableKinitialize6F_v_;
+text: .text%__1cKcmpOpFOperJnot_equal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cPconvD2F_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_;
+text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_;
+text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_;
+text: .text%__1cNTemplateTableGdconst6Fi_v_;
+text: .text%__1cSconvF2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOcmovIF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOcmovIF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cJimmL0OperJnum_edges6kM_I_: ad_sparc_clone.o;
+text: .text%__1cSaddD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cSsubD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQregF_to_stkINodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cNTemplateTableTinvokevfinal_helper6FpnMRegisterImpl_2_v_;
+text: .text%__1cSmulD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cNTemplateTableUgenerate_vtable_call6FpnMRegisterImpl_22_v_;
+text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_;
+text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_;
+text: .text%__1cNVM_DeoptimizeEdoit6M_v_;
+text: .text%__1cMnegF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQsubL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cMVirtualSpace2t6M_v_;
+text: .text%__1cWloadConI_x41f00000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQdivI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cZregDHi_regDLo_to_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cXconvI2D_regDHi_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cKloadUBNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cNTemplateTableEidiv6F_v_;
+text: .text%__1cQstkI_to_regINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLMoveL2DNodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cLConvD2FNodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cLConvF2INodeIIdentity6MpnOPhaseTransform__pnENode__;
+text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_;
+text: .text%__1cLMoveF2INodeFValue6kMpnOPhaseTransform__pknEType__;
+text: .text%__1cLOptoRuntimeIl2f_Type6F_pknITypeFunc__;
+text: .text%__1cOMacroAssemblerUcalc_mem_param_words6MpnMRegisterImpl_2_v_;
+text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MnITosState_pnMRegisterImpl_3_v_;
+text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_2_v_;
+text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_;
+text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_;
+text: .text%__1cZInterpreterMacroAssemblerSupdate_mdp_for_ret6MnITosState_pnMRegisterImpl__v_;
+text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_;
+text: .text%__1cUInterpreterGeneratorLlock_method6M_v_;
+text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_26MpCpnMRegisterImpl_rnFLabel__v_;
+text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_1_x6MnJAssemblerJCondition_rnFLabel__v_;
+text: .text%__1cZInterpreterMacroAssemblerZget_4_byte_integer_at_bcp6MipnMRegisterImpl_2n0AKsetCCOrNot__v_;
+text: .text%__1cCosHrealloc6FpvI_1_;
+text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_;
+text: .text%__1cFStateO_sub_Op_CMoveL6MpknENode__v_;
+text: .text%__1cZInterpreterMacroAssemblerRaccess_local_long6MpnMRegisterImpl_2_v_;
+text: .text%__1cIPSOldGenPinitialize_work6Mpkci_v_;
+text: .text%__1cCosIjvm_path6Fpci_v_;
+text: .text%__1cCosNsigexitnum_pd6F_i_;
+text: .text%__1cCosScurrent_process_id6F_i_;
+text: .text%__1cINegFNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_;
+text: .text%__1cLConvL2DNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cLConvF2INodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cLConvD2FNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__;
+text: .text%__1cZInterpreterMacroAssemblerSaccess_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_;
+text: .text%__1cZInterpreterMacroAssemblerTaccess_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_;
+text: .text%__1cZInterpreterMacroAssemblerPstore_local_int6MpnMRegisterImpl_2_v_;
+text: .text%__1cZInterpreterMacroAssemblerQstore_local_long6MpnMRegisterImpl_2_v_;
+text: .text%__1cZInterpreterMacroAssemblerRstore_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_;
+text: .text%__1cZInterpreterMacroAssemblerSstore_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_;
+text: .text%__1cCosWactive_processor_count6F_i_;
+text: .text%__1cTAbstractInterpreterKinitialize6F_v_;
+text: .text%jni_NewWeakGlobalRef: jni.o;
+text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o;
+text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o;
+text: .text%__1cOGenerateOopMapTadd_to_ref_init_set6Mi_v_;
+text: .text%__1cUGcThreadCountClosureJdo_thread6MpnGThread__v_;
+text: .text%__1cNinstanceKlassSremove_osr_nmethod6MpnHnmethod__v_;
+text: .text%__1cOPSVirtualSpace2t6M_v_;
+text: .text%jni_IsInstanceOf: jni.o;
+text: .text%__1cMGCTaskThreadDrun6M_v_;
+text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_;
+text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o;
+text: .text%jni_CallStaticVoidMethodV: jni.o;
+text: .text%jni_CallStaticBooleanMethod: jni.o;
+text: .text%__1cMGCTaskThread2t6MpnNGCTaskManager_II_v_;
+text: .text%__1cOtailjmpIndNodeIpipeline6kM_pknIPipeline__;
+text: .text%__1cMGCTaskThreadFstart6M_v_;
+text: .text%__1cQObjectStartArrayKinitialize6MnJMemRegion__v_;
+text: .text%__1cQObjectStartArraySset_covered_region6MnJMemRegion__v_;
+text: .text%__1cZInterpreterMacroAssemblerbAdispatch_next_noverify_oop6MnITosState_i_v_;
+text: .text%__1cRCollectorCounters2t6Mpkci_v_;
+text: .text%__1cFParseDl2f6M_v_;
+text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_;
+text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_;
+text: .text%__1cPGCMemoryManager2t6M_v_;
+text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o;
+text: .text%__1cSInterpreterRuntimeWcreate_klass_exception6FpnKJavaThread_pcpnHoopDesc__v_;
+text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o;
+text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_;
+text: .text%__1cSInterpreterRuntimeSupdate_mdp_for_ret6FpnKJavaThread_i_v_;
+text: .text%__1cPorL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_;
+text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRT_sparc.o;
+text: .text%__1cCosHSolarisQsignal_sets_init6F_v_;
+text: .text%__1cCosbDallocate_thread_local_storage6F_i_;
+text: .text%__1cUInterpreterGeneratorVrestore_native_result6M_v_;
+text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_;
+text: .text%__1cLconvP2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cVshrL_reg_imm6_L2INodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_;
+text: .text%__1cCosGstrdup6Fpkc_pc_;
+text: .text%__1cCosLinit_random6Fl_v_;
+text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_;
+text: .text%__1cCosXterminate_signal_thread6F_v_;
+text: .text%__1cCosLsignal_init6F_v_;
+text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o;
+text: .text%__1cOtailjmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_;
+text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_;
+text: .text%__1cCosbDinit_system_properties_values6F_v_;
+text: .text%__1cCosPphysical_memory6F_X_;
+text: .text%__1cHvm_exit6Fi_v_;
+text: .text%__1cLbefore_exit6FpnKJavaThread__v_;
+text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_;
+text: .text%__1cSThreadLocalStorageHpd_init6F_v_;
+text: .text%__1cVMoveF2I_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cVMoveL2D_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cWinvocationCounter_init6F_v_;
+text: .text%__1cKTypeOopPtrEmake6FnHTypePtrDPTR_i_pk0_;
+text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__;
+text: .text%__1cFParseMjump_if_join6MpnENode_2_2_;
+text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o;
+text: .text%__1cLconvP2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
+text: .text%__1cETypeRInitialize_shared6FpnHCompile__v_;
+text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_;
+text: .text%__1cVInterfaceSupport_init6F_v_;
+text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpnMRegisterImpl_pC2_v_;
+text: .text%__1cPGenerationSizerQinitialize_flags6M_v_: parallelScavengeHeap.o;
+text: .text%__1cZInterpreterMacroAssemblerPdispatch_normal6MnITosState__v_;
+text: .text%__1cJTimeStampMmilliseconds6kM_x_;
+text: .text%__1cDhpiZinitialize_socket_library6F_i_;
+text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_;
+text: .text%__1cWInlineCacheBuffer_init6F_v_;
+text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_;
+text: .text%__1cPGlobalTLABStats2t6M_v_;
+text: .text%__1cLicache_init6F_v_;
+text: .text%__1cSThreadLocalStorageEinit6F_v_;
+text: .text%__1cNThreadServiceEinit6F_v_;
+text: .text%__1cTICacheStubGeneratorVgenerate_icache_flush6MppFpCii_i_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: indexSet.o;
+text: .text%__1cPvm_init_globals6F_v_;
+text: .text%__1cMinit_globals6F_i_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_expand.o;
+text: .text%__1cMexit_globals6F_v_;
+text: .text%__1cSset_init_completed6F_v_;
+text: .text%__1cNinstanceKlassZrelease_C_heap_structures6M_v_;
+text: .text%__1cJTimeStampJupdate_to6Mx_v_;
+text: .text%__1cUParallelScavengeHeapItop_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o;
+text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interp_masm_sparc.o;
+text: .text%__1cQinterpreter_init6F_v_;
+text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_;
+text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o;
+text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: os_solaris.o;
+text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_;
+text: .text%__1cCosLsignal_wait6F_i_;
+text: .text%__1cCosNsignal_notify6Fi_v_;
+text: .text%__1cCosOsignal_init_pd6F_v_;
+text: .text%__1cCosHSolarisPinit_signal_mem6F_v_;
+text: .text%__1cCosSget_temp_directory6F_pkc_;
+text: .text%__1cCosHSolarisOlibthread_init6F_v_;
+text: .text%__1cUParallelScavengeHeapIend_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o;
+text: .text%__1cUParallelScavengeHeapEheap6F_p0_;
+text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_;
+text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_;
+text: .text%__1cKcmpOpFOperNgreater_equal6kM_i_: ad_sparc_clone.o;
+text: .text%__1cUParallelScavengeHeapMmax_capacity6kM_I_;
+text: .text%__1cUParallelScavengeHeapPpost_initialize6M_v_;
+text: .text%__1cUParallelScavengeHeapKinitialize6M_i_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o;
+text: .text%__1cZInterpreterMacroAssemblerbFset_method_data_pointer_for_bcp6M_v_;
+text: .text%__SLIP.DELETER__C: ostream.o;
+text: .text%__1cMostream_exit6F_v_;
+text: .text%__1cQostream_init_log6F_v_;
+text: .text%__1cMostream_init6F_v_;
+text: .text%__1cCosXnon_memory_address_word6F_pc_;
+text: .text%__1cCosGinit_26F_i_;
+text: .text%__1cCosEinit6F_v_;
+text: .text%__1cCosHSolarisUsynchronization_init6F_v_;
+text: .text%__1cVjni_GetLongField_addr6F_pC_;
+text: .text%__1cNIdealLoopTreeQsplit_outer_loop6MpnOPhaseIdealLoop__v_;
+text: .text%__1cRLowMemoryDetectorKinitialize6F_v_;
+text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_;
+text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_;
+text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_;
+text: .text%__1cTloadL_unalignedNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: machnode.o;
+text: .text%__1cPmanagement_init6F_v_;
+text: .text%__1cOvmStructs_init6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o;
+text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_;
+text: .text%__1cKManagementKinitialize6FpnGThread__v_;
+text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_;
+text: .text%__1cIVMThreadGcreate6F_v_;
+text: .text%__1cIVMThreadDrun6M_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o;
+text: .text%__1cLJvmtiExportNpost_vm_start6F_v_;
+text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_;
+text: .text%__1cLJvmtiExportNpost_vm_death6F_v_;
+text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_;
+text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o;
+text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o;
+text: .text%__1cVLoaderConstraintTable2t6Mi_v_;
+text: .text%__1cQregL_to_stkLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cHRetDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_;
+text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_;
+text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o;
+text: .text%__1cPVM_Version_init6F_v_;
+text: .text%__1cKVM_VersionKinitialize6F_v_;
+text: .text%__1cHRetDataJfixup_ret6MinQmethodDataHandle__pC_;
+text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o;
+text: .text%__1cQvtableStubs_init6F_v_;
+text: .text%__1cKi0RegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cKg1RegPOperKin_RegMask6kMi_pknHRegMask__;
+text: .text%__1cFVTuneEexit6F_v_;
+text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodLiveness.o;
+text: .text%__1cMMutableSpaceOobject_iterate6MpnNObjectClosure__v_;
+text: .text%__1cKvtune_init6F_v_;
+text: .text%__1cKmutex_init6F_v_;
+text: .text%__1cQaccessFlags_init6F_v_;
+text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC222_v_;
+text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_;
+text: .text%__1cOmarksweep_init6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: matcher.o;
+text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_;
+text: .text%__1cNMemoryManagerbDget_psScavenge_memory_manager6F_pnPGCMemoryManager__;
+text: .text%__1cNMemoryManagerbEget_psMarkSweep_memory_manager6F_pnPGCMemoryManager__;
+text: .text%__1cHVM_ExitEdoit6M_v_;
+text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_;
+text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o;
+text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o;
+text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o;
+text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_;
+text: .text%__1cLstoreF0NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%JNI_CreateJavaVM;
+text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_;
+text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_;
+text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o;
+text: .text%__1cIUniversePcheck_alignment6FIIpkc_v_;
+text: .text%__1cIUniverseHgenesis6FpnGThread__v_;
+text: .text%__1cVquicken_jni_functions6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o;
+text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_;
+text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_;
+text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_;
+text: .text%__1cQdivD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_;
+text: .text%__1cQjavaClasses_init6F_v_;
+text: .text%jni_ToReflectedMethod: jni.o;
+text: .text%__1cQsubD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cYjni_GetBooleanField_addr6F_pC_;
+text: .text%__1cVjni_GetByteField_addr6F_pC_;
+text: .text%__1cQaddF_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cVjni_GetCharField_addr6F_pC_;
+text: .text%__1cWjni_GetShortField_addr6F_pC_;
+text: .text%__1cUjni_GetIntField_addr6F_pC_;
+text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_;
+text: .text%__1cWjni_GetFloatField_addr6F_pC_;
+text: .text%__1cRsarL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cXjni_GetDoubleField_addr6F_pC_;
+text: .text%__1cQshlI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_;
+text: .text%JVM_InitializeSocketLibrary;
+text: .text%JVM_RegisterUnsafeMethods;
+text: .text%__1cOcmovLI_regNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cOcmovLI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cOcmovDF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%JVM_Socket;
+text: .text%__1cbEinitialize_converter_functions6F_v_;
+text: .text%JVM_SupportsCX8;
+text: .text%__1cOcmovIF_immNodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cUJvmtiEventControllerIvm_start6F_v_;
+text: .text%__1cUJvmtiEventControllerHvm_init6F_v_;
+text: .text%__1cUJvmtiEventControllerIvm_death6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o;
+text: .text%__1cKstfSSFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cLJvmtiExportRenter_start_phase6F_v_;
+text: .text%__1cLJvmtiExportQenter_live_phase6F_v_;
+text: .text%__1cSmulL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
+text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_;
+text: .text%__1cSmulI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
+text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_;
+text: .text%__1cNuniverse_init6F_i_;
+text: .text%__1cOuniverse2_init6F_v_;
+text: .text%__1cQjni_handles_init6F_v_;
+text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o;
+text: .text%Unsafe_SetNativeLong;
+text: .text%JVM_InitProperties;
+text: .text%JVM_Halt;
+text: .text%Unsafe_FreeMemory;
+text: .text%Unsafe_PageSize;
+text: .text%JVM_MaxMemory;
+text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__;
+text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%JVM_GetClassDeclaredMethods;
+text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__;
+text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_;
+text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_;
+text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_;
+text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_;
+text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_;
+text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_;
+text: .text%__1cLClassLoaderQload_zip_library6F_v_;
+text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_;
+text: .text%__1cLClassLoaderKinitialize6F_v_;
+text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_;
+text: .text%__1cMPeriodicTask2T5B6M_v_;
+text: .text%__1cQclassLoader_init6F_v_;
+text: .text%__1cMPeriodicTaskJdisenroll6M_v_;
+text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o;
+text: .text%__1cTClassLoadingServiceEinit6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: regmask.o;
+text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_;
+text: .text%__1cVRegistersForDebuggingRrestore_registers6FpnOMacroAssembler_pnMRegisterImpl__v_: assembler_sparc.o;
+text: .text%__1cVRegistersForDebuggingOsave_registers6FpnOMacroAssembler__v_: assembler_sparc.o;
+text: .text%__1cJBytecodesKinitialize6F_v_;
+text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_;
+text: .text%__1cObytecodes_init6F_v_;
+text: .text%__1cLOptoRuntimeIgenerate6FpnFciEnv__v_;
+text: .text%__1cJBytecodesNpd_initialize6F_v_;
+text: .text%__1cHCompileRpd_compiler2_init6F_v_;
+text: .text%__1cKC2CompilerKinitialize6M_v_;
+text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_;
+text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_;
+text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o;
+text: .text%__1cMciKlassKlassEmake6F_p0_;
+text: .text%__1cIciMethodMvtable_index6M_i_;
+text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_;
+text: .text%__1cJLoadFNodeMstore_Opcode6kM_i_: classes.o;
+text: .text%__1cNTemplateTableGsipush6F_v_;
+text: .text%__1cQUncommonTrapBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_;
+text: .text%__1cNTemplateTableGldc2_w6F_v_;
+text: .text%__1cNExceptionBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_;
+text: .text%__1cNTemplateTableFiload6F_v_;
+text: .text%__1cNTemplateTableLfast_iload26F_v_;
+text: .text%__1cNTemplateTableKfast_iload6F_v_;
+text: .text%__1cNTemplateTableFlload6F_v_;
+text: .text%__1cNTemplateTableFfload6F_v_;
+text: .text%__1cNTemplateTableFdload6F_v_;
+text: .text%__1cNTemplateTableFaload6F_v_;
+text: .text%__1cNTemplateTableKwide_iload6F_v_;
+text: .text%__1cNTemplateTableKwide_lload6F_v_;
+text: .text%__1cNTemplateTableKwide_fload6F_v_;
+text: .text%__1cNTemplateTableKwide_dload6F_v_;
+text: .text%__1cNTemplateTableKwide_aload6F_v_;
+text: .text%__1cNTemplateTableGiaload6F_v_;
+text: .text%__1cNTemplateTableGlaload6F_v_;
+text: .text%__1cNTemplateTableGfaload6F_v_;
+text: .text%__1cNTemplateTableGdaload6F_v_;
+text: .text%__1cNTemplateTableGbipush6F_v_;
+text: .text%__1cLMoveF2INodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cLMoveL2DNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_;
+text: .text%__1cHOrLNodeGadd_id6kM_pknEType__: classes.o;
+text: .text%__1cHOrLNodeJideal_reg6kM_I_: classes.o;
+text: .text%__1cNTemplateTableF_goto6F_v_;
+text: .text%__1cNTemplateTableGgoto_w6F_v_;
+text: .text%__1cNTemplateTableFjsr_w6F_v_;
+text: .text%__1cNTemplateTableDjsr6F_v_;
+text: .text%__1cXreferenceProcessor_init6F_v_;
+text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_;
+text: .text%__1cStemplateTable_init6F_v_;
+text: .text%__1cNTemplateTableNpd_initialize6F_v_;
+text: .text%__1cNTemplateTableDnop6F_v_;
+text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_;
+text: .text%__1cNTemplateTableLaconst_null6F_v_;
+text: .text%__1cKPSYoungGenbCreset_survivors_after_shrink6M_v_;
+text: .text%__1cKPSYoungGenQlimit_gen_shrink6MI_I_;
+text: .text%__1cKPSYoungGenRavailable_to_live6M_I_;
+text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_;
+text: .text%__1cLOptoRuntimeUmultianewarray2_Type6F_pknITypeFunc__;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_pipeline.o;
+text: .text%__1cUAdjoiningGenerations2t6MnNReservedSpace_IIIIIII_v_;
+text: .text%__1cWAdjoiningVirtualSpaces2t6MnNReservedSpace_III_v_;
+text: .text%__1cOchunkpool_init6F_v_;
+text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_;
+text: .text%__1cJArgumentsWinit_system_properties6F_v_;
+text: .text%__1cMSysClassPathPexpand_endorsed6M_v_;
+text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_;
+text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_;
+text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_;
+text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_;
+text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_;
+text: .text%__1cLStatSamplerGengage6F_v_;
+text: .text%__1cNStubGeneratorbNgenerate_flush_callers_register_windows6M_pC_: stubGenerator_sparc.o;
+text: .text%__1cSstubRoutines_init16F_v_;
+text: .text%__1cSstubRoutines_init26F_v_;
+text: .text%__1cNStubGeneratorbIgenerate_handler_for_unsafe_access6M_pC_: stubGenerator_sparc.o;
+text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_sparc.o;
+text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_sparc.o;
+text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_sparc.o;
+text: .text%__1cNStubGeneratorSgenerate_test_stop6M_pC_: stubGenerator_sparc.o;
+text: .text%__1cNStubGeneratorbEgenerate_partial_subtype_check6M_pC_: stubGenerator_sparc.o;
+text: .text%__1cISubFNodeDsub6kMpknEType_3_3_;
+text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_;
+text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_;
+text: .text%__1cLStatSamplerHdestroy6F_v_;
+text: .text%__1cLStatSamplerJdisengage6F_v_;
+text: .text%__1cNRegisterSaverYrestore_result_registers6FpnOMacroAssembler__v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o;
+text: .text%__1cORuntimeServiceYrecord_application_start6F_v_;
+text: .text%__1cOMacroAssemblerNset_vm_result6MpnMRegisterImpl__v_;
+text: .text%__1cORuntimeServiceEinit6F_v_;
+text: .text%__1cOMacroAssemblerVverify_oop_subroutine6M_v_;
+text: .text%__1cOMacroAssemblerPstop_subroutine6M_v_;
+text: .text%__1cOMacroAssemblerElcmp6MpnMRegisterImpl_2222_v_;
+text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_;
+text: .text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_22222_v_;
+text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_22222_v_;
+text: .text%__1cOMacroAssemblerFlushr6MpnMRegisterImpl_22222_v_;
+text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__;
+text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__;
+text: .text%__1cLOptoRuntimeUmultianewarray3_Type6F_pknITypeFunc__;
+text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o;
+text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_;
+text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o;
+text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o;
+text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o;
+text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpnMRegisterImpl_pCi_v_;
+text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_;
+text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o;
+text: .text%__1cNTemplateTableGaaload6F_v_;
+text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o;
+text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o;
+text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_;
+text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o;
+text: .text%__1cQPlaceholderTable2t6Mi_v_;
+text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o;
+text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o;
+text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o;
+text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o;
+text: .text%__1cbAPSGCAdaptivePolicyCounters2t6MpkciipnUPSAdaptiveSizePolicy__v_;
+text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_;
+text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o;
+text: .text%__1cNTemplateTableNinvokespecial6Fi_v_;
+text: .text%__1cNTemplateTableMinvokestatic6Fi_v_;
+text: .text%__1cNTemplateTablebDinvokeinterface_object_method6FpnMRegisterImpl_222_v_;
+text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_;
+text: .text%__1cNTemplateTableE_new6F_v_;
+text: .text%__1cNTemplateTableInewarray6F_v_;
+text: .text%__1cNTemplateTableJanewarray6F_v_;
+text: .text%__1cNTemplateTableLarraylength6F_v_;
+text: .text%__1cNTemplateTableJcheckcast6F_v_;
+text: .text%__1cNTemplateTableKinstanceof6F_v_;
+text: .text%__1cNTemplateTableL_breakpoint6F_v_;
+text: .text%__1cNTemplateTableGathrow6F_v_;
+text: .text%__1cNTemplateTableMmonitorenter6F_v_;
+text: .text%__1cNTemplateTableLmonitorexit6F_v_;
+text: .text%__1cNTemplateTableEwide6F_v_;
+text: .text%__1cNTemplateTableOmultianewarray6F_v_;
+text: .text%__1cTcompilerOracle_init6F_v_;
+text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__;
+text: .text%__1cZCompiledArgumentOopFinderRhandle_oop_offset6M_v_: frame.o;
+text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_;
+text: .text%__1cHGCStats2t6M_v_;
+text: .text%__1cNGCTaskManager2t6MI_v_;
+text: .text%__1cNGCTaskManagerKinitialize6M_v_;
+text: .text%__1cNGCTaskManagerKthreads_do6MpnNThreadClosure__v_;
+text: .text%__1cPPerfDataManagerHdestroy6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o;
+text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_;
+text: .text%__1cWResolveOopMapConflictsUdo_potential_rewrite6MpnGThread__nMmethodHandle__;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o;
+text: .text%__1cOThreadCriticalKinitialize6F_v_;
+text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_;
+text: .text%__1cICodeHeap2t6M_v_;
+text: .text%__1cDhpiKinitialize6F_i_;
+text: .text%__1cMPerfDataList2T6M_v_;
+text: .text%__1cNWatcherThreadDrun6M_v_;
+text: .text%__1cNWatcherThreadEstop6F_v_;
+text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o;
+text: .text%__1cFStateO_sub_Op_CMoveD6MpknENode__v_;
+text: .text%__1cFStateP_sub_Op_MoveF2I6MpknENode__v_;
+text: .text%__1cKDictionary2t6Mi_v_;
+text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_;
+text: .text%__1cNeventlog_init6F_v_;
+text: .text%__1cScheck_ThreadShadow6F_v_;
+text: .text%__1cOtailjmpIndNodeLout_RegMask6kM_rknHRegMask__;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o;
+text: .text%__1cFframeVinterpreter_frame_mdp6kM_pC_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: phase.o;
+text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_;
+text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_;
+text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o;
+text: .text%__1cPperfMemory_exit6F_v_;
+text: .text%__1cPperfMemory_init6F_v_;
+text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_;
+text: .text%__1cNTemplateTableHfastore6F_v_;
+text: .text%__1cNTemplateTableHdastore6F_v_;
+text: .text%__1cNTemplateTableHaastore6F_v_;
+text: .text%__1cNTemplateTableHbastore6F_v_;
+text: .text%__1cNTemplateTableHsastore6F_v_;
+text: .text%__1cOcodeCache_init6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o;
+text: .text%__1cNTemplateTableDpop6F_v_;
+text: .text%__1cNTemplateTableEpop26F_v_;
+text: .text%__1cNTemplateTableDdup6F_v_;
+text: .text%__1cNTemplateTableGdup_x16F_v_;
+text: .text%__1cNTemplateTableGdup_x26F_v_;
+text: .text%__1cNTemplateTableEdup26F_v_;
+text: .text%__1cNTemplateTableHdup2_x16F_v_;
+text: .text%__1cNTemplateTableHdup2_x26F_v_;
+text: .text%__1cNTemplateTableEswap6F_v_;
+text: .text%__1cNCollectedHeap2t6M_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o;
+text: .text%__1cNTemplateTableEirem6F_v_;
+text: .text%__1cNTemplateTableElmul6F_v_;
+text: .text%__1cNTemplateTableHlastore6F_v_;
+text: .text%__1cNTemplateTableGbaload6F_v_;
+text: .text%__1cNTemplateTableGcaload6F_v_;
+text: .text%__1cNTemplateTableMfast_icaload6F_v_;
+text: .text%__1cNTemplateTableGsaload6F_v_;
+text: .text%__1cKPSYoungGenPinitialize_work6M_v_;
+text: .text%__1cKPSYoungGenKinitialize6MnNReservedSpace_I_v_;
+text: .text%__1cKPSYoungGenYinitialize_virtual_space6MnNReservedSpace_I_v_;
+text: .text%__1cKPSYoungGen2t6MIII_v_;
+text: .text%__1cNTemplateTableHaload_06F_v_;
+text: .text%__1cNTemplateTableGistore6F_v_;
+text: .text%__1cNTemplateTableGlstore6F_v_;
+text: .text%__1cNTemplateTableGfstore6F_v_;
+text: .text%__1cNTemplateTableGdstore6F_v_;
+text: .text%__1cNTemplateTableGastore6F_v_;
+text: .text%__1cNTemplateTableLwide_istore6F_v_;
+text: .text%__1cNTemplateTableLwide_lstore6F_v_;
+text: .text%__1cNTemplateTableLwide_fstore6F_v_;
+text: .text%__1cNTemplateTableLwide_dstore6F_v_;
+text: .text%__1cNTemplateTableLwide_astore6F_v_;
+text: .text%__1cNTemplateTableHiastore6F_v_;
+text: .text%__1cNTemplateTableEldiv6F_v_;
+text: .text%__1cNTemplateTableLtableswitch6F_v_;
+text: .text%__1cNTemplateTableMlookupswitch6F_v_;
+text: .text%__1cNTemplateTableRfast_linearswitch6F_v_;
+text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_;
+text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_;
+text: .text%__1cJPSPermGen2t6MnNReservedSpace_IIIIpkci_v_;
+text: .text%__1cNCompileBrokerQset_should_block6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o;
+text: .text%__1cNTemplateTableIgetfield6Fi_v_;
+text: .text%__1cNTemplateTableJgetstatic6Fi_v_;
+text: .text%__1cIPSOldGenKinitialize6MnNReservedSpace_Ipkci_v_;
+text: .text%__1cIPSOldGen2t6MIIIpkci_v_;
+text: .text%__1cIPSOldGen2t6MnNReservedSpace_IIIIpkci_v_;
+text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o;
+text: .text%__1cNTemplateTableIputfield6Fi_v_;
+text: .text%__1cNTemplateTableJputstatic6Fi_v_;
+text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o;
+text: .text%__1cLPSMarkSweepKinitialize6F_v_;
+text: .text%__1cNTemplateTableIwide_ret6F_v_;
+text: .text%__1cNTemplateTableElrem6F_v_;
+text: .text%__1cNTemplateTableElshl6F_v_;
+text: .text%__1cNTemplateTableElshr6F_v_;
+text: .text%__1cNTemplateTableFlushr6F_v_;
+text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_;
+text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_;
+text: .text%__1cNTemplateTableEineg6F_v_;
+text: .text%__1cNTemplateTableElneg6F_v_;
+text: .text%__1cNTemplateTableEfneg6F_v_;
+text: .text%__1cNTemplateTableEdneg6F_v_;
+text: .text%__1cNTemplateTableEiinc6F_v_;
+text: .text%__1cNTemplateTableJwide_iinc6F_v_;
+text: .text%__1cKPSScavengeKinitialize6F_v_;
+text: .text%__1cNTemplateTableElcmp6F_v_;
+text: .text%__1cWcompilationPolicy_init6F_v_;
+text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o;
+text: .text%__1cSPSPromotionManagerKinitialize6F_v_;
+text: .text%__1cNTemplateTableDret6F_v_;
--- a/hotspot/make/solaris/makefiles/sa.make	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/solaris/makefiles/sa.make	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,9 @@
 AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
 AGENT_FILES2 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES2))
 
+AGENT_FILES1_LIST := $(GENERATED)/agent1.classes.list
+AGENT_FILES2_LIST := $(GENERATED)/agent2.classes.list
+
 SA_CLASSDIR = $(GENERATED)/saclasses
 
 SA_BUILD_VERSION_PROP = "sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION)"
@@ -70,8 +73,23 @@
 	$(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
 	  mkdir -p $(SA_CLASSDIR);        \
 	fi
-	$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1)
-	$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2)
+	
+# Note: if these comment lines were indented as part of the rule, make
+# would expand them and try to execute the '$(shell' calls they mention.
+# In some environments, cmd processors have limited line length.
+# To prevent the javac invocation in the next block from using
+# a very long cmd line, we use javac's @file-list option. We
+# generate the file lists using make's built-in 'foreach' control
+# flow which also avoids cmd processor line length issues. Since
+# the 'foreach' is done as part of make's macro expansion phase,
+# the initialization of the lists is also done in the same phase
+# using '$(shell rm ...' instead of using the more traditional
+# 'rm ...' rule.
+	$(shell rm -rf $(AGENT_FILES1_LIST) $(AGENT_FILES2_LIST))
+	$(foreach file,$(AGENT_FILES1),$(shell echo $(file) >> $(AGENT_FILES1_LIST)))
+	$(foreach file,$(AGENT_FILES2),$(shell echo $(file) >> $(AGENT_FILES2_LIST)))
+	
+	$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST)
+	$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST)
 	
 	$(QUIETLY) $(COMPILE.RMIC)  -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
@@ -88,3 +106,4 @@
 clean:
 	rm -rf $(SA_CLASSDIR)
 	rm -rf $(GENERATED)/sa-jdi.jar
+	rm -rf $(AGENT_FILES1_LIST) $(AGENT_FILES2_LIST)
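The comment block in the hunk above is the whole trick: build the class-file lists during make's macro-expansion phase, then hand javac an @file argument so the command line stays short. A minimal standalone sketch of the same pattern, with invented target and variable names (GNU-compatible make and a javac on PATH assumed):

  # sketch.make: the list is (re)built at parse time, as in the patch above
  FILES := $(wildcard src/*.java)
  LIST  := build/files.list
  $(shell mkdir -p build; rm -f $(LIST))
  $(foreach f,$(FILES),$(shell echo $(f) >> $(LIST)))

  classes:          # recipe line below must start with a tab
  	javac -d build/classes @$(LIST)   # @file keeps the cmd line short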
--- a/hotspot/make/templates/bsd-header	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/templates/bsd-header	Wed Jul 05 17:22:53 2017 +0200
@@ -1,4 +1,4 @@
-Copyright %YEARS% Oracle and/or its affiliates. All rights reserved.
+Copyright (c) %YEARS%, Oracle and/or its affiliates. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
--- a/hotspot/make/templates/gpl-cp-header	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/templates/gpl-cp-header	Wed Jul 05 17:22:53 2017 +0200
@@ -1,4 +1,4 @@
-Copyright %YEARS% Oracle and/or its affiliates. All rights reserved.
+Copyright (c) %YEARS%, Oracle and/or its affiliates. All rights reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
 This code is free software; you can redistribute it and/or modify it
--- a/hotspot/make/templates/gpl-header	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/templates/gpl-header	Wed Jul 05 17:22:53 2017 +0200
@@ -1,4 +1,4 @@
-Copyright %YEARS% Oracle and/or its affiliates. All rights reserved.
+Copyright (c) %YEARS%, Oracle and/or its affiliates. All rights reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
 This code is free software; you can redistribute it and/or modify it
--- a/hotspot/make/windows/build.make	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/make/windows/build.make	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 # Note: this makefile is invoked both from build.bat and from the J2SE
@@ -72,13 +72,11 @@
 !endif
 !endif
 
-!if "$(BUILDARCH)" != "amd64"
 !if "$(BUILDARCH)" != "ia64"
 !ifndef CC_INTERP
 FORCE_TIERED=1
 !endif
 !endif
-!endif
 
 !if "$(BUILDARCH)" == "amd64"
 Platform_arch=x86
@@ -135,7 +133,7 @@
 # We can have update versions like "01a", but Windows requires
 # we use only integers in the file version field.  So:
 # JDK_UPDATE_VER = JDK_UPDATE_VERSION * 10 + EXCEPTION_VERSION
-# 
+#
 JDK_UPDATE_VER=0
 JDK_BUILD_NUMBER=0
 
@@ -148,7 +146,7 @@
 #       1.6.0_01a-b02 will be 6.0.11.2
 #
 # JDK_* variables are defined in make/hotspot_version or on command line
-# 
+#
 JDK_VER=$(JDK_MINOR_VER),$(JDK_MICRO_VER),$(JDK_UPDATE_VER),$(JDK_BUILD_NUMBER)
 JDK_DOTVER=$(JDK_MINOR_VER).$(JDK_MICRO_VER).$(JDK_UPDATE_VER).$(JDK_BUILD_NUMBER)
 !if "$(JRE_RELEASE_VERSION)" == ""
@@ -162,7 +160,7 @@
 
 # Hotspot Express VM FileVersion:
 # 10.0-b<yz> will have DLL version 10.0.0.yz (need 4 numbers).
-# 
+#
 # HS_* variables are defined in make/hotspot_version
 #
 HS_VER=$(HS_MAJOR_VER),$(HS_MINOR_VER),0,$(HS_BUILD_NUMBER)
@@ -182,7 +180,7 @@
 
 
 # We don't support SA on ia64, and we can't
-# build it if we are using a version of Vis Studio 
+# build it if we are using a version of Vis Studio
 # older than .Net 2003.
 # SA_INCLUDE and SA_LIB are hold-overs from a previous
 # implementation in which we could build SA using
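One worked instance of the version arithmetic described in the comments above, with assumed values; it only makes the stated 1.6.0_01a-b02 example explicit:

  # JDK_UPDATE_VERSION = 01, EXCEPTION_VERSION 'a' = 1, build b02
  # JDK_UPDATE_VER = 01 * 10 + 1 = 11
  # so the Windows file version is 6.0.11.2 (minor.micro.update.build)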
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -4192,7 +4192,7 @@
 
 static void generate_satb_log_enqueue(bool with_frame) {
   BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
-  CodeBuffer buf(bb->instructions_begin(), bb->instructions_size());
+  CodeBuffer buf(bb);
   MacroAssembler masm(&buf);
   address start = masm.pc();
   Register pre_val;
@@ -4421,7 +4421,7 @@
 // This gets to assume that o0 contains the object address.
 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
   BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
-  CodeBuffer buf(bb->instructions_begin(), bb->instructions_size());
+  CodeBuffer buf(bb);
   MacroAssembler masm(&buf);
   address start = masm.pc();
 
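Both barrier-stub generators above now hand the whole BufferBlob to the CodeBuffer instead of passing raw begin/size pointers. A minimal sketch of that construction pattern, assuming the HotSpot APIs used in the hunks (the stub name and size are invented):

  static address generate_sketch_stub() {
    BufferBlob* bb = BufferBlob::create("sketch_stub", 1024); // assumed size
    CodeBuffer buf(bb);             // wraps the blob's code area directly
    MacroAssembler masm(&buf);
    address start = masm.pc();
    // ... emit the stub body here ...
    return start;
  }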
--- a/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -57,13 +57,12 @@
 #endif
 }
 
-#ifdef TIERED
 
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   __ set(_bci, G4);
   __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
-  __ delayed()->nop();
+  __ delayed()->mov_or_nop(_method->as_register(), G5);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
 
@@ -71,7 +70,6 @@
   __ delayed()->nop();
 }
 
-#endif // TIERED
 
 void DivByZeroStub::emit_code(LIR_Assembler* ce) {
   if (_offset != -1) {
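The counter-overflow change above also puts the call's delay slot to work: on SPARC the instruction issued via delayed() executes while the call transfers control, so loading the method into G5 replaces a wasted nop. The idiom in isolation, with illustrative register names:

  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(Rmethod, G5);  // runs in the call's delay slot
  // versus the old form, which burned the slot:
  // __ delayed()->nop();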
--- a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -73,6 +73,7 @@
 // some useful constant RInfo's:
 LIR_Opr FrameMap::in_long_opr;
 LIR_Opr FrameMap::out_long_opr;
+LIR_Opr FrameMap::g1_long_single_opr;
 
 LIR_Opr FrameMap::F0_opr;
 LIR_Opr FrameMap::F0_double_opr;
@@ -238,6 +239,7 @@
 
   in_long_opr    = as_long_opr(I0);
   out_long_opr   = as_long_opr(O0);
+  g1_long_single_opr    = as_long_single_opr(G1);
 
   G0_opr = as_opr(G0);
   G1_opr = as_opr(G1);
--- a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -103,6 +103,7 @@
 
   static LIR_Opr in_long_opr;
   static LIR_Opr out_long_opr;
+  static LIR_Opr g1_long_single_opr;
 
   static LIR_Opr F0_opr;
   static LIR_Opr F0_double_opr;
@@ -113,18 +114,25 @@
  private:
   static FloatRegister  _fpu_regs [nof_fpu_regs];
 
+  static LIR_Opr as_long_single_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+  }
+  static LIR_Opr as_long_pair_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r->successor()), cpu_reg2rnr(r));
+  }
+
  public:
 
 #ifdef _LP64
   static LIR_Opr as_long_opr(Register r) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+    return as_long_single_opr(r);
   }
   static LIR_Opr as_pointer_opr(Register r) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+    return as_long_single_opr(r);
   }
 #else
   static LIR_Opr as_long_opr(Register r) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r->successor()), cpu_reg2rnr(r));
+    return as_long_pair_opr(r);
   }
   static LIR_Opr as_pointer_opr(Register r) {
     return as_opr(r);
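A sketch of what the two new helpers produce, with registers chosen only for illustration: on 64-bit one register carries the whole jlong, so both halves of the LIR operand name the same register; on 32-bit the value needs a register pair, low word in the successor register.

  //   as_long_single_opr(G1) ==> LIR_OprFact::double_cpu(G1, G1)
  //   as_long_pair_opr(O0)   ==> LIR_OprFact::double_cpu(O1 /*lo*/, O0 /*hi*/)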
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1625,13 +1625,18 @@
 
 void LIR_Assembler::return_op(LIR_Opr result) {
   // the poll may need a register so just pick one that isn't the return register
-#ifdef TIERED
+#if defined(TIERED) && !defined(_LP64)
   if (result->type_field() == LIR_OprDesc::long_type) {
     // Must move the result to G1
     // Must leave proper result in O0,O1 and G1 (TIERED only)
     __ sllx(I0, 32, G1);          // Shift bits into high G1
     __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
     __ or3 (I1, G1, G1);          // OR 64 bits into G1
+#ifdef ASSERT
+    // mangle it so any problems will show up
+    __ set(0xdeadbeef, I0);
+    __ set(0xdeadbeef, I1);
+#endif
   }
 #endif // TIERED
   __ set((intptr_t)os::get_polling_page(), L0);
@@ -2424,6 +2429,192 @@
 }
 
 
+void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
+                                        ciMethodData *md, ciProfileData *data,
+                                        Register recv, Register tmp1, Label* update_done) {
+  uint i;
+  for (i = 0; i < VirtualCallData::row_limit(); i++) {
+    Label next_test;
+    // See if the receiver is receiver[n].
+    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
+                          mdo_offset_bias);
+    __ ld_ptr(receiver_addr, tmp1);
+    __ verify_oop(tmp1);
+    __ cmp(recv, tmp1);
+    __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
+    __ delayed()->nop();
+    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
+                      mdo_offset_bias);
+    __ ld_ptr(data_addr, tmp1);
+    __ add(tmp1, DataLayout::counter_increment, tmp1);
+    __ st_ptr(tmp1, data_addr);
+    __ ba(false, *update_done);
+    __ delayed()->nop();
+    __ bind(next_test);
+  }
+
+  // Didn't find receiver; find next empty slot and fill it in
+  for (i = 0; i < VirtualCallData::row_limit(); i++) {
+    Label next_test;
+    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
+                      mdo_offset_bias);
+    load(recv_addr, tmp1, T_OBJECT);
+    __ br_notnull(tmp1, false, Assembler::pt, next_test);
+    __ delayed()->nop();
+    __ st_ptr(recv, recv_addr);
+    __ set(DataLayout::counter_increment, tmp1);
+    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
+              mdo_offset_bias);
+    __ ba(false, *update_done);
+    __ delayed()->nop();
+    __ bind(next_test);
+  }
+}
+
+
+void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
+                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
+  md = method->method_data();
+  if (md == NULL) {
+    bailout("out of memory building methodDataOop");
+    return;
+  }
+  data = md->bci_to_data(bci);
+  assert(data != NULL,       "need data for checkcast");
+  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
+  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
+    // The offset is large so bias the mdo by the base of the slot so
+    // that the ld can use simm13s to reference the slots of the data
+    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
+  }
+}
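+// Aside (illustrative sketch, not part of this change): the bias exists
+// because SPARC load/store displacements are 13-bit signed immediates
+// (simm13), so only offsets in [-4096, 4095] encode directly. With an
+// assumed header slot offset of 20000:
+//   mdo_offset_bias = 20000;                    // folded into mdo once
+//   int disp = slot_offset - mdo_offset_bias;   // small delta per slot
+//   assert(Assembler::is_simm13(disp), "now encodable");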
+
+void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
+  // we always need a stub for the failure case.
+  CodeStub* stub = op->stub();
+  Register obj = op->object()->as_register();
+  Register k_RInfo = op->tmp1()->as_register();
+  Register klass_RInfo = op->tmp2()->as_register();
+  Register dst = op->result_opr()->as_register();
+  Register Rtmp1 = op->tmp3()->as_register();
+  ciKlass* k = op->klass();
+
+
+  if (obj == k_RInfo) {
+    k_RInfo = klass_RInfo;
+    klass_RInfo = obj;
+  }
+
+  ciMethodData* md;
+  ciProfileData* data;
+  int mdo_offset_bias = 0;
+  if (op->should_profile()) {
+    ciMethod* method = op->profiled_method();
+    assert(method != NULL, "Should have method");
+    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
+
+    Label not_null;
+    __ br_notnull(obj, false, Assembler::pn, not_null);
+    __ delayed()->nop();
+    Register mdo      = k_RInfo;
+    Register data_val = Rtmp1;
+    jobject2reg(md->constant_encoding(), mdo);
+    if (mdo_offset_bias > 0) {
+      __ set(mdo_offset_bias, data_val);
+      __ add(mdo, data_val, mdo);
+    }
+    Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
+    __ ldub(flags_addr, data_val);
+    __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
+    __ stb(data_val, flags_addr);
+    __ ba(false, *obj_is_null);
+    __ delayed()->nop();
+    __ bind(not_null);
+  } else {
+    __ br_null(obj, false, Assembler::pn, *obj_is_null);
+    __ delayed()->nop();
+  }
+
+  Label profile_cast_failure, profile_cast_success;
+  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
+  Label *success_target = op->should_profile() ? &profile_cast_success : success;
+
+  // patching may screw with our temporaries on sparc,
+  // so let's do it before loading the class
+  if (k->is_loaded()) {
+    jobject2reg(k->constant_encoding(), k_RInfo);
+  } else {
+    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
+  }
+  assert(obj != k_RInfo, "must be different");
+
+  // get object class
+  // not a safepoint as obj null check happens earlier
+  load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
+  if (op->fast_check()) {
+    assert_different_registers(klass_RInfo, k_RInfo);
+    __ cmp(k_RInfo, klass_RInfo);
+    __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
+    __ delayed()->nop();
+  } else {
+    bool need_slow_path = true;
+    if (k->is_loaded()) {
+      if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
+        need_slow_path = false;
+      // perform the fast part of the checking logic
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
+                                       (need_slow_path ? success_target : NULL),
+                                       failure_target, NULL,
+                                       RegisterOrConstant(k->super_check_offset()));
+    } else {
+      // perform the fast part of the checking logic
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
+                                       failure_target, NULL);
+    }
+    if (need_slow_path) {
+      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
+      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
+      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+      __ delayed()->nop();
+      __ cmp(G3, 0);
+      __ br(Assembler::equal, false, Assembler::pn, *failure_target);
+      __ delayed()->nop();
+      // Fall through to success case
+    }
+  }
+
+  if (op->should_profile()) {
+    Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
+    assert_different_registers(obj, mdo, recv, tmp1);
+    __ bind(profile_cast_success);
+    jobject2reg(md->constant_encoding(), mdo);
+    if (mdo_offset_bias > 0) {
+      __ set(mdo_offset_bias, tmp1);
+      __ add(mdo, tmp1, mdo);
+    }
+    load(Address(obj, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
+    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
+    // Jump over the failure case
+    __ ba(false, *success);
+    __ delayed()->nop();
+    // Cast failure case
+    __ bind(profile_cast_failure);
+    jobject2reg(md->constant_encoding(), mdo);
+    if (mdo_offset_bias > 0) {
+      __ set(mdo_offset_bias, tmp1);
+      __ add(mdo, tmp1, mdo);
+    }
+    Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
+    __ ld_ptr(data_addr, tmp1);
+    __ sub(tmp1, DataLayout::counter_increment, tmp1);
+    __ st_ptr(tmp1, data_addr);
+    __ ba(false, *failure);
+    __ delayed()->nop();
+  }
+  __ ba(false, *success);
+  __ delayed()->nop();
+}
+
 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
   LIR_Code code = op->code();
   if (code == lir_store_check) {
@@ -2434,193 +2625,106 @@
     Register Rtmp1 = op->tmp3()->as_register();
 
     __ verify_oop(value);
-
     CodeStub* stub = op->stub();
-    Label done;
-    __ cmp(value, 0);
-    __ br(Assembler::equal, false, Assembler::pn, done);
-    __ delayed()->nop();
+    // check if it needs to be profiled
+    ciMethodData* md;
+    ciProfileData* data;
+    int mdo_offset_bias = 0;
+    if (op->should_profile()) {
+      ciMethod* method = op->profiled_method();
+      assert(method != NULL, "Should have method");
+      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
+    }
+    Label profile_cast_success, profile_cast_failure, done;
+    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
+    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
+
+    if (op->should_profile()) {
+      Label not_null;
+      __ br_notnull(value, false, Assembler::pn, not_null);
+      __ delayed()->nop();
+      Register mdo      = k_RInfo;
+      Register data_val = Rtmp1;
+      jobject2reg(md->constant_encoding(), mdo);
+      if (mdo_offset_bias > 0) {
+        __ set(mdo_offset_bias, data_val);
+        __ add(mdo, data_val, mdo);
+      }
+      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
+      __ ldub(flags_addr, data_val);
+      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
+      __ stb(data_val, flags_addr);
+      __ ba(false, done);
+      __ delayed()->nop();
+      __ bind(not_null);
+    } else {
+      __ br_null(value, false, Assembler::pn, done);
+      __ delayed()->nop();
+    }
     load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
     load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
 
     // get instance klass
     load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
     // perform the fast part of the checking logic
-    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, &done, stub->entry(), NULL);
+    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
 
     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
     __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
     __ delayed()->nop();
     __ cmp(G3, 0);
-    __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
+    __ br(Assembler::equal, false, Assembler::pn, *failure_target);
     __ delayed()->nop();
-    __ bind(done);
-  } else if (op->code() == lir_checkcast) {
-    // we always need a stub for the failure case.
-    CodeStub* stub = op->stub();
-    Register obj = op->object()->as_register();
-    Register k_RInfo = op->tmp1()->as_register();
-    Register klass_RInfo = op->tmp2()->as_register();
-    Register dst = op->result_opr()->as_register();
-    Register Rtmp1 = op->tmp3()->as_register();
-    ciKlass* k = op->klass();
-
-    if (obj == k_RInfo) {
-      k_RInfo = klass_RInfo;
-      klass_RInfo = obj;
-    }
-    if (op->profiled_method() != NULL) {
-      ciMethod* method = op->profiled_method();
-      int bci          = op->profiled_bci();
-
-      // We need two temporaries to perform this operation on SPARC,
-      // so to keep things simple we perform a redundant test here
-      Label profile_done;
-      __ cmp(obj, 0);
-      __ br(Assembler::notEqual, false, Assembler::pn, profile_done);
-      __ delayed()->nop();
-      // Object is null; update methodDataOop
-      ciMethodData* md = method->method_data();
-      if (md == NULL) {
-        bailout("out of memory building methodDataOop");
-        return;
-      }
-      ciProfileData* data = md->bci_to_data(bci);
-      assert(data != NULL,       "need data for checkcast");
-      assert(data->is_BitData(), "need BitData for checkcast");
-      Register mdo      = k_RInfo;
-      Register data_val = Rtmp1;
+    // fall through to the success case
+
+    if (op->should_profile()) {
+      Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
+      assert_different_registers(value, mdo, recv, tmp1);
+      __ bind(profile_cast_success);
       jobject2reg(md->constant_encoding(), mdo);
-
-      int mdo_offset_bias = 0;
-      if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
-        // The offset is large so bias the mdo by the base of the slot so
-        // that the ld can use simm13s to reference the slots of the data
-        mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
-        __ set(mdo_offset_bias, data_val);
-        __ add(mdo, data_val, mdo);
+      if (mdo_offset_bias > 0) {
+        __ set(mdo_offset_bias, tmp1);
+        __ add(mdo, tmp1, mdo);
       }
-
-
-      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
-      __ ldub(flags_addr, data_val);
-      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
-      __ stb(data_val, flags_addr);
-      __ bind(profile_done);
-    }
-
-    Label done;
-    // patching may screw with our temporaries on sparc,
-    // so let's do it before loading the class
-    if (k->is_loaded()) {
-      jobject2reg(k->constant_encoding(), k_RInfo);
-    } else {
-      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
+      load(Address(value, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
+      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
+      __ ba(false, done);
+      __ delayed()->nop();
+      // Cast failure case
+      __ bind(profile_cast_failure);
+      jobject2reg(md->constant_encoding(), mdo);
+      if (mdo_offset_bias > 0) {
+        __ set(mdo_offset_bias, tmp1);
+        __ add(mdo, tmp1, mdo);
+      }
+      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
+      __ ld_ptr(data_addr, tmp1);
+      __ sub(tmp1, DataLayout::counter_increment, tmp1);
+      __ st_ptr(tmp1, data_addr);
+      __ ba(false, *stub->entry());
+      __ delayed()->nop();
     }
-    assert(obj != k_RInfo, "must be different");
-    __ cmp(obj, 0);
-    __ br(Assembler::equal, false, Assembler::pn, done);
-    __ delayed()->nop();
-
-    // get object class
-    // not a safepoint as obj null check happens earlier
-    load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
-    if (op->fast_check()) {
-      assert_different_registers(klass_RInfo, k_RInfo);
-      __ cmp(k_RInfo, klass_RInfo);
-      __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
-      __ delayed()->nop();
-      __ bind(done);
-    } else {
-      bool need_slow_path = true;
-      if (k->is_loaded()) {
-        if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
-          need_slow_path = false;
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
-                                         (need_slow_path ? &done : NULL),
-                                         stub->entry(), NULL,
-                                         RegisterOrConstant(k->super_check_offset()));
-      } else {
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7,
-                                         &done, stub->entry(), NULL);
-      }
-      if (need_slow_path) {
-        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
-        assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
-        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
-        __ delayed()->nop();
-        __ cmp(G3, 0);
-        __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
-        __ delayed()->nop();
-      }
-      __ bind(done);
-    }
+    __ bind(done);
+  } else if (code == lir_checkcast) {
+    Register obj = op->object()->as_register();
+    Register dst = op->result_opr()->as_register();
+    Label success;
+    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
+    __ bind(success);
     __ mov(obj, dst);
   } else if (code == lir_instanceof) {
     Register obj = op->object()->as_register();
-    Register k_RInfo = op->tmp1()->as_register();
-    Register klass_RInfo = op->tmp2()->as_register();
     Register dst = op->result_opr()->as_register();
-    Register Rtmp1 = op->tmp3()->as_register();
-    ciKlass* k = op->klass();
-
-    Label done;
-    if (obj == k_RInfo) {
-      k_RInfo = klass_RInfo;
-      klass_RInfo = obj;
-    }
-    // patching may screw with our temporaries on sparc,
-    // so let's do it before loading the class
-    if (k->is_loaded()) {
-      jobject2reg(k->constant_encoding(), k_RInfo);
-    } else {
-      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
-    }
-    assert(obj != k_RInfo, "must be different");
-    __ cmp(obj, 0);
-    __ br(Assembler::equal, true, Assembler::pn, done);
-    __ delayed()->set(0, dst);
-
-    // get object class
-    // not a safepoint as obj null check happens earlier
-    load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
-    if (op->fast_check()) {
-      __ cmp(k_RInfo, klass_RInfo);
-      __ br(Assembler::equal, true, Assembler::pt, done);
-      __ delayed()->set(1, dst);
-      __ set(0, dst);
-      __ bind(done);
-    } else {
-      bool need_slow_path = true;
-      if (k->is_loaded()) {
-        if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
-          need_slow_path = false;
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, noreg,
-                                         (need_slow_path ? &done : NULL),
-                                         (need_slow_path ? &done : NULL), NULL,
-                                         RegisterOrConstant(k->super_check_offset()),
-                                         dst);
-      } else {
-        assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, dst,
-                                         &done, &done, NULL,
-                                         RegisterOrConstant(-1),
-                                         dst);
-      }
-      if (need_slow_path) {
-        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
-        assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
-        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
-        __ delayed()->nop();
-        __ mov(G3, dst);
-      }
-      __ bind(done);
-    }
+    Label success, failure, done;
+    emit_typecheck_helper(op, &success, &failure, &failure);
+    __ bind(failure);
+    __ set(0, dst);
+    __ ba(false, done);
+    __ delayed()->nop();
+    __ bind(success);
+    __ set(1, dst);
+    __ bind(done);
   } else {
     ShouldNotReachHere();
   }
@@ -2776,9 +2880,14 @@
   ciProfileData* data = md->bci_to_data(bci);
   assert(data->is_CounterData(), "need CounterData for calls");
   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
+  Register mdo  = op->mdo()->as_register();
+#ifdef _LP64
+  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
+  Register tmp1 = op->tmp1()->as_register_lo();
+#else
   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
-  Register mdo  = op->mdo()->as_register();
   Register tmp1 = op->tmp1()->as_register();
+#endif
   jobject2reg(md->constant_encoding(), mdo);
   int mdo_offset_bias = 0;
   if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
@@ -2795,13 +2904,13 @@
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      Tier1ProfileVirtualCalls) {
+      C1ProfileVirtualCalls) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, tmp1, recv);
     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
     ciKlass* known_klass = op->known_holder();
-    if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
+    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
       // We know the type that will be seen at this call site; we can
       // statically update the methodDataOop rather than needing to do
       // dynamic tests on the receiver type
@@ -2816,9 +2925,9 @@
           Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                          VirtualCallData::receiver_count_offset(i)) -
                             mdo_offset_bias);
-          __ lduw(data_addr, tmp1);
+          __ ld_ptr(data_addr, tmp1);
           __ add(tmp1, DataLayout::counter_increment, tmp1);
-          __ stw(tmp1, data_addr);
+          __ st_ptr(tmp1, data_addr);
           return;
         }
       }
@@ -2837,70 +2946,32 @@
           __ st_ptr(tmp1, recv_addr);
           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                             mdo_offset_bias);
-          __ lduw(data_addr, tmp1);
+          __ ld_ptr(data_addr, tmp1);
           __ add(tmp1, DataLayout::counter_increment, tmp1);
-          __ stw(tmp1, data_addr);
+          __ st_ptr(tmp1, data_addr);
           return;
         }
       }
     } else {
       load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
       Label update_done;
-      uint i;
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
-        // See if the receiver is receiver[n].
-        Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
-                              mdo_offset_bias);
-        __ ld_ptr(receiver_addr, tmp1);
-        __ verify_oop(tmp1);
-        __ cmp(recv, tmp1);
-        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
-        __ delayed()->nop();
-        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
-                          mdo_offset_bias);
-        __ lduw(data_addr, tmp1);
-        __ add(tmp1, DataLayout::counter_increment, tmp1);
-        __ stw(tmp1, data_addr);
-        __ br(Assembler::always, false, Assembler::pt, update_done);
-        __ delayed()->nop();
-        __ bind(next_test);
-      }
-
-      // Didn't find receiver; find next empty slot and fill it in
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
-        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
-                          mdo_offset_bias);
-        load(recv_addr, tmp1, T_OBJECT);
-        __ tst(tmp1);
-        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
-        __ delayed()->nop();
-        __ st_ptr(recv, recv_addr);
-        __ set(DataLayout::counter_increment, tmp1);
-        __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
-                  mdo_offset_bias);
-        __ br(Assembler::always, false, Assembler::pt, update_done);
-        __ delayed()->nop();
-        __ bind(next_test);
-      }
+      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
       // Receiver did not match any saved receiver and there is no empty row for it.
       // Increment total counter to indicate polymorphic case.
-      __ lduw(counter_addr, tmp1);
+      __ ld_ptr(counter_addr, tmp1);
       __ add(tmp1, DataLayout::counter_increment, tmp1);
-      __ stw(tmp1, counter_addr);
+      __ st_ptr(tmp1, counter_addr);
 
       __ bind(update_done);
     }
   } else {
     // Static call
-    __ lduw(counter_addr, tmp1);
+    __ ld_ptr(counter_addr, tmp1);
     __ add(tmp1, DataLayout::counter_increment, tmp1);
-    __ stw(tmp1, counter_addr);
+    __ st_ptr(tmp1, counter_addr);
   }
 }
 
-
 void LIR_Assembler::align_backward_branch_target() {
   __ align(OptoLoopAlignment);
 }
@@ -3093,31 +3164,36 @@
   // no-op on TSO
 }
 
-// Macro to Pack two sequential registers containing 32 bit values
+// Pack two sequential registers containing 32 bit values
 // into a single 64 bit register.
-// rs and rs->successor() are packed into rd
-// rd and rs may be the same register.
-// Note: rs and rs->successor() are destroyed.
-void LIR_Assembler::pack64( Register rs, Register rd ) {
+// src and src->successor() are packed into dst
+// src and dst may be the same register.
+// Note: src is destroyed
+void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
+  Register rs = src->as_register();
+  Register rd = dst->as_register_lo();
   __ sllx(rs, 32, rs);
   __ srl(rs->successor(), 0, rs->successor());
   __ or3(rs, rs->successor(), rd);
 }
 
-// Macro to unpack a 64 bit value in a register into
+// Unpack a 64 bit value in a register into
 // two sequential registers.
-// rd is unpacked into rd and rd->successor()
-void LIR_Assembler::unpack64( Register rd ) {
-  __ mov(rd, rd->successor());
-  __ srax(rd, 32, rd);
-  __ sra(rd->successor(), 0, rd->successor());
+// src is unpacked into dst and dst->successor()
+void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
+  Register rs = src->as_register_lo();
+  Register rd = dst->as_register_hi();
+  assert_different_registers(rs, rd, rd->successor());
+  __ srlx(rs, 32, rd);
+  __ srl (rs,  0, rd->successor());
 }
 
 
 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
   LIR_Address* addr = addr_opr->as_address_ptr();
   assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
-  __ add(addr->base()->as_register(), addr->disp(), dest->as_register());
+
+  __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
 }
 
 
@@ -3188,11 +3264,36 @@
             tty->cr();
           }
 #endif
-          continue;
+        } else {
+          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
+          inst->insert_before(i + 1, delay_op);
+          i++;
         }
 
-        LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
-        inst->insert_before(i + 1, delay_op);
+#if defined(TIERED) && !defined(_LP64)
+        // fixup the return value from G1 to O0/O1 for long returns.
+        // It's done here instead of in LIRGenerator because there's
+        // such a mismatch between the single reg and double reg
+        // calling convention.
+        LIR_OpJavaCall* callop = op->as_OpJavaCall();
+        if (callop->result_opr() == FrameMap::out_long_opr) {
+          LIR_OpJavaCall* call;
+          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
+          for (int a = 0; a < callop->arguments()->length(); a++) {
+            arguments->append(callop->arguments()->at(a));
+          }
+          if (op->code() == lir_virtual_call) {
+            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
+                                      callop->vtable_offset(), arguments, callop->info());
+          } else {
+            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
+                                      callop->addr(), arguments, callop->info());
+          }
+          inst->at_put(i - 1, call);
+          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
+                                                 T_LONG, lir_patch_none, NULL));
+        }
+#endif
         break;
       }
     }
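
The receiver-row scans deleted above now live in type_profile_helper, and the counter accesses widen from lduw/stw to ld_ptr/st_ptr because profile counters are intptr-sized under tiered. A minimal C++ sketch of the row-scan logic the helper emits (Row, row_limit and profile_receiver are illustrative stand-ins, not HotSpot names):

    #include <cstddef>
    #include <cstdint>

    struct Row { void* receiver; intptr_t count; };  // one receiver/count pair
    static const int row_limit = 2;                  // VirtualCallData::row_limit()

    // Scan the rows for a matching receiver and bump its counter; otherwise
    // claim the first empty row.  Returns false when every row is taken, in
    // which case the caller bumps the polymorphic total counter instead.
    bool profile_receiver(Row rows[], void* recv, intptr_t increment) {
      for (int i = 0; i < row_limit; i++) {
        if (rows[i].receiver == recv) { rows[i].count += increment; return true; }
      }
      for (int i = 0; i < row_limit; i++) {
        if (rows[i].receiver == NULL) {
          rows[i].receiver = recv;
          rows[i].count = increment;
          return true;
        }
      }
      return false;
    }
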
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,9 +71,16 @@
 
   static bool is_single_instruction(LIR_Op* op);
 
+  // Record the type of the receiver in ReceiverTypeData
+  void type_profile_helper(Register mdo, int mdo_offset_bias,
+                           ciMethodData *md, ciProfileData *data,
+                           Register recv, Register tmp1, Label* update_done);
+  // Set up pointers to the MDO and the MDO slot, and compute the offset bias to access the slot.
+  void setup_md_access(ciMethod* method, int bci,
+                       ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
  public:
-  void pack64( Register rs, Register rd );
-  void unpack64( Register rd );
+  void   pack64(LIR_Opr src, LIR_Opr dst);
+  void unpack64(LIR_Opr src, LIR_Opr dst);
 
 enum {
 #ifdef _LP64
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,29 +227,37 @@
   }
 }
 
+LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
+  LIR_Opr r;
+  if (type == T_LONG) {
+    r = LIR_OprFact::longConst(x);
+  } else if (type == T_INT) {
+    r = LIR_OprFact::intConst(x);
+  } else {
+    ShouldNotReachHere();
+  }
+  if (!Assembler::is_simm13(x)) {
+    LIR_Opr tmp = new_register(type);
+    __ move(r, tmp);
+    return tmp;
+  }
+  return r;
+}
 
-void LIRGenerator::increment_counter(address counter, int step) {
+void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
   LIR_Opr pointer = new_pointer_register();
   __ move(LIR_OprFact::intptrConst(counter), pointer);
-  LIR_Address* addr = new LIR_Address(pointer, T_INT);
+  LIR_Address* addr = new LIR_Address(pointer, type);
   increment_counter(addr, step);
 }
 
 void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
-  LIR_Opr temp = new_register(T_INT);
+  LIR_Opr temp = new_register(addr->type());
   __ move(addr, temp);
-  LIR_Opr c = LIR_OprFact::intConst(step);
-  if (Assembler::is_simm13(step)) {
-    __ add(temp, c, temp);
-  } else {
-    LIR_Opr temp2 = new_register(T_INT);
-    __ move(c, temp2);
-    __ add(temp, temp2, temp);
-  }
+  __ add(temp, load_immediate(step, addr->type()), temp);
   __ move(temp, addr);
 }
 
-
 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
   LIR_Opr o7opr = FrameMap::O7_opr;
   __ load(new LIR_Address(base, disp, T_INT), o7opr, info);
@@ -611,7 +619,6 @@
   left.load_item();
   right.load_item();
   LIR_Opr reg = rlock_result(x);
-
   if (x->x()->type()->is_float_kind()) {
     Bytecodes::Code code = x->op();
     __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
@@ -1040,7 +1047,9 @@
   LIR_Opr tmp1 = FrameMap::G1_oop_opr;
   LIR_Opr tmp2 = FrameMap::G3_oop_opr;
   LIR_Opr tmp3 = FrameMap::G4_oop_opr;
-  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,  x->direct_compare(), patching_info);
+  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
+                x->direct_compare(), patching_info,
+                x->profiled_method(), x->profiled_bci());
 }
 
 
@@ -1089,12 +1098,12 @@
   // add safepoint before generating condition code so it can be recomputed
   if (x->is_safepoint()) {
     // increment backedge counter if needed
-    increment_backedge_counter(state_for(x, x->state_before()));
-
+    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
     __ safepoint(new_register(T_INT), state_for(x, x->state_before()));
   }
 
   __ cmp(lir_cond(cond), left, right);
+  // Generate branch profiling. Profiling code doesn't kill flags.
   profile_branch(x, cond);
   move_to_phi(x->state());
   if (x->x()->type()->is_float_kind()) {
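
load_immediate above folds the old open-coded test into one place: SPARC arithmetic instructions carry a 13-bit signed immediate, so any constant outside [-4096, 4095] has to be materialized in a register first. A stand-alone sketch of the test (is_simm13 here is a local re-statement, not the HotSpot declaration):

    #include <cstdint>

    // A value fits a SPARC simm13 field when it lies in -(1 << 12) .. (1 << 12) - 1.
    inline bool is_simm13(int64_t x) {
      return x >= -4096 && x <= 4095;
    }

    // Mirrors load_immediate's choice: small steps stay instruction immediates,
    // anything larger must first be moved into a scratch register.
    bool needs_scratch_register(int64_t step) {
      return !is_simm13(step);
    }
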
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -465,12 +465,10 @@
 
       break;
 
-#ifdef TIERED
     case counter_overflow_id:
-        // G4 contains bci
-      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4);
+        // G4 contains bci, G5 contains method
+      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
       break;
-#endif // TIERED
 
     case new_type_array_id:
     case new_object_array_id:
--- a/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,14 +34,7 @@
 define_pd_global(bool, UseOnStackReplacement,        true );
 define_pd_global(bool, TieredCompilation,            false);
 define_pd_global(intx, CompileThreshold,             1000 ); // Design center runs on 1.3.1
-define_pd_global(intx, Tier2CompileThreshold,        1500 );
-define_pd_global(intx, Tier3CompileThreshold,        2000 );
-define_pd_global(intx, Tier4CompileThreshold,        2500 );
-
 define_pd_global(intx, BackEdgeThreshold,            100000);
-define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
-define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
-define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
 
 define_pd_global(intx, OnStackReplacePercentage,     1400 );
 define_pd_global(bool, UseTLAB,                      true );
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,21 +37,8 @@
 define_pd_global(bool, ProfileInterpreter,           true);
 #endif // CC_INTERP
 define_pd_global(bool, TieredCompilation,            false);
-#ifdef TIERED
-define_pd_global(intx, CompileThreshold,             1000);
-define_pd_global(intx, BackEdgeThreshold,            14000);
-#else
 define_pd_global(intx, CompileThreshold,             10000);
 define_pd_global(intx, BackEdgeThreshold,            140000);
-#endif // TIERED
-
-define_pd_global(intx, Tier2CompileThreshold,        10000); // unused level
-define_pd_global(intx, Tier3CompileThreshold,        10000);
-define_pd_global(intx, Tier4CompileThreshold,        40000);
-
-define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
-define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
-define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
 
 define_pd_global(intx, OnStackReplacePercentage,     140);
 define_pd_global(intx, ConditionalMoveLimit,         4);
--- a/hotspot/src/cpu/sparc/vm/codeBuffer_sparc.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/codeBuffer_sparc.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,5 +30,5 @@
 
   // Heuristic for pre-packing the pt/pn bit of a predicted branch.
   bool is_backward_branch(Label& L) {
-    return L.is_bound() && code_end() <= locator_address(L.loc());
+    return L.is_bound() && insts_end() <= locator_address(L.loc());
   }
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -253,11 +253,12 @@
     }
 
     // Could just be some random pointer within the codeBlob
-    if (!sender.cb()->instructions_contains(sender_pc)) return false;
+    if (!sender.cb()->code_contains(sender_pc)) {
+      return false;
+    }
 
     // We should never be able to see an adapter if the current frame is something from code cache
-
-    if ( sender_blob->is_adapter_blob()) {
+    if (sender_blob->is_adapter_blob()) {
       return false;
     }
 
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -263,8 +263,7 @@
   };
 
  private:
-
-  constantPoolCacheOop* frame::interpreter_frame_cpoolcache_addr() const;
+  constantPoolCacheOop* interpreter_frame_cpoolcache_addr() const;
 
 #ifndef CC_INTERP
 
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -2431,3 +2431,20 @@
   }
 #endif // CC_INTERP
 }
+
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+                                                        int increment, int mask,
+                                                        Register scratch1, Register scratch2,
+                                                        Condition cond, Label *where) {
+  ld(counter_addr, scratch1);
+  add(scratch1, increment, scratch1);
+  if (is_simm13(mask)) {
+    andcc(scratch1, mask, G0);
+  } else {
+    set(mask, scratch2);
+    andcc(scratch1, scratch2,  G0);
+  }
+  br(cond, false, Assembler::pn, *where);
+  delayed()->st(scratch1, counter_addr);
+}
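
As its comment says, increment_mask_and_jump bumps the counter and branches on the masked result, with the store riding in the branch delay slot so the new value is written back on both paths. Its semantics in plain C++ (a sketch fixing the condition to Assembler::zero, the case both interpreter call sites use):

    #include <cstdint>

    // Returns true when the caller's branch would be taken, i.e. when the
    // masked bits of the incremented counter are zero.
    bool increment_mask_and_jump(int32_t* counter_addr, int increment, int mask) {
      int32_t v = *counter_addr + increment;   // ld + add
      bool taken = (v & mask) == 0;            // andcc against the mask
      *counter_addr = v;                       // delayed()->st on both paths
      return taken;
    }
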
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -278,6 +278,10 @@
   void increment_mdp_data_at(Register reg, int constant,
                              Register bumped_count, Register scratch2,
                              bool decrement = false);
+  void increment_mask_and_jump(Address counter_addr,
+                               int increment, int mask,
+                               Register scratch1, Register scratch2,
+                               Condition cond, Label *where);
   void set_mdp_flag_at(int flag_constant, Register scratch);
   void test_mdp_data_at(int offset, Register value, Label& not_equal_continue,
                         Register scratch);
@@ -321,4 +325,5 @@
 
   void save_return_value(TosState state, bool is_native_call);
   void restore_return_value(TosState state, bool is_native_call);
+
 };
--- a/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,10 @@
     default:        ShouldNotReachHere();
   }
   ResourceMark rm;
-  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
-  address fast_entry = b->instructions_begin();
-  CodeBuffer cbuf(fast_entry, b->instructions_size());
+  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
+  CodeBuffer cbuf(blob);
   MacroAssembler* masm = new MacroAssembler(&cbuf);
+  address fast_entry = __ pc();
 
   Label label1, label2;
 
@@ -129,10 +129,10 @@
 address JNI_FastGetField::generate_fast_get_long_field() {
   const char *name = "jni_fast_GetLongField";
   ResourceMark rm;
-  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
-  address fast_entry = b->instructions_begin();
-  CodeBuffer cbuf(fast_entry, b->instructions_size());
+  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
+  CodeBuffer cbuf(blob);
   MacroAssembler* masm = new MacroAssembler(&cbuf);
+  address fast_entry = __ pc();
 
   Label label1, label2;
 
@@ -201,10 +201,10 @@
     default:       ShouldNotReachHere();
   }
   ResourceMark rm;
-  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
-  address fast_entry = b->instructions_begin();
-  CodeBuffer cbuf(fast_entry, b->instructions_size());
+  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
+  CodeBuffer cbuf(blob);
   MacroAssembler* masm = new MacroAssembler(&cbuf);
+  address fast_entry = __ pc();
 
   Label label1, label2;
 
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -193,17 +193,17 @@
 
   a->call( a->pc(), relocInfo::none );
   a->delayed()->nop();
-  nc = nativeCall_at( cb.code_begin() );
+  nc = nativeCall_at( cb.insts_begin() );
   nc->print();
 
   nc = nativeCall_overwriting_at( nc->next_instruction_address() );
   for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
-    nc->set_destination( cb.code_begin() + offsets[idx] );
-    assert(nc->destination() == (cb.code_begin() + offsets[idx]), "check unit test");
+    nc->set_destination( cb.insts_begin() + offsets[idx] );
+    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
     nc->print();
   }
 
-  nc = nativeCall_before( cb.code_begin() + 8 );
+  nc = nativeCall_before( cb.insts_begin() + 8 );
   nc->print();
 
   VM_Version::revert();
@@ -368,7 +368,7 @@
   a->sethi(al2, O2);
   a->add(O2, al2.low10(), O2);
 
-  nm = nativeMovConstReg_at( cb.code_begin() );
+  nm = nativeMovConstReg_at( cb.insts_begin() );
   nm->print();
 
   nm = nativeMovConstReg_at( nm->next_instruction_address() );
@@ -480,7 +480,7 @@
   a->nop();
   a->add(O2, al2.low10(), O2);
 
-  nm = nativeMovConstRegPatching_at( cb.code_begin() );
+  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
   nm->print();
 
   nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
@@ -616,7 +616,7 @@
   a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
 
-  nm = nativeMovRegMem_at( cb.code_begin() );
+  nm = nativeMovRegMem_at( cb.insts_begin() );
   nm->print();
   nm->set_offset( low10(0) );
   nm->print();
@@ -760,7 +760,7 @@
   a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
 
-  nm = nativeMovRegMemPatching_at( cb.code_begin() );
+  nm = nativeMovRegMemPatching_at( cb.insts_begin() );
   nm->print();
   nm->set_offset( low10(0) );
   nm->print();
@@ -849,7 +849,7 @@
   a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
   a->delayed()->nop();
 
-  nj = nativeJump_at( cb.code_begin() );
+  nj = nativeJump_at( cb.insts_begin() );
   nj->print();
 
   nj = nativeJump_at( nj->next_instruction_address() );
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -3331,10 +3331,8 @@
   __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
 #if !defined(_LP64)
 #if defined(COMPILER2)
-  if (!TieredCompilation) {
-    // 32-bit 1-register longs return longs in G1
-    __ stx(Greturn1, saved_Greturn1_addr);
-  }
+  // 32-bit 1-register longs return longs in G1
+  __ stx(Greturn1, saved_Greturn1_addr);
 #endif
   __ set_last_Java_frame(SP, noreg);
   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
@@ -3347,24 +3345,15 @@
   __ reset_last_Java_frame();
   __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
 
-  // In tiered we never use C2 to compile methods returning longs so
-  // the result is where we expect it already.
-
 #if !defined(_LP64) && defined(COMPILER2)
   // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
-  // I0/I1 if the return value is long.  In the tiered world there is
-  // a mismatch between how C1 and C2 return longs compiles and so
-  // currently compilation of methods which return longs is disabled
-  // for C2 and so is this code.  Eventually C1 and C2 will do the
-  // same thing for longs in the tiered world.
-  if (!TieredCompilation) {
-    Label not_long;
-    __ cmp(O0,T_LONG);
-    __ br(Assembler::notEqual, false, Assembler::pt, not_long);
-    __ delayed()->nop();
-    __ ldd(saved_Greturn1_addr,I0);
-    __ bind(not_long);
-  }
+  // I0/I1 if the return value is long.
+  Label not_long;
+  __ cmp(O0,T_LONG);
+  __ br(Assembler::notEqual, false, Assembler::pt, not_long);
+  __ delayed()->nop();
+  __ ldd(saved_Greturn1_addr,I0);
+  __ bind(not_long);
 #endif
   __ ret();
   __ delayed()->restore();
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Jul 05 17:22:53 2017 +0200
@@ -677,8 +677,7 @@
            (f20 << 20) |
            (f19 << 19) |
            (f0  <<  0);
-  *((int*)(cbuf.code_end())) = op;
-  cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+  cbuf.insts()->emit_int32(op);
 }
 
 // Standard Sparc opcode form2 field breakdown
@@ -689,8 +688,7 @@
            (f25 << 25) |
            (f22 << 22) |
            (f0  <<  0);
-  *((int*)(cbuf.code_end())) = op;
-  cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+  cbuf.insts()->emit_int32(op);
 }
 
 // Standard Sparc opcode form3 field breakdown
@@ -701,8 +699,7 @@
            (f14 << 14) |
            (f5  <<  5) |
            (f0  <<  0);
-  *((int*)(cbuf.code_end())) = op;
-  cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+  cbuf.insts()->emit_int32(op);
 }
 
 // Standard Sparc opcode form3 field breakdown
@@ -714,8 +711,7 @@
            (f14 << 14) |
            (1   << 13) | // bit to indicate immediate-mode
            (simm13<<0);
-  *((int*)(cbuf.code_end())) = op;
-  cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+  cbuf.insts()->emit_int32(op);
 }
 
 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) {
@@ -910,9 +906,7 @@
     instr |= disp & 0x1FFF;
   }
 
-  uint *code = (uint*)cbuf.code_end();
-  *code = instr;
-  cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+  cbuf.insts()->emit_int32(instr);
 
 #ifdef ASSERT
   {
@@ -1532,7 +1526,7 @@
   // set (empty), G5
   // jmp -1
 
-  address mark = cbuf.inst_mark();  // get mark within main instrs section
+  address mark = cbuf.insts_mark();  // get mark within main instrs section
 
   MacroAssembler _masm(&cbuf);
 
@@ -1632,7 +1626,7 @@
 // Emit exception handler code.
 int emit_exception_handler(CodeBuffer& cbuf) {
   Register temp_reg = G3;
-  AddressLiteral exception_blob(OptoRuntime::exception_blob()->instructions_begin());
+  AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
   MacroAssembler _masm(&cbuf);
 
   address base =
@@ -2292,8 +2286,7 @@
              (0 << 13) |                    // select register move
              ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc' or 'xcc'
              ($src$$reg << 0);
-    *((int*)(cbuf.code_end())) = op;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32(op);
   %}
 
   enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
@@ -2306,8 +2299,7 @@
              (1 << 13) |                    // select immediate move
              ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc'
              (simm11 << 0);
-    *((int*)(cbuf.code_end())) = op;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32(op);
   %}
 
   enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
@@ -2319,8 +2311,7 @@
              (0 << 13) |                    // select register move
              ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
              ($src$$reg << 0);
-    *((int*)(cbuf.code_end())) = op;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32(op);
   %}
 
   enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
@@ -2333,8 +2324,7 @@
              (1 << 13) |                    // select immediate move
              ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
              (simm11 << 0);
-    *((int*)(cbuf.code_end())) = op;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32(op);
   %}
 
   enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{
@@ -2347,8 +2337,7 @@
              ($pcc$$constant << 11) |       // cc1-cc0 bits for 'icc' or 'xcc'
              ($primary << 5) |              // select single, double or quad
              ($src$$reg << 0);
-    *((int*)(cbuf.code_end())) = op;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32(op);
   %}
 
   enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{
@@ -2360,8 +2349,7 @@
              ($fcc$$reg << 11) |            // cc2-cc0 bits for 'fccX'
              ($primary << 5) |              // select single, double or quad
              ($src$$reg << 0);
-    *((int*)(cbuf.code_end())) = op;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32(op);
   %}
 
   // Used by the MIN/MAX encodings.  Same as a CMOV, but
@@ -2375,8 +2363,7 @@
              (0 << 13) |                    // select register move
              (0 << 11) |                    // cc1, cc0 bits for 'icc'
              ($src$$reg << 0);
-    *((int*)(cbuf.code_end())) = op;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32(op);
   %}
 
   enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{
@@ -2388,8 +2375,7 @@
              (0 << 13) |                    // select register move
              (0 << 11) |                    // cc1, cc0 bits for 'icc'
              ($src$$reg << 0);
-    *((int*)(cbuf.code_end())) = op;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32(op);
   %}
 
   // Utility encoding for loading a 64 bit Pointer into a register
@@ -3055,7 +3041,7 @@
   %}
 
   enc_class enc_rethrow() %{
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     Register temp_reg = G3;
     AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
     assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
@@ -3076,23 +3062,17 @@
 
   enc_class emit_mem_nop() %{
     // Generates the instruction LDUXA [o6,g0],#0x82,g0
-    unsigned int *code = (unsigned int*)cbuf.code_end();
-    *code = (unsigned int)0xc0839040;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32((unsigned int) 0xc0839040);
   %}
 
   enc_class emit_fadd_nop() %{
     // Generates the instruction FMOVS f31,f31
-    unsigned int *code = (unsigned int*)cbuf.code_end();
-    *code = (unsigned int)0xbfa0003f;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f);
   %}
 
   enc_class emit_br_nop() %{
     // Generates the instruction BPN,PN .
-    unsigned int *code = (unsigned int*)cbuf.code_end();
-    *code = (unsigned int)0x00400000;
-    cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
+    cbuf.insts()->emit_int32((unsigned int) 0x00400000);
   %}
 
   enc_class enc_membar_acquire %{
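
Every `*((int*)(cbuf.code_end())) = op; cbuf.set_code_end(...)` pair in this file collapses into one emit_int32 call. What that buys is a single checked append instead of scattered raw stores; a sketch of the idea (MiniBuffer is a hypothetical stand-in, not the real CodeSection API):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct MiniBuffer {
      uint8_t* end;    // current emit position
      uint8_t* limit;  // end of allocated storage

      // Append one 32-bit instruction word and advance the end pointer --
      // the two steps the old code open-coded at every emission site.
      void emit_int32(uint32_t op) {
        assert(end + 4 <= limit && "code buffer overflow");
        memcpy(end, &op, sizeof(op));  // memcpy avoids alignment assumptions
        end += sizeof(op);
      }
    };
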
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1588,6 +1588,229 @@
   }
 
   //
+  //  Generate a fill stub for the given element type.  If "aligned" is true, the
+  //  "to" address is assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      to:    O0
+  //      value: O1
+  //      count: O2 treated as signed
+  //
+  address generate_fill(BasicType t, bool aligned, const char* name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    const Register to        = O0;   // destination array address
+    const Register value     = O1;   // fill value
+    const Register count     = O2;   // elements count
+    // O3 is used as a temp register
+
+    assert_clean_int(count, O3);     // Make sure 'count' is clean int.
+
+    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
+    Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
+
+    int shift = -1;
+    switch (t) {
+      case T_BYTE:
+        shift = 2;
+        break;
+      case T_SHORT:
+        shift = 1;
+        break;
+      case T_INT:
+        shift = 0;
+        break;
+      default: ShouldNotReachHere();
+    }
+
+    BLOCK_COMMENT("Entry:");
+
+    if (t == T_BYTE) {
+      // Zero extend value
+      __ and3(value, 0xff, value);
+      __ sllx(value, 8, O3);
+      __ or3(value, O3, value);
+    }
+    if (t == T_SHORT) {
+      // Zero extend value
+      __ sllx(value, 48, value);
+      __ srlx(value, 48, value);
+    }
+    if (t == T_BYTE || t == T_SHORT) {
+      __ sllx(value, 16, O3);
+      __ or3(value, O3, value);
+    }
+
+    __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
+    __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
+    __ delayed()->andcc(count, 1, G0);
+
+    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
+      // align source address at 4 bytes address boundary
+      if (t == T_BYTE) {
+        // One byte misalignment happens only for byte arrays
+        __ andcc(to, 1, G0);
+        __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
+        __ delayed()->nop();
+        __ stb(value, to, 0);
+        __ inc(to, 1);
+        __ dec(count, 1);
+        __ BIND(L_skip_align1);
+      }
+      // Two bytes misalignment happens only for byte and short (char) arrays
+      __ andcc(to, 2, G0);
+      __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
+      __ delayed()->nop();
+      __ sth(value, to, 0);
+      __ inc(to, 2);
+      __ dec(count, 1 << (shift - 1));
+      __ BIND(L_skip_align2);
+    }
+#ifdef _LP64
+    if (!aligned) {
+#endif
+    // align to 8 bytes, we know we are 4 byte aligned to start
+    __ andcc(to, 7, G0);
+    __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
+    __ delayed()->nop();
+    __ stw(value, to, 0);
+    __ inc(to, 4);
+    __ dec(count, 1 << shift);
+    __ BIND(L_fill_32_bytes);
+#ifdef _LP64
+    }
+#endif
+
+    if (t == T_INT) {
+      // Zero extend value
+      __ srl(value, 0, value);
+    }
+    if (t == T_BYTE || t == T_SHORT || t == T_INT) {
+      __ sllx(value, 32, O3);
+      __ or3(value, O3, value);
+    }
+
+    Label L_check_fill_8_bytes;
+    // Fill 32-byte chunks
+    __ subcc(count, 8 << shift, count);
+    __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
+    __ delayed()->nop();
+
+    Label L_fill_32_bytes_loop, L_fill_4_bytes;
+    __ align(16);
+    __ BIND(L_fill_32_bytes_loop);
+
+    __ stx(value, to, 0);
+    __ stx(value, to, 8);
+    __ stx(value, to, 16);
+    __ stx(value, to, 24);
+
+    __ subcc(count, 8 << shift, count);
+    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
+    __ delayed()->add(to, 32, to);
+
+    __ BIND(L_check_fill_8_bytes);
+    __ addcc(count, 8 << shift, count);
+    __ brx(Assembler::zero, false, Assembler::pn, L_exit);
+    __ delayed()->subcc(count, 1 << (shift + 1), count);
+    __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
+    __ delayed()->andcc(count, 1<<shift, G0);
+
+    //
+    // length is too short, just fill 8 bytes at a time
+    //
+    Label L_fill_8_bytes_loop;
+    __ BIND(L_fill_8_bytes_loop);
+    __ stx(value, to, 0);
+    __ subcc(count, 1 << (shift + 1), count);
+    __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
+    __ delayed()->add(to, 8, to);
+
+    // fill trailing 4 bytes
+    __ andcc(count, 1<<shift, G0);  // in delay slot of branches
+    if (t == T_INT) {
+      __ BIND(L_fill_elements);
+    }
+    __ BIND(L_fill_4_bytes);
+    __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
+    if (t == T_BYTE || t == T_SHORT) {
+      __ delayed()->andcc(count, 1<<(shift-1), G0);
+    } else {
+      __ delayed()->nop();
+    }
+    __ stw(value, to, 0);
+    if (t == T_BYTE || t == T_SHORT) {
+      __ inc(to, 4);
+      // fill trailing 2 bytes
+      __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches
+      __ BIND(L_fill_2_bytes);
+      __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
+      __ delayed()->andcc(count, 1, count);
+      __ sth(value, to, 0);
+      if (t == T_BYTE) {
+        __ inc(to, 2);
+        // fill trailing byte
+        __ andcc(count, 1, count);  // in delay slot of branches
+        __ BIND(L_fill_byte);
+        __ brx(Assembler::zero, false, Assembler::pt, L_exit);
+        __ delayed()->nop();
+        __ stb(value, to, 0);
+      } else {
+        __ BIND(L_fill_byte);
+      }
+    } else {
+      __ BIND(L_fill_2_bytes);
+    }
+    __ BIND(L_exit);
+    __ retl();
+    __ delayed()->nop();
+
+    // Handle fills of less than 8 bytes.  Int is handled elsewhere.
+    if (t == T_BYTE) {
+      __ BIND(L_fill_elements);
+      Label L_fill_2, L_fill_4;
+      // in delay slot __ andcc(count, 1, G0);
+      __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
+      __ delayed()->andcc(count, 2, G0);
+      __ stb(value, to, 0);
+      __ inc(to, 1);
+      __ BIND(L_fill_2);
+      __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
+      __ delayed()->andcc(count, 4, G0);
+      __ stb(value, to, 0);
+      __ stb(value, to, 1);
+      __ inc(to, 2);
+      __ BIND(L_fill_4);
+      __ brx(Assembler::zero, false, Assembler::pt, L_exit);
+      __ delayed()->nop();
+      __ stb(value, to, 0);
+      __ stb(value, to, 1);
+      __ stb(value, to, 2);
+      __ retl();
+      __ delayed()->stb(value, to, 3);
+    }
+
+    if (t == T_SHORT) {
+      Label L_fill_2;
+      __ BIND(L_fill_elements);
+      // in delay slot __ andcc(count, 1, G0);
+      __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
+      __ delayed()->andcc(count, 2, G0);
+      __ sth(value, to, 0);
+      __ inc(to, 2);
+      __ BIND(L_fill_2);
+      __ brx(Assembler::zero, false, Assembler::pt, L_exit);
+      __ delayed()->nop();
+      __ sth(value, to, 0);
+      __ retl();
+      __ delayed()->sth(value, to, 2);
+    }
+    return start;
+  }
+
+  //
   //  Generate stub for conjoint short copy.  If "aligned" is true, the
   //  "from" and "to" addresses are assumed to be heapword aligned.
   //
@@ -2855,6 +3078,13 @@
     StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
+
+    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
+    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
+    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
+    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
+    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
+    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
   }
 
   void generate_initial() {
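
All six fill stubs registered above share one shape: replicate the value across 64 bits, peel stores until the destination is 8-byte aligned, blast 32-byte chunks, then handle the 8/4/2/1-byte tails. A portable C++ sketch of that shape for the jshort case (illustration only; it assumes `to` is at least 2-byte aligned, as Java short arrays are):

    #include <cstdint>
    #include <cstring>

    void jshort_fill_sketch(uint16_t* to, uint16_t value, int count) {
      uint64_t wide = value;
      wide |= wide << 16;
      wide |= wide << 32;                        // value replicated 4x across 64 bits
      while (count > 0 && (reinterpret_cast<uintptr_t>(to) & 7) != 0) {
        *to++ = value; count--;                  // align to 8 bytes
      }
      while (count >= 16) {                      // 32-byte chunks, four 8-byte stores
        memcpy(to,      &wide, 8);
        memcpy(to + 4,  &wide, 8);
        memcpy(to + 8,  &wide, 8);
        memcpy(to + 12, &wide, 8);
        to += 16; count -= 16;
      }
      while (count >= 4) {                       // 8-byte tail stores
        memcpy(to, &wide, 8);
        to += 4; count -= 4;
      }
      while (count-- > 0) *to++ = value;         // trailing elements
    }
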
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -43,7 +43,7 @@
 
 // MethodHandles adapters
 enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 6000
+  method_handles_adapters_code_size = 12000
 };
 
 class Sparc {
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -294,34 +294,64 @@
 // ??: invocation counter
 //
 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  // Update standard invocation counters
-  __ increment_invocation_counter(O0, G3_scratch);
-  if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
-    Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
-    __ ld(interpreter_invocation_counter, G3_scratch);
-    __ inc(G3_scratch);
-    __ st(G3_scratch, interpreter_invocation_counter);
-  }
+  // Note: In tiered we increment either counters in methodOop or in MDO depending on whether we're profiling or not.
+  if (TieredCompilation) {
+    const int increment = InvocationCounter::count_increment;
+    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+    Label no_mdo, done;
+    if (ProfileInterpreter) {
+      // If no method data exists, branch to no_mdo and use the counter in the methodOop.
+      __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
+      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
+      __ delayed()->nop();
+      // Increment counter
+      Address mdo_invocation_counter(G4_scratch,
+                                     in_bytes(methodDataOopDesc::invocation_counter_offset()) +
+                                     in_bytes(InvocationCounter::counter_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
+                                 G3_scratch, Lscratch,
+                                 Assembler::zero, overflow);
+      __ ba(false, done);
+      __ delayed()->nop();
+    }
 
-  if (ProfileInterpreter && profile_method != NULL) {
-    // Test to see if we should create a method data oop
-    AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
-    __ sethi(profile_limit, G3_scratch);
-    __ ld(G3_scratch, profile_limit.low10(), G3_scratch);
-    __ cmp(O0, G3_scratch);
-    __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
-    __ delayed()->nop();
+    // Increment counter in methodOop
+    __ bind(no_mdo);
+    Address invocation_counter(Lmethod,
+                               in_bytes(methodOopDesc::invocation_counter_offset()) +
+                               in_bytes(InvocationCounter::counter_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask,
+                               G3_scratch, Lscratch,
+                               Assembler::zero, overflow);
+    __ bind(done);
+  } else {
+    // Update standard invocation counters
+    __ increment_invocation_counter(O0, G3_scratch);
+    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
+      Address interpreter_invocation_counter(Lmethod, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
+      __ ld(interpreter_invocation_counter, G3_scratch);
+      __ inc(G3_scratch);
+      __ st(G3_scratch, interpreter_invocation_counter);
+    }
 
-    // if no method data exists, go to profile_method
-    __ test_method_data_pointer(*profile_method);
-  }
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
+      __ load_contents(profile_limit, G3_scratch);
+      __ cmp(O0, G3_scratch);
+      __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
+      __ delayed()->nop();
 
-  AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
-  __ sethi(invocation_limit, G3_scratch);
-  __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
-  __ cmp(O0, G3_scratch);
-  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
-  __ delayed()->nop();
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(*profile_method);
+    }
+
+    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
+    __ load_contents(invocation_limit, G3_scratch);
+    __ cmp(O0, G3_scratch);
+    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
+    __ delayed()->nop();
+  }
 
 }
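
In the tiered branch above, the low InvocationCounter::count_shift bits of the counter hold flags, so each invocation adds count_increment = 1 << count_shift, and the mask ((1 << Tier0InvokeNotifyFreqLog) - 1) << count_shift makes the overflow branch fire once every 2^Tier0InvokeNotifyFreqLog invocations. A small worked check (the two constants are illustrative; the real values come from the VM flags):

    #include <cassert>

    int main() {
      const int count_shift     = 3;   // stand-in for InvocationCounter::count_shift
      const int notify_freq_log = 7;   // stand-in for Tier0InvokeNotifyFreqLog
      const int increment = 1 << count_shift;
      const int mask      = ((1 << notify_freq_log) - 1) << count_shift;

      int counter = 0, notifications = 0;
      for (int invocations = 1; invocations <= 1024; invocations++) {
        counter += increment;
        if ((counter & mask) == 0) notifications++;   // the overflow branch
      }
      assert(notifications == (1024 >> notify_freq_log));  // once per 128 calls
      return 0;
    }
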
 
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1580,6 +1580,7 @@
   const Register O0_cur_bcp = O0;
   __ mov( Lbcp, O0_cur_bcp );
 
+
   bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
   if ( increment_invocation_counter_for_backward_branches ) {
     Label Lforward;
@@ -1588,17 +1589,84 @@
     // Bump bytecode pointer by displacement (take the branch)
     __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr
 
-    // Update Backedge branch separately from invocations
-    const Register G4_invoke_ctr = G4;
-    __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
-    if (ProfileInterpreter) {
-      __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
-      if (UseOnStackReplacement) {
-        __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
+    if (TieredCompilation) {
+      Label Lno_mdo, Loverflow;
+      int increment = InvocationCounter::count_increment;
+      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+      if (ProfileInterpreter) {
+        // If no method data exists, branch to Lno_mdo and use the counter in the methodOop.
+        __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
+        __ br_null(G4_scratch, false, Assembler::pn, Lno_mdo);
+        __ delayed()->nop();
+
+        // Increment backedge counter in the MDO
+        Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+                                                 in_bytes(InvocationCounter::counter_offset()));
+        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
+                                   Assembler::notZero, &Lforward);
+        __ ba(false, Loverflow);
+        __ delayed()->nop();
       }
+
+      // If there's no MDO, increment counter in methodOop
+      __ bind(Lno_mdo);
+      Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) +
+                                        in_bytes(InvocationCounter::counter_offset()));
+      __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
+                                 Assembler::notZero, &Lforward);
+      __ bind(Loverflow);
+
+      // notify point for loop, pass branch bytecode
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
+
+      // Was an OSR adapter generated?
+      // O0 = osr nmethod
+      __ br_null(O0, false, Assembler::pn, Lforward);
+      __ delayed()->nop();
+
+      // Has the nmethod been invalidated already?
+      __ ld(O0, nmethod::entry_bci_offset(), O2);
+      __ cmp(O2, InvalidOSREntryBci);
+      __ br(Assembler::equal, false, Assembler::pn, Lforward);
+      __ delayed()->nop();
+
+      // migrate the interpreter frame off of the stack
+
+      __ mov(G2_thread, L7);
+      // save nmethod
+      __ mov(O0, L6);
+      __ set_last_Java_frame(SP, noreg);
+      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
+      __ reset_last_Java_frame();
+      __ mov(L7, G2_thread);
+
+      // move OSR nmethod to I1
+      __ mov(L6, I1);
+
+      // OSR buffer to I0
+      __ mov(O0, I0);
+
+      // remove the interpreter frame
+      __ restore(I5_savedSP, 0, SP);
+
+      // Jump to the osr code.
+      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
+      __ jmp(O2, G0);
+      __ delayed()->nop();
+
     } else {
-      if (UseOnStackReplacement) {
-        __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
+      // Update Backedge branch separately from invocations
+      const Register G4_invoke_ctr = G4;
+      __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
+      if (ProfileInterpreter) {
+        __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
+        if (UseOnStackReplacement) {
+          __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
+        }
+      } else {
+        if (UseOnStackReplacement) {
+          __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
+        }
       }
     }
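
The tiered backedge path above chains four decisions: notify the runtime, bail out if no OSR nmethod came back, bail out if it was already invalidated, then migrate the interpreter frame and jump. A control-flow sketch (every name here is a stand-in with a trivial body; only the ordering of the checks mirrors the emitted code):

    struct Nmethod { int entry_bci; };
    static const int InvalidOSREntryBci = -2;  // illustrative value

    static Nmethod* frequency_counter_overflow(void*) { return 0; }  // runtime notify point
    static void*    osr_migration_begin()             { return 0; }  // move frame off stack
    static void     jump_to_osr(Nmethod*, void*)      {}             // enter compiled code

    void backedge_overflow(void* branch_bcp) {
      Nmethod* osr = frequency_counter_overflow(branch_bcp);
      if (osr == 0) return;                              // no OSR code yet: Lforward
      if (osr->entry_bci == InvalidOSREntryBci) return;  // invalidated: Lforward
      void* buf = osr_migration_begin();                 // SharedRuntime::OSR_migration_begin
      jump_to_osr(osr, buf);                             // does not return in the real code
    }
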
 
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -4993,19 +4993,22 @@
       ttyLocker ttyl;
       tty->print_cr("eip = 0x%08x", eip);
 #ifndef PRODUCT
-      tty->cr();
-      findpc(eip);
-      tty->cr();
+      if ((WizardMode || Verbose) && PrintMiscellaneous) {
+        tty->cr();
+        findpc(eip);
+        tty->cr();
+      }
 #endif
-      tty->print_cr("rax, = 0x%08x", rax);
-      tty->print_cr("rbx, = 0x%08x", rbx);
+      tty->print_cr("rax = 0x%08x", rax);
+      tty->print_cr("rbx = 0x%08x", rbx);
       tty->print_cr("rcx = 0x%08x", rcx);
       tty->print_cr("rdx = 0x%08x", rdx);
       tty->print_cr("rdi = 0x%08x", rdi);
       tty->print_cr("rsi = 0x%08x", rsi);
-      tty->print_cr("rbp, = 0x%08x", rbp);
+      tty->print_cr("rbp = 0x%08x", rbp);
       tty->print_cr("rsp = 0x%08x", rsp);
       BREAKPOINT;
+      assert(false, "start up GDB");
     }
   } else {
     ttyLocker ttyl;
@@ -7677,11 +7680,19 @@
   movptr(tmp, ExternalAddress((address) delayed_value_addr));
 
 #ifdef ASSERT
-  Label L;
-  testptr(tmp, tmp);
-  jccb(Assembler::notZero, L);
-  hlt();
-  bind(L);
+  { Label L;
+    testptr(tmp, tmp);
+    if (WizardMode) {
+      jcc(Assembler::notZero, L);
+      char* buf = new char[40];
+      sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
+      stop(buf);
+    } else {
+      jccb(Assembler::notZero, L);
+      hlt();
+    }
+    bind(L);
+  }
 #endif
 
   if (offset != 0)
@@ -8767,6 +8778,186 @@
   bind(DONE);
 }
 
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+void MacroAssembler::generate_fill(BasicType t, bool aligned,
+                                   Register to, Register value, Register count,
+                                   Register rtmp, XMMRegister xtmp) {
+  assert_different_registers(to, value, count, rtmp);
+  Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
+  Label L_fill_2_bytes, L_fill_4_bytes;
+
+  int shift = -1;
+  switch (t) {
+    case T_BYTE:
+      shift = 2;
+      break;
+    case T_SHORT:
+      shift = 1;
+      break;
+    case T_INT:
+      shift = 0;
+      break;
+    default: ShouldNotReachHere();
+  }
+
+  if (t == T_BYTE) {
+    andl(value, 0xff);
+    movl(rtmp, value);
+    shll(rtmp, 8);
+    orl(value, rtmp);
+  }
+  if (t == T_SHORT) {
+    andl(value, 0xffff);
+  }
+  if (t == T_BYTE || t == T_SHORT) {
+    movl(rtmp, value);
+    shll(rtmp, 16);
+    orl(value, rtmp);
+  }
+
+  cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
+  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
+  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
+    // align source address at 4 bytes address boundary
+    if (t == T_BYTE) {
+      // One byte misalignment happens only for byte arrays
+      testptr(to, 1);
+      jccb(Assembler::zero, L_skip_align1);
+      movb(Address(to, 0), value);
+      increment(to);
+      decrement(count);
+      BIND(L_skip_align1);
+    }
+    // Two bytes misalignment happens only for byte and short (char) arrays
+    testptr(to, 2);
+    jccb(Assembler::zero, L_skip_align2);
+    movw(Address(to, 0), value);
+    addptr(to, 2);
+    subl(count, 1<<(shift-1));
+    BIND(L_skip_align2);
+  }
+  if (UseSSE < 2) {
+    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
+    // Fill 32-byte chunks
+    subl(count, 8 << shift);
+    jcc(Assembler::less, L_check_fill_8_bytes);
+    align(16);
+
+    BIND(L_fill_32_bytes_loop);
+
+    for (int i = 0; i < 32; i += 4) {
+      movl(Address(to, i), value);
+    }
+
+    addptr(to, 32);
+    subl(count, 8 << shift);
+    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
+    BIND(L_check_fill_8_bytes);
+    addl(count, 8 << shift);
+    jccb(Assembler::zero, L_exit);
+    jmpb(L_fill_8_bytes);
+
+    //
+    // length is too short, just fill qwords
+    //
+    BIND(L_fill_8_bytes_loop);
+    movl(Address(to, 0), value);
+    movl(Address(to, 4), value);
+    addptr(to, 8);
+    BIND(L_fill_8_bytes);
+    subl(count, 1 << (shift + 1));
+    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
+    // fall through to fill 4 bytes
+  } else {
+    Label L_fill_32_bytes;
+    if (!UseUnalignedLoadStores) {
+      // align to 8 bytes, we know we are 4 byte aligned to start
+      testptr(to, 4);
+      jccb(Assembler::zero, L_fill_32_bytes);
+      movl(Address(to, 0), value);
+      addptr(to, 4);
+      subl(count, 1<<shift);
+    }
+    BIND(L_fill_32_bytes);
+    {
+      assert( UseSSE >= 2, "supported cpu only" );
+      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
+      // Fill 32-byte chunks
+      movdl(xtmp, value);
+      pshufd(xtmp, xtmp, 0);
+
+      subl(count, 8 << shift);
+      jcc(Assembler::less, L_check_fill_8_bytes);
+      align(16);
+
+      BIND(L_fill_32_bytes_loop);
+
+      if (UseUnalignedLoadStores) {
+        movdqu(Address(to, 0), xtmp);
+        movdqu(Address(to, 16), xtmp);
+      } else {
+        movq(Address(to, 0), xtmp);
+        movq(Address(to, 8), xtmp);
+        movq(Address(to, 16), xtmp);
+        movq(Address(to, 24), xtmp);
+      }
+
+      addptr(to, 32);
+      subl(count, 8 << shift);
+      jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
+      BIND(L_check_fill_8_bytes);
+      addl(count, 8 << shift);
+      jccb(Assembler::zero, L_exit);
+      jmpb(L_fill_8_bytes);
+
+      //
+      // length is too short, just fill qwords
+      //
+      BIND(L_fill_8_bytes_loop);
+      movq(Address(to, 0), xtmp);
+      addptr(to, 8);
+      BIND(L_fill_8_bytes);
+      subl(count, 1 << (shift + 1));
+      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
+    }
+  }
+  // fill trailing 4 bytes
+  BIND(L_fill_4_bytes);
+  testl(count, 1<<shift);
+  jccb(Assembler::zero, L_fill_2_bytes);
+  movl(Address(to, 0), value);
+  if (t == T_BYTE || t == T_SHORT) {
+    addptr(to, 4);
+    BIND(L_fill_2_bytes);
+    // fill trailing 2 bytes
+    testl(count, 1<<(shift-1));
+    jccb(Assembler::zero, L_fill_byte);
+    movw(Address(to, 0), value);
+    if (t == T_BYTE) {
+      addptr(to, 2);
+      BIND(L_fill_byte);
+      // fill trailing byte
+      testl(count, 1);
+      jccb(Assembler::zero, L_exit);
+      movb(Address(to, 0), value);
+    } else {
+      BIND(L_fill_byte);
+    }
+  } else {
+    BIND(L_fill_2_bytes);
+  }
+  BIND(L_exit);
+}
+#undef BIND
+#undef BLOCK_COMMENT
+
+
 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
   switch (cond) {
     // Note some conditions are synonyms for others
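
The andl/shll/orl prologue of generate_fill just replicates a narrow fill value across 32 bits; on SSE2, movdl plus pshufd then splats that word across the whole XMM register. The scalar part in C++ terms (a sketch, with element_bytes standing in for the BasicType switch):

    #include <cstdint>

    uint32_t broadcast32(uint32_t value, int element_bytes) {
      if (element_bytes == 1) {      // T_BYTE: byte -> 16 bits
        value &= 0xff;
        value |= value << 8;
      }
      if (element_bytes <= 2) {      // T_BYTE or T_SHORT: 16 bits -> 32 bits
        value &= 0xffff;
        value |= value << 16;
      }
      return value;                  // T_INT is already full width
    }
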
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -2242,6 +2242,11 @@
                           Register limit, Register result, Register chr,
                           XMMRegister vec1, XMMRegister vec2);
 
+  // Fill primitive arrays
+  void generate_fill(BasicType t, bool aligned,
+                     Register to, Register value, Register count,
+                     Register rtmp, XMMRegister xtmp);
+
 #undef VIRTUAL
 
 };
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -68,19 +68,15 @@
   __ jmp(_continuation);
 }
 
-#ifdef TIERED
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
+  ce->store_parameter(_method->as_register(), 1);
   ce->store_parameter(_bci, 0);
   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
-
   __ jmp(_continuation);
 }
-#endif // TIERED
-
-
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                                bool throw_index_out_of_bounds_exception)
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1613,6 +1613,188 @@
   __ bind(*op->stub()->continuation());
 }
 
+void LIR_Assembler::type_profile_helper(Register mdo,
+                                        ciMethodData *md, ciProfileData *data,
+                                        Register recv, Label* update_done) {
+  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
+    Label next_test;
+    // See if the receiver is receiver[n].
+    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
+    __ jccb(Assembler::notEqual, next_test);
+    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
+    __ addptr(data_addr, DataLayout::counter_increment);
+    __ jmp(*update_done);
+    __ bind(next_test);
+  }
+
+  // Didn't find receiver; find next empty slot and fill it in
+  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
+    Label next_test;
+    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
+    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
+    __ jccb(Assembler::notEqual, next_test);
+    __ movptr(recv_addr, recv);
+    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
+    __ jmp(*update_done);
+    __ bind(next_test);
+  }
+}
+
+void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
+  // we always need a stub for the failure case.
+  CodeStub* stub = op->stub();
+  Register obj = op->object()->as_register();
+  Register k_RInfo = op->tmp1()->as_register();
+  Register klass_RInfo = op->tmp2()->as_register();
+  Register dst = op->result_opr()->as_register();
+  ciKlass* k = op->klass();
+  Register Rtmp1 = noreg;
+
+  // check if it needs to be profiled
+  ciMethodData* md;
+  ciProfileData* data;
+
+  if (op->should_profile()) {
+    ciMethod* method = op->profiled_method();
+    assert(method != NULL, "Should have method");
+    int bci = op->profiled_bci();
+    md = method->method_data();
+    if (md == NULL) {
+      bailout("out of memory building methodDataOop");
+      return;
+    }
+    data = md->bci_to_data(bci);
+    assert(data != NULL,                "need data for type check");
+    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
+  }
+  Label profile_cast_success, profile_cast_failure;
+  Label *success_target = op->should_profile() ? &profile_cast_success : success;
+  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
+
+  if (obj == k_RInfo) {
+    k_RInfo = dst;
+  } else if (obj == klass_RInfo) {
+    klass_RInfo = dst;
+  }
+  if (k->is_loaded()) {
+    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
+  } else {
+    Rtmp1 = op->tmp3()->as_register();
+    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
+  }
+
+  assert_different_registers(obj, k_RInfo, klass_RInfo);
+  if (!k->is_loaded()) {
+    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
+  } else {
+#ifdef _LP64
+    __ movoop(k_RInfo, k->constant_encoding());
+#endif // _LP64
+  }
+  assert(obj != k_RInfo, "must be different");
+
+  __ cmpptr(obj, (int32_t)NULL_WORD);
+  if (op->should_profile()) {
+    Label not_null;
+    __ jccb(Assembler::notEqual, not_null);
+    // Object is null; update MDO and exit
+    Register mdo  = klass_RInfo;
+    __ movoop(mdo, md->constant_encoding());
+    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
+    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
+    __ orl(data_addr, header_bits);
+    __ jmp(*obj_is_null);
+    __ bind(not_null);
+  } else {
+    __ jcc(Assembler::equal, *obj_is_null);
+  }
+  __ verify_oop(obj);
+
+  if (op->fast_check()) {
+    // get object class
+    // not a safepoint as obj null check happens earlier
+    if (k->is_loaded()) {
+#ifdef _LP64
+      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+#else
+      __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
+#endif // _LP64
+    } else {
+      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+    }
+    __ jcc(Assembler::notEqual, *failure_target);
+    // successful cast, fall through to profile or jump
+  } else {
+    // get object class
+    // not a safepoint as obj null check happens earlier
+    __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+    if (k->is_loaded()) {
+      // See if we get an immediate positive hit
+#ifdef _LP64
+      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
+#else
+      __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
+#endif // _LP64
+      if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
+        __ jcc(Assembler::notEqual, *failure_target);
+        // successful cast, fall through to profile or jump
+      } else {
+        // See if we get an immediate positive hit
+        __ jcc(Assembler::equal, *success_target);
+        // check for self
+#ifdef _LP64
+        __ cmpptr(klass_RInfo, k_RInfo);
+#else
+        __ cmpoop(klass_RInfo, k->constant_encoding());
+#endif // _LP64
+        __ jcc(Assembler::equal, *success_target);
+
+        __ push(klass_RInfo);
+#ifdef _LP64
+        __ push(k_RInfo);
+#else
+        __ pushoop(k->constant_encoding());
+#endif // _LP64
+        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+        __ pop(klass_RInfo);
+        __ pop(klass_RInfo);
+        // result is a boolean
+        __ cmpl(klass_RInfo, 0);
+        __ jcc(Assembler::equal, *failure_target);
+        // successful cast, fall through to profile or jump
+      }
+    } else {
+      // perform the fast part of the checking logic
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
+      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
+      __ push(klass_RInfo);
+      __ push(k_RInfo);
+      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+      __ pop(klass_RInfo);
+      __ pop(k_RInfo);
+      // result is a boolean
+      __ cmpl(k_RInfo, 0);
+      __ jcc(Assembler::equal, *failure_target);
+      // successful cast, fall through to profile or jump
+    }
+  }
+  if (op->should_profile()) {
+    Register mdo  = klass_RInfo, recv = k_RInfo;
+    __ bind(profile_cast_success);
+    __ movoop(mdo, md->constant_encoding());
+    __ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes()));
+    Label update_done;
+    type_profile_helper(mdo, md, data, recv, success);
+    __ jmp(*success);
+
+    __ bind(profile_cast_failure);
+    __ movoop(mdo, md->constant_encoding());
+    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
+    __ subptr(counter_addr, DataLayout::counter_increment);
+    __ jmp(*failure);
+  }
+  __ jmp(*success);
+}
 
 
 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
@@ -1625,9 +1807,44 @@
     Register Rtmp1 = op->tmp3()->as_register();
 
     CodeStub* stub = op->stub();
-    Label done;
+
+    // check if it needs to be profiled
+    ciMethodData* md;
+    ciProfileData* data;
+
+    if (op->should_profile()) {
+      ciMethod* method = op->profiled_method();
+      assert(method != NULL, "Should have method");
+      int bci = op->profiled_bci();
+      md = method->method_data();
+      if (md == NULL) {
+        bailout("out of memory building methodDataOop");
+        return;
+      }
+      data = md->bci_to_data(bci);
+      assert(data != NULL,                "need data for type check");
+      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
+    }
+    Label profile_cast_success, profile_cast_failure, done;
+    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
+    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
+
     __ cmpptr(value, (int32_t)NULL_WORD);
-    __ jcc(Assembler::equal, done);
+    if (op->should_profile()) {
+      Label not_null;
+      __ jccb(Assembler::notEqual, not_null);
+      // Object is null; update MDO and exit
+      Register mdo  = klass_RInfo;
+      __ movoop(mdo, md->constant_encoding());
+      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
+      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
+      __ orl(data_addr, header_bits);
+      __ jmp(done);
+      __ bind(not_null);
+    } else {
+      __ jcc(Assembler::equal, done);
+    }
+
     add_debug_info_for_null_check_here(op->info_for_exception());
     __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
     __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
@@ -1635,7 +1852,7 @@
     // get instance klass
     __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
     // perform the fast part of the checking logic
-    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
+    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     __ push(klass_RInfo);
     __ push(k_RInfo);
@@ -1644,229 +1861,52 @@
     __ pop(k_RInfo);
     // result is a boolean
     __ cmpl(k_RInfo, 0);
-    __ jcc(Assembler::equal, *stub->entry());
-    __ bind(done);
-  } else if (op->code() == lir_checkcast) {
-    // we always need a stub for the failure case.
-    CodeStub* stub = op->stub();
-    Register obj = op->object()->as_register();
-    Register k_RInfo = op->tmp1()->as_register();
-    Register klass_RInfo = op->tmp2()->as_register();
-    Register dst = op->result_opr()->as_register();
-    ciKlass* k = op->klass();
-    Register Rtmp1 = noreg;
-
-    Label done;
-    if (obj == k_RInfo) {
-      k_RInfo = dst;
-    } else if (obj == klass_RInfo) {
-      klass_RInfo = dst;
-    }
-    if (k->is_loaded()) {
-      select_different_registers(obj, dst, k_RInfo, klass_RInfo);
-    } else {
-      Rtmp1 = op->tmp3()->as_register();
-      select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
-    }
-
-    assert_different_registers(obj, k_RInfo, klass_RInfo);
-    if (!k->is_loaded()) {
-      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
-    } else {
-#ifdef _LP64
-      __ movoop(k_RInfo, k->constant_encoding());
-#else
-      k_RInfo = noreg;
-#endif // _LP64
-    }
-    assert(obj != k_RInfo, "must be different");
-    __ cmpptr(obj, (int32_t)NULL_WORD);
-    if (op->profiled_method() != NULL) {
-      ciMethod* method = op->profiled_method();
-      int bci          = op->profiled_bci();
-
-      Label profile_done;
-      __ jcc(Assembler::notEqual, profile_done);
-      // Object is null; update methodDataOop
-      ciMethodData* md = method->method_data();
-      if (md == NULL) {
-        bailout("out of memory building methodDataOop");
-        return;
-      }
-      ciProfileData* data = md->bci_to_data(bci);
-      assert(data != NULL,       "need data for checkcast");
-      assert(data->is_BitData(), "need BitData for checkcast");
-      Register mdo  = klass_RInfo;
+    __ jcc(Assembler::equal, *failure_target);
+    // fall through to the success case
+
+    if (op->should_profile()) {
+      Register mdo  = klass_RInfo, recv = k_RInfo;
+      __ bind(profile_cast_success);
+      __ movoop(mdo, md->constant_encoding());
+      __ movptr(recv, Address(value, oopDesc::klass_offset_in_bytes()));
+      Label update_done;
+      type_profile_helper(mdo, md, data, recv, &done);
+      __ jmpb(done);
+
+      __ bind(profile_cast_failure);
       __ movoop(mdo, md->constant_encoding());
-      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
-      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
-      __ orl(data_addr, header_bits);
-      __ jmp(done);
-      __ bind(profile_done);
-    } else {
-      __ jcc(Assembler::equal, done);
+      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
+      __ subptr(counter_addr, DataLayout::counter_increment);
+      __ jmp(*stub->entry());
     }
-    __ verify_oop(obj);
-
-    if (op->fast_check()) {
-      // get object classo
-      // not a safepoint as obj null check happens earlier
-      if (k->is_loaded()) {
-#ifdef _LP64
-        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
-#else
-        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
-#endif // _LP64
-      } else {
-        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
-
+
+    __ bind(done);
+  } else
+    if (code == lir_checkcast) {
+      Register obj = op->object()->as_register();
+      Register dst = op->result_opr()->as_register();
+      Label success;
+      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
+      __ bind(success);
+      if (dst != obj) {
+        __ mov(dst, obj);
       }
-      __ jcc(Assembler::notEqual, *stub->entry());
-      __ bind(done);
-    } else {
-      // get object class
-      // not a safepoint as obj null check happens earlier
-      __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
-      if (k->is_loaded()) {
-        // See if we get an immediate positive hit
-#ifdef _LP64
-        __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
-#else
-        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
-#endif // _LP64
-        if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
-          __ jcc(Assembler::notEqual, *stub->entry());
-        } else {
-          // See if we get an immediate positive hit
-          __ jcc(Assembler::equal, done);
-          // check for self
-#ifdef _LP64
-          __ cmpptr(klass_RInfo, k_RInfo);
-#else
-          __ cmpoop(klass_RInfo, k->constant_encoding());
-#endif // _LP64
-          __ jcc(Assembler::equal, done);
-
-          __ push(klass_RInfo);
-#ifdef _LP64
-          __ push(k_RInfo);
-#else
-          __ pushoop(k->constant_encoding());
-#endif // _LP64
-          __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
-          __ pop(klass_RInfo);
-          __ pop(klass_RInfo);
-          // result is a boolean
-          __ cmpl(klass_RInfo, 0);
-          __ jcc(Assembler::equal, *stub->entry());
-        }
+    } else
+      if (code == lir_instanceof) {
+        Register obj = op->object()->as_register();
+        Register dst = op->result_opr()->as_register();
+        Label success, failure, done;
+        emit_typecheck_helper(op, &success, &failure, &failure);
+        __ bind(failure);
+        __ xorptr(dst, dst);
+        __ jmpb(done);
+        __ bind(success);
+        __ movptr(dst, 1);
         __ bind(done);
       } else {
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
-        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
-        __ push(klass_RInfo);
-        __ push(k_RInfo);
-        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
-        __ pop(klass_RInfo);
-        __ pop(k_RInfo);
-        // result is a boolean
-        __ cmpl(k_RInfo, 0);
-        __ jcc(Assembler::equal, *stub->entry());
-        __ bind(done);
+        ShouldNotReachHere();
       }
 
-    }
-    if (dst != obj) {
-      __ mov(dst, obj);
-    }
-  } else if (code == lir_instanceof) {
-    Register obj = op->object()->as_register();
-    Register k_RInfo = op->tmp1()->as_register();
-    Register klass_RInfo = op->tmp2()->as_register();
-    Register dst = op->result_opr()->as_register();
-    ciKlass* k = op->klass();
-
-    Label done;
-    Label zero;
-    Label one;
-    if (obj == k_RInfo) {
-      k_RInfo = klass_RInfo;
-      klass_RInfo = obj;
-    }
-    // patching may screw with our temporaries on sparc,
-    // so let's do it before loading the class
-    if (!k->is_loaded()) {
-      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
-    } else {
-      LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding()));
-    }
-    assert(obj != k_RInfo, "must be different");
-
-    __ verify_oop(obj);
-    if (op->fast_check()) {
-      __ cmpptr(obj, (int32_t)NULL_WORD);
-      __ jcc(Assembler::equal, zero);
-      // get object class
-      // not a safepoint as obj null check happens earlier
-      if (LP64_ONLY(false &&) k->is_loaded()) {
-        NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()));
-        k_RInfo = noreg;
-      } else {
-        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
-
-      }
-      __ jcc(Assembler::equal, one);
-    } else {
-      // get object class
-      // not a safepoint as obj null check happens earlier
-      __ cmpptr(obj, (int32_t)NULL_WORD);
-      __ jcc(Assembler::equal, zero);
-      __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
-
-#ifndef _LP64
-      if (k->is_loaded()) {
-        // See if we get an immediate positive hit
-        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
-        __ jcc(Assembler::equal, one);
-        if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) {
-          // check for self
-          __ cmpoop(klass_RInfo, k->constant_encoding());
-          __ jcc(Assembler::equal, one);
-          __ push(klass_RInfo);
-          __ pushoop(k->constant_encoding());
-          __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
-          __ pop(klass_RInfo);
-          __ pop(dst);
-          __ jmp(done);
-        }
-      }
-        else // next block is unconditional if LP64:
-#endif // LP64
-      {
-        assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
-
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, dst, &one, &zero, NULL);
-        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
-        __ push(klass_RInfo);
-        __ push(k_RInfo);
-        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
-        __ pop(klass_RInfo);
-        __ pop(dst);
-        __ jmp(done);
-      }
-    }
-    __ bind(zero);
-    __ xorptr(dst, dst);
-    __ jmp(done);
-    __ bind(one);
-    __ movptr(dst, 1);
-    __ bind(done);
-  } else {
-    ShouldNotReachHere();
-  }
-
 }
 
 
@@ -1922,7 +1962,6 @@
   }
 }
 
-
 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
   Assembler::Condition acond, ncond;
   switch (condition) {
@@ -2014,11 +2053,11 @@
       jint c = right->as_constant_ptr()->as_jint();
       switch (code) {
         case lir_add: {
-          __ increment(lreg, c);
+          __ incrementl(lreg, c);
           break;
         }
         case lir_sub: {
-          __ decrement(lreg, c);
+          __ decrementl(lreg, c);
           break;
         }
         default: ShouldNotReachHere();
@@ -3253,13 +3292,13 @@
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      Tier1ProfileVirtualCalls) {
+      C1ProfileVirtualCalls) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, recv);
     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
     ciKlass* known_klass = op->known_holder();
-    if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
+    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
       // We know the type that will be seen at this call site; we can
       // statically update the methodDataOop rather than needing to do
       // dynamic tests on the receiver type
@@ -3272,7 +3311,7 @@
         ciKlass* receiver = vc_data->receiver(i);
         if (known_klass->equals(receiver)) {
           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
-          __ addl(data_addr, DataLayout::counter_increment);
+          __ addptr(data_addr, DataLayout::counter_increment);
           return;
         }
       }
@@ -3288,49 +3327,26 @@
           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
           __ movoop(recv_addr, known_klass->constant_encoding());
           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
-          __ addl(data_addr, DataLayout::counter_increment);
+          __ addptr(data_addr, DataLayout::counter_increment);
           return;
         }
       }
     } else {
       __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
       Label update_done;
-      uint i;
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
-        // See if the receiver is receiver[n].
-        __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
-        __ jcc(Assembler::notEqual, next_test);
-        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
-        __ addl(data_addr, DataLayout::counter_increment);
-        __ jmp(update_done);
-        __ bind(next_test);
-      }
-
-      // Didn't find receiver; find next empty slot and fill it in
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
-        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
-        __ cmpptr(recv_addr, (int32_t)NULL_WORD);
-        __ jcc(Assembler::notEqual, next_test);
-        __ movptr(recv_addr, recv);
-        __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
-        __ jmp(update_done);
-        __ bind(next_test);
-      }
+      type_profile_helper(mdo, md, data, recv, &update_done);
       // Receiver did not match any saved receiver and there is no empty row for it.
       // Increment total counter to indicate polymorphic case.
-      __ addl(counter_addr, DataLayout::counter_increment);
+      __ addptr(counter_addr, DataLayout::counter_increment);
 
       __ bind(update_done);
     }
   } else {
     // Static call
-    __ addl(counter_addr, DataLayout::counter_increment);
+    __ addptr(counter_addr, DataLayout::counter_increment);
   }
 }
 
-
 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
   Unimplemented();
 }
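The row walk that type_profile_helper emits is compact but dense in address arithmetic. As a reference, here is its run-time effect in plain C++, a sketch only: the rows live inside the methodDataOop at the receiver_offset/receiver_count_offset slots computed above, and row_limit is ReceiverTypeData::row_limit().

    #include <stddef.h>
    #include <stdint.h>

    // Scalar model of the emitted receiver-type profile update.
    struct ReceiverRow { void* receiver; intptr_t count; };

    static void profile_receiver_sketch(ReceiverRow* rows, int row_limit,
                                        void* recv, intptr_t increment) {
      // First pass: a row already holds this receiver, bump its counter.
      for (int i = 0; i < row_limit; i++) {
        if (rows[i].receiver == recv) {
          rows[i].count += increment;
          return;
        }
      }
      // Second pass: claim the first empty row for the new receiver.
      for (int i = 0; i < row_limit; i++) {
        if (rows[i].receiver == NULL) {
          rows[i].receiver = recv;
          rows[i].count    = increment;
          return;
        }
      }
      // No match and no free row: control falls through to the caller,
      // which bumps the total counter instead (the polymorphic case in
      // the virtual-call profiling code above).
    }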
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -42,7 +42,10 @@
   // method.
   Address as_Address(LIR_Address* addr, Register tmp);
 
-
+  // Record the type of the receiver in ReceiverTypeData
+  void type_profile_helper(Register mdo,
+                           ciMethodData *md, ciProfileData *data,
+                           Register recv, Label* update_done);
 public:
 
   void store_parameter(Register r, int offset_from_esp_in_words);
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -182,10 +182,22 @@
 }
 
 
-void LIRGenerator::increment_counter(address counter, int step) {
+LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
+  LIR_Opr r;
+  if (type == T_LONG) {
+    r = LIR_OprFact::longConst(x);
+  } else if (type == T_INT) {
+    r = LIR_OprFact::intConst(x);
+  } else {
+    ShouldNotReachHere();
+  }
+  return r;
+}
+
+void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
   LIR_Opr pointer = new_pointer_register();
   __ move(LIR_OprFact::intptrConst(counter), pointer);
-  LIR_Address* addr = new LIR_Address(pointer, T_INT);
+  LIR_Address* addr = new LIR_Address(pointer, type);
   increment_counter(addr, step);
 }
 
@@ -194,7 +206,6 @@
   __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
 }
 
-
 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
   __ cmp_mem_int(condition, base, disp, c, info);
 }
@@ -1145,10 +1156,10 @@
     patching_info = state_for(x, x->state_before());
   }
   obj.load_item();
-  LIR_Opr tmp = new_register(objectType);
   __ instanceof(reg, obj.result(), x->klass(),
-                tmp, new_register(objectType), LIR_OprFact::illegalOpr,
-                x->direct_compare(), patching_info);
+                new_register(objectType), new_register(objectType),
+                !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
+                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
 }
 
 
@@ -1188,8 +1199,7 @@
   // add safepoint before generating condition code so it can be recomputed
   if (x->is_safepoint()) {
     // increment backedge counter if needed
-    increment_backedge_counter(state_for(x, x->state_before()));
-
+    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
   }
   set_no_result(x);
@@ -1197,6 +1207,7 @@
   LIR_Opr left = xin->result();
   LIR_Opr right = yin->result();
   __ cmp(lir_cond(cond), left, right);
+  // Generate branch profiling. Profiling code doesn't kill flags.
   profile_branch(x, cond);
   move_to_phi(x->state());
   if (x->x()->type()->is_float_kind()) {
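The new comment ahead of profile_branch is load-bearing: the condition codes set by the compare have to survive the profiling code, because the conditional branch emitted afterwards consumes them. Schematically, the emitted sequence must look like this (a sketch, not literal output):

    // cmp  left, right    ; sets the condition codes
    // ...profiling updates, none of which may clobber the codes...
    // jcc  cond, target   ; consumes the codes set by the cmp above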
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1068,15 +1068,16 @@
 
       break;
 
-#ifdef TIERED
     case counter_overflow_id:
       {
-        Register bci = rax;
+        Register bci = rax, method = rbx;
         __ enter();
-        OopMap* map = save_live_registers(sasm, 2);
+        OopMap* map = save_live_registers(sasm, 3);
         // Retrieve bci
         __ movl(bci, Address(rbp, 2*BytesPerWord));
-        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
+        // And a pointer to the methodOop
+        __ movptr(method, Address(rbp, 3*BytesPerWord));
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
         oop_maps = new OopMapSet();
         oop_maps->add_gc_map(call_offset, map);
         restore_live_registers(sasm);
@@ -1084,7 +1085,6 @@
         __ ret(0);
       }
       break;
-#endif // TIERED
 
     case new_type_array_id:
     case new_object_array_id:
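The two loads off rbp pair up with the store_parameter calls in CounterOverflowStub::emit_code earlier in this changeset: slot 0 carries the bci and slot 1 the methodOop. Assuming store_parameter's usual outgoing-slot layout, which these offsets corroborate, the stub frame at the point of the reads looks like this (a sketch, offsets in words):

    // [rbp + 0*BytesPerWord]  saved rbp               (pushed by enter())
    // [rbp + 1*BytesPerWord]  return address          (pushed by the call)
    // [rbp + 2*BytesPerWord]  store_parameter slot 0  -> bci
    // [rbp + 3*BytesPerWord]  store_parameter slot 1  -> methodOop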
--- a/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,14 +35,7 @@
 define_pd_global(bool, UseOnStackReplacement,        true );
 define_pd_global(bool, TieredCompilation,            false);
 define_pd_global(intx, CompileThreshold,             1500 );
-define_pd_global(intx, Tier2CompileThreshold,        1500 );
-define_pd_global(intx, Tier3CompileThreshold,        2500 );
-define_pd_global(intx, Tier4CompileThreshold,        4500 );
-
 define_pd_global(intx, BackEdgeThreshold,            100000);
-define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
-define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
-define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
 
 define_pd_global(intx, OnStackReplacePercentage,     933  );
 define_pd_global(intx, FreqInlineSize,               325  );
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,19 +39,8 @@
 define_pd_global(bool, ProfileInterpreter,           true);
 #endif // CC_INTERP
 define_pd_global(bool, TieredCompilation,            false);
-#ifdef TIERED
-define_pd_global(intx, CompileThreshold,             1000);
-#else
 define_pd_global(intx, CompileThreshold,             10000);
-#endif // TIERED
-define_pd_global(intx, Tier2CompileThreshold,        10000);
-define_pd_global(intx, Tier3CompileThreshold,        20000);
-define_pd_global(intx, Tier4CompileThreshold,        40000);
-
 define_pd_global(intx, BackEdgeThreshold,            100000);
-define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
-define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
-define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
 
 define_pd_global(intx, OnStackReplacePercentage,     140);
 define_pd_global(intx, ConditionalMoveLimit,         3);
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -141,12 +141,12 @@
     }
 
     // Could just be some random pointer within the codeBlob
-
-    if (!sender_blob->instructions_contains(sender_pc)) return false;
+    if (!sender_blob->code_contains(sender_pc)) {
+      return false;
+    }
 
     // We should never be able to see an adapter if the current frame is something from code cache
-
-    if ( sender_blob->is_adapter_blob()) {
+    if (sender_blob->is_adapter_blob()) {
       return false;
     }
 
@@ -340,7 +340,7 @@
   fr._unextended_sp = unextended_sp;
 
   address original_pc = nm->get_original_pc(&fr);
-  assert(nm->code_contains(original_pc), "original PC must be in nmethod");
+  assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
   assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
 }
 #endif
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -63,7 +63,7 @@
   address original_pc = nmethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     _pc = original_pc;
-    assert(((nmethod*)_cb)->code_contains(_pc), "original PC must be in nmethod");
+    assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1397,3 +1397,17 @@
     NOT_CC_INTERP(pop(state));
   }
 }
+
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+                                                        int increment, int mask,
+                                                        Register scratch, bool preloaded,
+                                                        Condition cond, Label* where) {
+  if (!preloaded) {
+    movl(scratch, counter_addr);
+  }
+  incrementl(scratch, increment);
+  movl(counter_addr, scratch);
+  andl(scratch, mask);
+  jcc(cond, *where);
+}
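Read as plain C++, the helper's contract is small. A sketch with cond == Assembler::zero, the case the interpreter's counter code uses (when preloaded is true, the caller has already loaded the counter into scratch and the initial movl is skipped):

    // Scalar model of increment_mask_and_jump: the branch is taken
    // whenever the masked bits of the updated counter wrap to zero.
    static bool increment_mask_and_jump_sketch(int* counter_addr,
                                               int increment, int mask) {
      int v = *counter_addr + increment;  // movl + incrementl
      *counter_addr = v;                  // write the bumped counter back
      return (v & mask) == 0;             // andl + jcc(zero, *where)
    }

The identical helper is added to the 64-bit interpreter below.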
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -185,6 +185,10 @@
                              bool decrement = false);
   void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                              bool decrement = false);
+  void increment_mask_and_jump(Address counter_addr,
+                               int increment, int mask,
+                               Register scratch, bool preloaded,
+                               Condition cond, Label* where);
   void set_mdp_flag_at(Register mdp_in, int flag_constant);
   void test_mdp_data_at(Register mdp_in, int offset, Register value,
                         Register test_value_out,
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1480,3 +1480,17 @@
     NOT_CC_INTERP(pop(state));
   }
 }
+
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+                                                        int increment, int mask,
+                                                        Register scratch, bool preloaded,
+                                                        Condition cond, Label* where) {
+  if (!preloaded) {
+    movl(scratch, counter_addr);
+  }
+  incrementl(scratch, increment);
+  movl(counter_addr, scratch);
+  andl(scratch, mask);
+  jcc(cond, *where);
+}
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -194,6 +194,10 @@
                              bool decrement = false);
   void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                              bool decrement = false);
+  void increment_mask_and_jump(Address counter_addr,
+                               int increment, int mask,
+                               Register scratch, bool preloaded,
+                               Condition cond, Label* where);
   void set_mdp_flag_at(Register mdp_in, int flag_constant);
   void test_mdp_data_at(Register mdp_in, int offset, Register value,
                         Register test_value_out,
--- a/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,10 +54,10 @@
     default:        ShouldNotReachHere();
   }
   ResourceMark rm;
-  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
-  address fast_entry = b->instructions_begin();
-  CodeBuffer cbuf(fast_entry, b->instructions_size());
+  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
+  CodeBuffer cbuf(blob);
   MacroAssembler* masm = new MacroAssembler(&cbuf);
+  address fast_entry = __ pc();
 
   Label slow;
 
@@ -135,11 +135,11 @@
   return fast_entry;
 #else
   switch (type) {
-    case T_BOOLEAN: jni_fast_GetBooleanField_fp = (GetBooleanField_t)fast_entry; break;
-    case T_BYTE:    jni_fast_GetByteField_fp = (GetByteField_t)fast_entry; break;
-    case T_CHAR:    jni_fast_GetCharField_fp = (GetCharField_t)fast_entry; break;
-    case T_SHORT:   jni_fast_GetShortField_fp = (GetShortField_t)fast_entry; break;
-    case T_INT:     jni_fast_GetIntField_fp = (GetIntField_t)fast_entry;
+  case T_BOOLEAN: jni_fast_GetBooleanField_fp = (GetBooleanField_t) fast_entry; break;
+  case T_BYTE:    jni_fast_GetByteField_fp    = (GetByteField_t)    fast_entry; break;
+  case T_CHAR:    jni_fast_GetCharField_fp    = (GetCharField_t)    fast_entry; break;
+  case T_SHORT:   jni_fast_GetShortField_fp   = (GetShortField_t)   fast_entry; break;
+  case T_INT:     jni_fast_GetIntField_fp     = (GetIntField_t)     fast_entry; break;
   }
   return os::win32::fast_jni_accessor_wrapper(type);
 #endif
@@ -168,10 +168,10 @@
 address JNI_FastGetField::generate_fast_get_long_field() {
   const char *name = "jni_fast_GetLongField";
   ResourceMark rm;
-  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
-  address fast_entry = b->instructions_begin();
-  CodeBuffer cbuf(fast_entry, b->instructions_size());
+  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
+  CodeBuffer cbuf(blob);
   MacroAssembler* masm = new MacroAssembler(&cbuf);
+  address fast_entry = __ pc();
 
   Label slow;
 
@@ -246,7 +246,7 @@
 #ifndef _WINDOWS
   return fast_entry;
 #else
-  jni_fast_GetLongField_fp = (GetLongField_t)fast_entry;
+  jni_fast_GetLongField_fp = (GetLongField_t) fast_entry;
   return os::win32::fast_jni_accessor_wrapper(T_LONG);
 #endif
 }
@@ -259,10 +259,10 @@
     default:       ShouldNotReachHere();
   }
   ResourceMark rm;
-  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
-  address fast_entry = b->instructions_begin();
-  CodeBuffer cbuf(fast_entry, b->instructions_size());
+  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
+  CodeBuffer cbuf(blob);
   MacroAssembler* masm = new MacroAssembler(&cbuf);
+  address fast_entry = __ pc();
 
   Label slow_with_pop, slow;
 
@@ -348,8 +348,8 @@
   return fast_entry;
 #else
   switch (type) {
-    case T_FLOAT:  jni_fast_GetFloatField_fp = (GetFloatField_t)fast_entry; break;
-    case T_DOUBLE: jni_fast_GetDoubleField_fp = (GetDoubleField_t)fast_entry;
+  case T_FLOAT:  jni_fast_GetFloatField_fp  = (GetFloatField_t)  fast_entry; break;
+  case T_DOUBLE: jni_fast_GetDoubleField_fp = (GetDoubleField_t) fast_entry; break;
   }
   return os::win32::fast_jni_accessor_wrapper(type);
 #endif
--- a/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,10 +58,10 @@
     default:        ShouldNotReachHere();
   }
   ResourceMark rm;
-  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE);
-  address fast_entry = b->instructions_begin();
-  CodeBuffer cbuf(fast_entry, b->instructions_size());
+  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
+  CodeBuffer cbuf(blob);
   MacroAssembler* masm = new MacroAssembler(&cbuf);
+  address fast_entry = __ pc();
 
   Label slow;
 
@@ -156,10 +156,10 @@
     default:          ShouldNotReachHere();
   }
   ResourceMark rm;
-  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE);
-  address fast_entry = b->instructions_begin();
-  CodeBuffer cbuf(fast_entry, b->instructions_size());
+  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
+  CodeBuffer cbuf(blob);
   MacroAssembler* masm = new MacroAssembler(&cbuf);
+  address fast_entry = __ pc();
 
   Label slow;
 
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -27,6 +27,14 @@
 
 #define __ _masm->
 
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                 address interpreted_entry) {
   // Just before the actual machine code entry point, allocate space
@@ -64,6 +72,7 @@
                            const char* error_message) {
   // Verify that argslot lies within (rsp, rbp].
   Label L_ok, L_bad;
+  BLOCK_COMMENT("{ verify_argslot");
   __ cmpptr(argslot_reg, rbp);
   __ jccb(Assembler::above, L_bad);
   __ cmpptr(rsp, argslot_reg);
@@ -71,6 +80,7 @@
   __ bind(L_bad);
   __ stop(error_message);
   __ bind(L_ok);
+  BLOCK_COMMENT("} verify_argslot");
 }
 #endif
 
@@ -80,16 +90,21 @@
   // rbx: methodOop
   // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
   // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
-  // rdx: garbage temp, blown away
+  // rdx, rdi: garbage temps, blown away
 
   Register rbx_method = rbx;
   Register rcx_recv   = rcx;
   Register rax_mtype  = rax;
   Register rdx_temp   = rdx;
+  Register rdi_temp   = rdi;
 
   // emit WrongMethodType path first, to enable jccb back-branch from main path
   Label wrong_method_type;
   __ bind(wrong_method_type);
+  Label invoke_generic_slow_path;
+  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
+  __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
+  __ jcc(Assembler::notEqual, invoke_generic_slow_path);
   __ push(rax_mtype);       // required mtype
   __ push(rcx_recv);        // bad mh (1st stacked argument)
   __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
@@ -106,17 +121,68 @@
       tem = rax_mtype;          // in case there is another indirection
     }
   }
-  Register rbx_temp = rbx_method; // done with incoming methodOop
 
   // given the MethodType, find out where the MH argument is buried
   __ movptr(rdx_temp, Address(rax_mtype,
-                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp)));
-  __ movl(rdx_temp, Address(rdx_temp,
-                            __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp)));
-  __ movptr(rcx_recv, __ argument_address(rdx_temp));
+                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
+  Register rdx_vmslots = rdx_temp;
+  __ movl(rdx_vmslots, Address(rdx_temp,
+                               __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
+  __ movptr(rcx_recv, __ argument_address(rdx_vmslots));
+
+  trace_method_handle(_masm, "invokeExact");
+
+  __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
+  __ jump_to_method_handle_entry(rcx_recv, rdi_temp);
+
+  // for invokeGeneric (only), apply argument and result conversions on the fly
+  __ bind(invoke_generic_slow_path);
+#ifdef ASSERT
+  { Label L;
+    __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric);
+    __ jcc(Assembler::equal, L);
+    __ stop("bad methodOop::intrinsic_id");
+    __ bind(L);
+  }
+#endif //ASSERT
+  Register rbx_temp = rbx_method;  // don't need it now
+
+  // make room on the stack for another pointer:
+  Register rcx_argslot = rcx_recv;
+  __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1));
+  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK,
+                   rcx_argslot, rbx_temp, rdx_temp);
 
-  __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type);
-  __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
+  // load up an adapter from the calling type (Java weaves this)
+  __ movptr(rdx_temp, Address(rax_mtype,
+                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
+  Register rdx_adapter = rdx_temp;
+  // movptr(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes()));
+  // deal with old JDK versions:
+  __ lea(rdi_temp, Address(rdx_temp,
+                           __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
+  __ cmpptr(rdi_temp, rdx_temp);
+  Label sorry_no_invoke_generic;
+  __ jccb(Assembler::below, sorry_no_invoke_generic);
+
+  __ movptr(rdx_adapter, Address(rdi_temp, 0));
+  __ testptr(rdx_adapter, rdx_adapter);
+  __ jccb(Assembler::zero, sorry_no_invoke_generic);
+  __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter);
+  // As a trusted first argument, pass the type being called, so the adapter knows
+  // the actual types of the arguments and return values.
+  // (Generic invokers are shared among form-families of method-type.)
+  __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype);
+  // FIXME: assert that rdx_adapter is of the right method-type.
+  __ mov(rcx, rdx_adapter);
+  trace_method_handle(_masm, "invokeGeneric");
+  __ jump_to_method_handle_entry(rcx, rdi_temp);
+
+  __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
+  __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize));  // recover original MH
+  __ push(rax_mtype);       // required mtype
+  __ push(rcx_recv);        // bad mh (1st stacked argument)
+  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
 
   return entry_point;
 }
@@ -164,11 +230,12 @@
   //   for (rdx = rsp + size; rdx < argslot; rdx++)
   //     rdx[-size] = rdx[0]
   //   argslot -= size;
+  BLOCK_COMMENT("insert_arg_slots {");
   __ mov(rdx_temp, rsp);                        // source pointer for copy
   __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
   {
     Label loop;
-    __ bind(loop);
+    __ BIND(loop);
     // pull one word down each time through the loop
     __ movptr(rbx_temp, Address(rdx_temp, 0));
     __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
@@ -179,6 +246,7 @@
 
   // Now move the argslot down, to point to the opened-up space.
   __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
+  BLOCK_COMMENT("} insert_arg_slots");
 }
 
 // Helper to remove argument slots from the stack.
@@ -218,6 +286,7 @@
   }
 #endif
 
+  BLOCK_COMMENT("remove_arg_slots {");
   // Pull up everything shallower than rax_argslot.
   // Then remove the excess space on the stack.
   // The stacked return address gets pulled up with everything else.
@@ -229,7 +298,7 @@
   __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
   {
     Label loop;
-    __ bind(loop);
+    __ BIND(loop);
     // pull one word up each time through the loop
     __ movptr(rbx_temp, Address(rdx_temp, 0));
     __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
@@ -242,12 +311,14 @@
   __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
   // And adjust the argslot address to point at the deletion point.
   __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
+  BLOCK_COMMENT("} remove_arg_slots");
 }
 
 #ifndef PRODUCT
 extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
                               oop mh,
+                              intptr_t* saved_regs,
                               intptr_t* entry_sp,
                               intptr_t* saved_sp,
                               intptr_t* saved_bp) {
@@ -256,9 +327,47 @@
   intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
   printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
          adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
-  if (last_sp != saved_sp)
+  if (last_sp != saved_sp && last_sp != NULL)
     printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
-  if (Verbose)  print_method_handle(mh);
+  if (Verbose) {
+    printf(" reg dump: ");
+    int saved_regs_count = (entry_sp-1) - saved_regs;
+    // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
+    int i;
+    for (i = 0; i <= saved_regs_count; i++) {
+      if (i > 0 && i % 4 == 0 && i != saved_regs_count)
+        printf("\n   + dump: ");
+      printf(" %d: "INTPTR_FORMAT, i, saved_regs[i]);
+    }
+    printf("\n");
+    int stack_dump_count = 16;
+    if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
+      stack_dump_count = (int)(saved_bp + 2 - saved_sp);
+    if (stack_dump_count > 64)  stack_dump_count = 48;
+    for (i = 0; i < stack_dump_count; i += 4) {
+      printf(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT"\n",
+             i, &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
+    }
+    print_method_handle(mh);
+  }
+}
+void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
+  if (!TraceMethodHandles)  return;
+  BLOCK_COMMENT("trace_method_handle {");
+  __ push(rax);
+  __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
+  __ pusha();
+  // arguments:
+  __ push(rbp);               // interpreter frame pointer
+  __ push(rsi);               // saved_sp
+  __ push(rax);               // entry_sp
+  __ push(rcx);               // mh
+  __ push(rcx);
+  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
+  __ popa();
+  __ pop(rax);
+  BLOCK_COMMENT("} trace_method_handle");
 }
 #endif //PRODUCT
 
@@ -324,21 +433,9 @@
   address interp_entry = __ pc();
   if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
 
-#ifndef PRODUCT
-  if (TraceMethodHandles) {
-    __ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi);
-    __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
-    // arguments:
-    __ push(rbp);               // interpreter frame pointer
-    __ push(rsi);               // saved_sp
-    __ push(rax);               // entry_sp
-    __ push(rcx);               // mh
-    __ push(rcx);
-    __ movptr(Address(rsp, 0), (intptr_t)entry_name(ek));
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
-    __ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax);
-  }
-#endif //PRODUCT
+  trace_method_handle(_masm, entry_name(ek));
+
+  BLOCK_COMMENT(entry_name(ek));
 
   switch ((int) ek) {
   case _raise_exception:
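One easy-to-miss detail in the invokeGeneric slow path above is the slot arithmetic around rcx_argslot after insert_arg_slots opens two fresh stack slots. The stores and the error-path reload imply this layout (a sketch; offsets in Interpreter::stackElementSize units):

    // [rcx_argslot + 1]  generic-invoker adapter  (rdx_adapter)
    // [rcx_argslot + 0]  calling MethodType       (rax_mtype, the trusted
    //                                              first argument)
    // [rcx_argslot - 1]  original method handle   (reloaded on the
    //                                              sorry_no_invoke_generic path)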
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1039,6 +1039,33 @@
   }
 
 
+  address generate_fill(BasicType t, bool aligned, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    BLOCK_COMMENT("Entry:");
+
+    const Register to       = rdi;  // destination array address
+    const Register value    = rdx;  // value
+    const Register count    = rsi;  // elements count
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    __ push(rsi);
+    __ push(rdi);
+    __ movptr(to,  Address(rsp, 12+ 4));
+    __ movl(value, Address(rsp, 12+ 8));
+    __ movl(count, Address(rsp, 12+12));
+
+    __ generate_fill(t, aligned, to, value, count, rax, xmm0);
+
+    __ pop(rdi);
+    __ pop(rsi);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+    return start;
+  }
+
   address generate_conjoint_copy(BasicType t, bool aligned,
                                  Address::ScaleFactor sf,
                                  address nooverlap_target,
@@ -2001,6 +2028,13 @@
         generate_conjoint_long_copy(entry, &entry_jlong_arraycopy,
                                     "jlong_arraycopy");
 
+    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
+    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
+    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
+    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
+    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
+    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
+
     StubRoutines::_arrayof_jint_disjoint_arraycopy  =
         StubRoutines::_jint_disjoint_arraycopy;
     StubRoutines::_arrayof_oop_disjoint_arraycopy   =
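In the 32-bit generate_fill stub above, the 12-byte displacement in the argument loads falls out of the frame built so far: enter() pushes rbp and the stub then pushes rsi and rdi, so three saved words plus the return address separate rsp from the incoming cdecl arguments (a sketch):

    // [rsp +  0]  saved rdi
    // [rsp +  4]  saved rsi
    // [rsp +  8]  saved rbp       (from enter())
    // [rsp + 12]  return address
    // [rsp + 16]  to       (12 + 4)
    // [rsp + 20]  value    (12 + 8)
    // [rsp + 24]  count    (12 + 12)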
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1625,6 +1625,26 @@
     return start;
   }
 
+  address generate_fill(BasicType t, bool aligned, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    BLOCK_COMMENT("Entry:");
+
+    const Register to       = c_rarg0;  // destination array address
+    const Register value    = c_rarg1;  // value
+    const Register count    = c_rarg2;  // elements count
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+    __ generate_fill(t, aligned, to, value, count, rax, xmm0);
+
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+    return start;
+  }
+
   // Arguments:
   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
   //             ignored
@@ -2712,6 +2732,13 @@
     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
 
+    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
+    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
+    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
+    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
+    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
+    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
+
     // We don't generate specialized code for HeapWord-aligned source
     // arrays, so just use the code we've already generated
     StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
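Behaviorally, each fill stub registered here is ABI-equivalent to a trivial per-element loop; the point of the generated code is the wide-store fast path, not different semantics. A reference model, using jint as the example (a sketch, not the stub itself):

    // What StubRoutines::_jint_fill computes, expressed as a plain loop.
    static void jint_fill_reference(int* to, int value, int count) {
      for (int i = 0; i < count; i++) {
        to[i] = value;
      }
    }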
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -33,7 +33,7 @@
 
 // MethodHandles adapters
 enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 5000
+  method_handles_adapters_code_size = 10000
 };
 
 class x86 {
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -35,7 +35,7 @@
 
 // MethodHandles adapters
 enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 13000
+  method_handles_adapters_code_size = 26000
 };
 
 class x86 {
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -359,40 +359,62 @@
 // rcx: invocation counter
 //
 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-
-  const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
-  const Address backedge_counter  (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
-
-  if (ProfileInterpreter) { // %%% Merge this into methodDataOop
-    __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
-  }
-  // Update standard invocation counters
-  __ movl(rax, backedge_counter);               // load backedge counter
-
-  __ incrementl(rcx, InvocationCounter::count_increment);
-  __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
-
-  __ movl(invocation_counter, rcx);             // save invocation count
-  __ addl(rcx, rax);                            // add both counters
+  const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
+                                        in_bytes(InvocationCounter::counter_offset()));
+  // Note: In tiered we increment either the counters in the methodOop or in the MDO, depending on whether we're profiling.
+  if (TieredCompilation) {
+    int increment = InvocationCounter::count_increment;
+    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
+    Label no_mdo, done;
+    if (ProfileInterpreter) {
+      // Are we profiling?
+      __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
+      __ testptr(rax, rax);
+      __ jccb(Assembler::zero, no_mdo);
+      // Increment counter in the MDO
+      const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
+                                                in_bytes(InvocationCounter::counter_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
+      __ jmpb(done);
+    }
+    __ bind(no_mdo);
+    // Increment counter in methodOop (we don't need to load it, it's in rcx).
+    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
+    __ bind(done);
+  } else {
+    const Address backedge_counter  (rbx, methodOopDesc::backedge_counter_offset() +
+                                          InvocationCounter::counter_offset());
 
-  // profile_method is non-null only for interpreted method so
-  // profile_method != NULL == !native_call
-  // BytecodeInterpreter only calls for native so code is elided.
+    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
+      __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
+    }
+    // Update standard invocation counters
+    __ movl(rax, backedge_counter);               // load backedge counter
+
+    __ incrementl(rcx, InvocationCounter::count_increment);
+    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
+
+    __ movl(invocation_counter, rcx);             // save invocation count
+    __ addl(rcx, rax);                            // add both counters
 
-  if (ProfileInterpreter && profile_method != NULL) {
-    // Test to see if we should create a method data oop
+    // profile_method is non-null only for interpreted methods, so
+    // profile_method != NULL == !native_call.
+    // The BytecodeInterpreter only calls this for natives, so that code is elided.
+
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      __ cmp32(rcx,
+               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
+      __ jcc(Assembler::less, *profile_method_continue);
+
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(rax, *profile_method);
+    }
+
     __ cmp32(rcx,
-             ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
-    __ jcc(Assembler::less, *profile_method_continue);
-
-    // if no method data exists, go to profile_method
-    __ test_method_data_pointer(rax, *profile_method);
+             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
+    __ jcc(Assembler::aboveEqual, *overflow);
   }
-
-  __ cmp32(rcx,
-           ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
-  __ jcc(Assembler::aboveEqual, *overflow);
-
 }
 
 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
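
The tiered path above leans on increment_mask_and_jump: bump a counter, and take the overflow branch (Assembler::zero) when the masked bits wrap to zero, i.e. once every 2^Tier0InvokeNotifyFreqLog events. A hedged C++ sketch of that arithmetic, with the counter layout (count_shift, count_increment) assumed rather than read out of InvocationCounter:

#include <cassert>

const int count_shift     = 3;                 // assumed status-bit count
const int count_increment = 1 << count_shift;  // one invocation

// True when the generated code would branch to the overflow label.
bool increment_mask_and_jump(int& counter, int increment, int mask) {
  counter += increment;
  return (counter & mask) == 0;
}

int main() {
  const int Tier0InvokeNotifyFreqLog = 10;     // hypothetical setting
  const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << count_shift;
  int counter = 0, overflows = 0;
  for (int i = 0; i < 2 * (1 << Tier0InvokeNotifyFreqLog); ++i) {
    if (increment_mask_and_jump(counter, count_increment, mask)) overflows++;
  }
  assert(overflows == 2);  // one notification per 2^FreqLog invocations
  return 0;
}
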
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -310,42 +310,61 @@
         Label* overflow,
         Label* profile_method,
         Label* profile_method_continue) {
-
-  const Address invocation_counter(rbx,
-                                   methodOopDesc::invocation_counter_offset() +
+  const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
+                                        in_bytes(InvocationCounter::counter_offset()));
+  // Note: In tiered we increment either the counters in the methodOop or in the MDO, depending on whether we're profiling.
+  if (TieredCompilation) {
+    int increment = InvocationCounter::count_increment;
+    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
+    Label no_mdo, done;
+    if (ProfileInterpreter) {
+      // Are we profiling?
+      __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
+      __ testptr(rax, rax);
+      __ jccb(Assembler::zero, no_mdo);
+      // Increment counter in the MDO
+      const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
+                                                in_bytes(InvocationCounter::counter_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
+      __ jmpb(done);
+    }
+    __ bind(no_mdo);
+    // Increment counter in methodOop (we don't need to load it, it's in rcx).
+    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
+    __ bind(done);
+  } else {
+    const Address backedge_counter(rbx,
+                                   methodOopDesc::backedge_counter_offset() +
                                    InvocationCounter::counter_offset());
-  const Address backedge_counter(rbx,
-                                 methodOopDesc::backedge_counter_offset() +
-                                 InvocationCounter::counter_offset());
-
-  if (ProfileInterpreter) { // %%% Merge this into methodDataOop
-    __ incrementl(Address(rbx,
-                    methodOopDesc::interpreter_invocation_counter_offset()));
-  }
-  // Update standard invocation counters
-  __ movl(rax, backedge_counter); // load backedge counter
 
-  __ incrementl(rcx, InvocationCounter::count_increment);
-  __ andl(rax, InvocationCounter::count_mask_value); // mask out the
-                                                     // status bits
+    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
+      __ incrementl(Address(rbx,
+                            methodOopDesc::interpreter_invocation_counter_offset()));
+    }
+    // Update standard invocation counters
+    __ movl(rax, backedge_counter);   // load backedge counter
 
-  __ movl(invocation_counter, rcx); // save invocation count
-  __ addl(rcx, rax); // add both counters
+    __ incrementl(rcx, InvocationCounter::count_increment);
+    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
 
-  // profile_method is non-null only for interpreted method so
-  // profile_method != NULL == !native_call
+    __ movl(invocation_counter, rcx); // save invocation count
+    __ addl(rcx, rax);                // add both counters
 
-  if (ProfileInterpreter && profile_method != NULL) {
-    // Test to see if we should create a method data oop
-    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
-    __ jcc(Assembler::less, *profile_method_continue);
+    // profile_method is non-null only for interpreted methods, so
+    // profile_method != NULL == !native_call.
+
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
+      __ jcc(Assembler::less, *profile_method_continue);
 
-    // if no method data exists, go to profile_method
-    __ test_method_data_pointer(rax, *profile_method);
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(rax, *profile_method);
+    }
+
+    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
+    __ jcc(Assembler::aboveEqual, *overflow);
   }
-
-  __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
-  __ jcc(Assembler::aboveEqual, *overflow);
 }
 
 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
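
The branch structure of the tiered path is easier to see stripped of assembler details: profiled methods keep their hot counters in the MDO, and only methods without one fall back to the methodOop counter. A sketch with hypothetical struct shapes standing in for methodOop/methodDataOop:

struct MethodData { int invocation_counter; };
struct Method     { MethodData* mdo; int invocation_counter; };

// Which counter the generated tiered code bumps.
int* counter_to_bump(Method* m, bool profile_interpreter) {
  if (profile_interpreter && m->mdo != nullptr) {
    return &m->mdo->invocation_counter;  // "Increment counter in the MDO"
  }
  return &m->invocation_counter;         // no_mdo: counter in the methodOop
}
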
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1558,47 +1558,68 @@
     __ testl(rdx, rdx);             // check if forward or backward branch
     __ jcc(Assembler::positive, dispatch); // count only if backward branch
 
-    // increment counter
-    __ movl(rax, Address(rcx, be_offset));        // load backedge counter
-    __ incrementl(rax, InvocationCounter::count_increment); // increment counter
-    __ movl(Address(rcx, be_offset), rax);        // store counter
-
-    __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
-    __ andl(rax, InvocationCounter::count_mask_value);     // and the status bits
-    __ addl(rax, Address(rcx, be_offset));        // add both counters
-
-    if (ProfileInterpreter) {
-      // Test to see if we should create a method data oop
-      __ cmp32(rax,
-               ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
-      __ jcc(Assembler::less, dispatch);
-
-      // if no method data exists, go to profile method
-      __ test_method_data_pointer(rax, profile_method);
-
-      if (UseOnStackReplacement) {
-        // check for overflow against rbx, which is the MDO taken count
-        __ cmp32(rbx,
-                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
-        __ jcc(Assembler::below, dispatch);
-
-        // When ProfileInterpreter is on, the backedge_count comes from the
-        // methodDataOop, which value does not get reset on the call to
-        // frequency_counter_overflow().  To avoid excessive calls to the overflow
-        // routine while the method is being compiled, add a second test to make
-        // sure the overflow function is called only once every overflow_frequency.
-        const int overflow_frequency = 1024;
-        __ andptr(rbx, overflow_frequency-1);
-        __ jcc(Assembler::zero, backedge_counter_overflow);
-
+    if (TieredCompilation) {
+      Label no_mdo;
+      int increment = InvocationCounter::count_increment;
+      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+      if (ProfileInterpreter) {
+        // Are we profiling?
+        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
+        __ testptr(rbx, rbx);
+        __ jccb(Assembler::zero, no_mdo);
+        // Increment the MDO backedge counter
+        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+                                                in_bytes(InvocationCounter::counter_offset()));
+        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
+                                   rax, false, Assembler::zero, &backedge_counter_overflow);
+        __ jmp(dispatch);
       }
+      __ bind(no_mdo);
+      // Increment backedge counter in methodOop
+      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
+                                 rax, false, Assembler::zero, &backedge_counter_overflow);
     } else {
-      if (UseOnStackReplacement) {
-        // check for overflow against rax, which is the sum of the counters
+      // increment counter
+      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
+      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
+      __ movl(Address(rcx, be_offset), rax);        // store counter
+
+      __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
+      __ andl(rax, InvocationCounter::count_mask_value);     // and the status bits
+      __ addl(rax, Address(rcx, be_offset));        // add both counters
+
+      if (ProfileInterpreter) {
+        // Test to see if we should create a method data oop
         __ cmp32(rax,
-                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
-        __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
-
+                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
+        __ jcc(Assembler::less, dispatch);
+
+        // if no method data exists, go to profile method
+        __ test_method_data_pointer(rax, profile_method);
+
+        if (UseOnStackReplacement) {
+          // check for overflow against rbx, which is the MDO taken count
+          __ cmp32(rbx,
+                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ jcc(Assembler::below, dispatch);
+
+          // When ProfileInterpreter is on, the backedge_count comes from the
+          // methodDataOop, whose value does not get reset on the call to
+          // frequency_counter_overflow().  To avoid excessive calls to the overflow
+          // routine while the method is being compiled, add a second test to make
+          // sure the overflow function is called only once every overflow_frequency.
+          const int overflow_frequency = 1024;
+          __ andptr(rbx, overflow_frequency-1);
+          __ jcc(Assembler::zero, backedge_counter_overflow);
+        }
+      } else {
+        if (UseOnStackReplacement) {
+          // check for overflow against rax, which is the sum of the counters
+          __ cmp32(rax,
+                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
+
+        }
       }
     }
     __ bind(dispatch);
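
The overflow_frequency test in the non-tiered ProfileInterpreter path is worth spelling out: because frequency_counter_overflow() never resets the MDO taken-count, every backedge past the limit would otherwise re-enter the overflow routine while the compile is still in flight. A self-contained sketch of the throttle:

#include <cassert>

bool should_call_overflow(int mdo_taken_count) {
  const int overflow_frequency = 1024;  // matches the constant above
  return (mdo_taken_count & (overflow_frequency - 1)) == 0;
}

int main() {
  assert(should_call_overflow(2048));   // every 1024th hit fires
  assert(!should_call_overflow(2049));  // all others fall through to dispatch
  return 0;
}
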
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1583,51 +1583,71 @@
     // r14: locals pointer
     __ testl(rdx, rdx);             // check if forward or backward branch
     __ jcc(Assembler::positive, dispatch); // count only if backward branch
-
-    // increment counter
-    __ movl(rax, Address(rcx, be_offset));        // load backedge counter
-    __ incrementl(rax, InvocationCounter::count_increment); // increment
-                                                            // counter
-    __ movl(Address(rcx, be_offset), rax);        // store counter
-
-    __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
-    __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
-    __ addl(rax, Address(rcx, be_offset));        // add both counters
-
-    if (ProfileInterpreter) {
-      // Test to see if we should create a method data oop
-      __ cmp32(rax,
-               ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
-      __ jcc(Assembler::less, dispatch);
-
-      // if no method data exists, go to profile method
-      __ test_method_data_pointer(rax, profile_method);
-
-      if (UseOnStackReplacement) {
-        // check for overflow against ebx which is the MDO taken count
-        __ cmp32(rbx,
-                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
-        __ jcc(Assembler::below, dispatch);
-
-        // When ProfileInterpreter is on, the backedge_count comes
-        // from the methodDataOop, which value does not get reset on
-        // the call to frequency_counter_overflow().  To avoid
-        // excessive calls to the overflow routine while the method is
-        // being compiled, add a second test to make sure the overflow
-        // function is called only once every overflow_frequency.
-        const int overflow_frequency = 1024;
-        __ andl(rbx, overflow_frequency - 1);
-        __ jcc(Assembler::zero, backedge_counter_overflow);
-
+    if (TieredCompilation) {
+      Label no_mdo;
+      int increment = InvocationCounter::count_increment;
+      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+      if (ProfileInterpreter) {
+        // Are we profiling?
+        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
+        __ testptr(rbx, rbx);
+        __ jccb(Assembler::zero, no_mdo);
+        // Increment the MDO backedge counter
+        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+                                           in_bytes(InvocationCounter::counter_offset()));
+        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
+                                   rax, false, Assembler::zero, &backedge_counter_overflow);
+        __ jmp(dispatch);
       }
+      __ bind(no_mdo);
+      // Increment backedge counter in methodOop
+      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
+                                 rax, false, Assembler::zero, &backedge_counter_overflow);
     } else {
-      if (UseOnStackReplacement) {
-        // check for overflow against eax, which is the sum of the
-        // counters
+      // increment counter
+      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
+      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
+      __ movl(Address(rcx, be_offset), rax);        // store counter
+
+      __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
+      __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
+      __ addl(rax, Address(rcx, be_offset));        // add both counters
+
+      if (ProfileInterpreter) {
+        // Test to see if we should create a method data oop
         __ cmp32(rax,
-                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
-        __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
-
+                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
+        __ jcc(Assembler::less, dispatch);
+
+        // if no method data exists, go to profile method
+        __ test_method_data_pointer(rax, profile_method);
+
+        if (UseOnStackReplacement) {
+          // check for overflow against ebx which is the MDO taken count
+          __ cmp32(rbx,
+                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ jcc(Assembler::below, dispatch);
+
+          // When ProfileInterpreter is on, the backedge_count comes
+          // from the methodDataOop, whose value does not get reset on
+          // the call to frequency_counter_overflow().  To avoid
+          // excessive calls to the overflow routine while the method is
+          // being compiled, add a second test to make sure the overflow
+          // function is called only once every overflow_frequency.
+          const int overflow_frequency = 1024;
+          __ andl(rbx, overflow_frequency - 1);
+          __ jcc(Assembler::zero, backedge_counter_overflow);
+
+        }
+      } else {
+        if (UseOnStackReplacement) {
+          // check for overflow against eax, which is the sum of the
+          // counters
+          __ cmp32(rax,
+                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
+
+        }
       }
     }
     __ bind(dispatch);
@@ -2912,7 +2932,8 @@
 void TemplateTable::invokevirtual_helper(Register index,
                                          Register recv,
                                          Register flags) {
-  // Uses temporary registers rax, rdx  assert_different_registers(index, recv, rax, rdx);
+  // Uses temporary registers rax, rdx
+  assert_different_registers(index, recv, rax, rdx);
 
   // Test for an invoke of a final method
   Label notFinal;
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -595,8 +595,7 @@
   if (stub_blob == NULL) {
     vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
   }
-  CodeBuffer c(stub_blob->instructions_begin(),
-               stub_blob->instructions_size());
+  CodeBuffer c(stub_blob);
   VM_Version_StubGenerator g(&c);
   getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
                                    g.generate_getPsrInfo());
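
The one-argument CodeBuffer constructor fits the instructions_begin()/entry_point() renames elsewhere in this changeset: the blob now describes its own code bounds, so callers stop pairing a begin pointer with a separately fetched size. A sketch of the idea with assumed member names (not the real HotSpot declarations):

struct BufferBlob {
  unsigned char* code_begin;
  unsigned char* code_end;
};

struct CodeBuffer {
  unsigned char* start;
  long           capacity;
  // The blob knows its own bounds; the old two-argument form let a
  // caller combine the wrong begin/size pair.
  explicit CodeBuffer(BufferBlob* blob)
    : start(blob->code_begin),
      capacity(blob->code_end - blob->code_begin) {}
};
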
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -296,14 +296,14 @@
       result |= CPU_CX8;
     if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
       result |= CPU_CMOV;
-    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || is_amd() &&
-        _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0)
+    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd() &&
+        _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
       result |= CPU_FXSR;
     // HT flag is set for multi-core processors also.
     if (threads_per_core() > 1)
       result |= CPU_HT;
-    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || is_amd() &&
-        _cpuid_info.ext_cpuid1_edx.bits.mmx != 0)
+    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd() &&
+        _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
       result |= CPU_MMX;
     if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
       result |= CPU_SSE;
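
The fxsr/mmx hunks are precedence hygiene only: '&&' already binds tighter than '||', so the added parentheses keep the existing evaluation and merely make the grouping explicit (and keep -Wparentheses-style lints quiet). A self-contained check:

#include <cassert>

int main() {
  for (int a = 0; a <= 1; ++a)
    for (int b = 0; b <= 1; ++b)
      for (int c = 0; c <= 1; ++c)
        assert((a || b && c) == (a || (b && c)));  // identical parses
  return 0;
}
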
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -209,7 +209,7 @@
            (UseCompressedOops ? 16 : 0);  // 1 leaq can be 3 bytes + 1 long
   } else {
     // Itable stub size
-    return (DebugVtables ? 512 : 72) + (CountCompiledCalls ? 13 : 0) +
+    return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
            (UseCompressedOops ? 32 : 0);  // 2 leaqs
   }
   // In order to tune these parameters, run the JVM with VM options
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Wed Jul 05 17:22:53 2017 +0200
@@ -350,54 +350,46 @@
 // EMIT_RM()
 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
   unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3);
-  *(cbuf.code_end()) = c;
-  cbuf.set_code_end(cbuf.code_end() + 1);
+  cbuf.insts()->emit_int8(c);
 }
 
 // EMIT_CC()
 void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
   unsigned char c = (unsigned char)( f1 | f2 );
-  *(cbuf.code_end()) = c;
-  cbuf.set_code_end(cbuf.code_end() + 1);
+  cbuf.insts()->emit_int8(c);
 }
 
 // EMIT_OPCODE()
 void emit_opcode(CodeBuffer &cbuf, int code) {
-  *(cbuf.code_end()) = (unsigned char)code;
-  cbuf.set_code_end(cbuf.code_end() + 1);
+  cbuf.insts()->emit_int8((unsigned char) code);
 }
 
 // EMIT_OPCODE() w/ relocation information
 void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) {
-  cbuf.relocate(cbuf.inst_mark() + offset, reloc);
+  cbuf.relocate(cbuf.insts_mark() + offset, reloc);
   emit_opcode(cbuf, code);
 }
 
 // EMIT_D8()
 void emit_d8(CodeBuffer &cbuf, int d8) {
-  *(cbuf.code_end()) = (unsigned char)d8;
-  cbuf.set_code_end(cbuf.code_end() + 1);
+  cbuf.insts()->emit_int8((unsigned char) d8);
 }
 
 // EMIT_D16()
 void emit_d16(CodeBuffer &cbuf, int d16) {
-  *((short *)(cbuf.code_end())) = d16;
-  cbuf.set_code_end(cbuf.code_end() + 2);
+  cbuf.insts()->emit_int16(d16);
 }
 
 // EMIT_D32()
 void emit_d32(CodeBuffer &cbuf, int d32) {
-  *((int *)(cbuf.code_end())) = d32;
-  cbuf.set_code_end(cbuf.code_end() + 4);
+  cbuf.insts()->emit_int32(d32);
 }
 
 // emit 32 bit value and construct relocation entry from relocInfo::relocType
 void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
         int format) {
-  cbuf.relocate(cbuf.inst_mark(), reloc, format);
-
-  *((int *)(cbuf.code_end())) = d32;
-  cbuf.set_code_end(cbuf.code_end() + 4);
+  cbuf.relocate(cbuf.insts_mark(), reloc, format);
+  cbuf.insts()->emit_int32(d32);
 }
 
 // emit 32 bit value and construct relocation entry from RelocationHolder
@@ -408,10 +400,8 @@
     assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
-  cbuf.relocate(cbuf.inst_mark(), rspec, format);
-
-  *((int *)(cbuf.code_end())) = d32;
-  cbuf.set_code_end(cbuf.code_end() + 4);
+  cbuf.relocate(cbuf.insts_mark(), rspec, format);
+  cbuf.insts()->emit_int32(d32);
 }
 
 // Access stack slot for load or store
@@ -613,7 +603,7 @@
     emit_rm(cbuf, 0x3, 0x05, ESP_enc);
     emit_d32(cbuf, framesize);
   }
-  C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
+  C->set_frame_complete(cbuf.insts_size());
 
 #ifdef ASSERT
   if (VerifyStackAtCalls) {
@@ -695,7 +685,7 @@
   emit_opcode(cbuf, 0x58 | EBP_enc);
 
   if( do_polling() && C->is_method_compilation() ) {
-    cbuf.relocate(cbuf.code_end(), relocInfo::poll_return_type, 0);
+    cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
     emit_opcode(cbuf,0x85);
     emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX
     emit_d32(cbuf, (intptr_t)os::get_polling_page());
@@ -1211,9 +1201,9 @@
   // mov rbx,0
   // jmp -1
 
-  address mark = cbuf.inst_mark();  // get mark within main instrs section
-
-  // Note that the code buffer's inst_mark is always relative to insts.
+  address mark = cbuf.insts_mark();  // get mark within main instrs section
+
+  // Note that the code buffer's insts_mark is always relative to insts.
   // That's why we must use the macroassembler to generate a stub.
   MacroAssembler _masm(&cbuf);
 
@@ -1228,7 +1218,7 @@
   __ jump(RuntimeAddress(__ pc()));
 
   __ end_a_stub();
-  // Update current stubs pointer and restore code_end.
+  // Update current stubs pointer and restore insts_end.
 }
 // size of call stub, compiled java to interpreter
 uint size_java_to_interp() {
@@ -1254,7 +1244,7 @@
 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   MacroAssembler masm(&cbuf);
 #ifdef ASSERT
-  uint code_size = cbuf.code_size();
+  uint insts_size = cbuf.insts_size();
 #endif
   masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
   masm.jump_cc(Assembler::notEqual,
@@ -1266,7 +1256,7 @@
      nops_cnt += 1;
   masm.nop(nops_cnt);
 
-  assert(cbuf.code_size() - code_size == size(ra_), "checking code size of inline cache node");
+  assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node");
 }
 
 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
@@ -1288,14 +1278,14 @@
 // and call a VM stub routine.
 int emit_exception_handler(CodeBuffer& cbuf) {
 
-  // Note that the code buffer's inst_mark is always relative to insts.
+  // Note that the code buffer's insts_mark is always relative to insts.
   // That's why we must use the macroassembler to generate a handler.
   MacroAssembler _masm(&cbuf);
   address base =
   __ start_a_stub(size_exception_handler());
   if (base == NULL)  return 0;  // CodeBuffer::expand failed
   int offset = __ offset();
-  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
+  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
   __ end_a_stub();
   return offset;
@@ -1313,7 +1303,7 @@
 // Emit deopt handler code.
 int emit_deopt_handler(CodeBuffer& cbuf) {
 
-  // Note that the code buffer's inst_mark is always relative to insts.
+  // Note that the code buffer's insts_mark is always relative to insts.
   // That's why we must use the macroassembler to generate a handler.
   MacroAssembler _masm(&cbuf);
   address base =
@@ -1728,12 +1718,12 @@
 
   enc_class Lbl (label labl) %{ // JMP, CALL
     Label *l = $labl$$label;
-    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size()+4)) : 0);
+    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size()+4)) : 0);
   %}
 
   enc_class LblShort (label labl) %{ // JMP, CALL
     Label *l = $labl$$label;
-    int disp = l ? (l->loc_pos() - (cbuf.code_size()+1)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size()+1)) : 0;
     assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
     emit_d8(cbuf, disp);
   %}
@@ -1764,13 +1754,13 @@
     Label *l = $labl$$label;
     $$$emit8$primary;
     emit_cc(cbuf, $secondary, $cop$$cmpcode);
-    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size()+4)) : 0);
+    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size()+4)) : 0);
   %}
 
   enc_class JccShort (cmpOp cop, label labl) %{    // JCC
     Label *l = $labl$$label;
     emit_cc(cbuf, $primary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.code_size()+1)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size()+1)) : 0;
     assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
     emit_d8(cbuf, disp);
   %}
@@ -1838,10 +1828,10 @@
 
   enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime, Java_To_Runtime_Leaf
     // This is the instruction starting address for relocation info.
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     $$$emit8$primary;
     // CALL directly to the runtime
-    emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
+    emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                 runtime_call_Relocation::spec(), RELOC_IMM32 );
 
     if (UseSSE >= 2) {
@@ -1871,12 +1861,12 @@
 
   enc_class pre_call_FPU %{
     // If method sets FPU control word restore it here
-    debug_only(int off0 = cbuf.code_size());
+    debug_only(int off0 = cbuf.insts_size());
     if( Compile::current()->in_24_bit_fp_mode() ) {
       MacroAssembler masm(&cbuf);
       masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
     }
-    debug_only(int off1 = cbuf.code_size());
+    debug_only(int off1 = cbuf.insts_size());
     assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
   %}
 
@@ -1889,12 +1879,12 @@
   %}
 
   enc_class preserve_SP %{
-    debug_only(int off0 = cbuf.code_size());
+    debug_only(int off0 = cbuf.insts_size());
     MacroAssembler _masm(&cbuf);
     // RBP is preserved across all calls, even compiled calls.
     // Use it to preserve RSP in places where the callee might change the SP.
     __ movptr(rbp_mh_SP_save, rsp);
-    debug_only(int off1 = cbuf.code_size());
+    debug_only(int off1 = cbuf.insts_size());
     assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
   %}
 
@@ -1906,16 +1896,16 @@
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     $$$emit8$primary;
     if ( !_method ) {
-      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
+      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                      runtime_call_Relocation::spec(), RELOC_IMM32 );
     } else if(_optimized_virtual) {
-      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
+      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                      opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
     } else {
-      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
+      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                      static_call_Relocation::spec(), RELOC_IMM32 );
     }
     if( _method ) {  // Emit stub for static call
@@ -1927,15 +1917,15 @@
     // !!!!!
     // Generate  "Mov EAX,0x00", placeholder instruction to load oop-info
     // emit_call_dynamic_prologue( cbuf );
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0xB8 + EAX_enc);        // mov    EAX,-1
     emit_d32_reloc(cbuf, (int)Universe::non_oop_word(), oop_Relocation::spec_for_immediate(), RELOC_IMM32);
-    address  virtual_call_oop_addr = cbuf.inst_mark();
+    address  virtual_call_oop_addr = cbuf.insts_mark();
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     $$$emit8$primary;
-    emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
+    emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                 virtual_call_Relocation::spec(virtual_call_oop_addr), RELOC_IMM32 );
   %}
 
@@ -1944,7 +1934,7 @@
     assert( -128 <= disp && disp <= 127, "compiled_code_offset isn't small");
 
     // CALL *[EAX+in_bytes(methodOopDesc::from_compiled_code_entry_point_offset())]
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     $$$emit8$primary;
     emit_rm(cbuf, 0x01, $secondary, EAX_enc );  // R/M byte
     emit_d8(cbuf, disp);             // Displacement
@@ -1976,9 +1966,9 @@
 //     emit_rm(cbuf, 0x3, EBP_enc, EBP_enc);
 //
 //     // CALL to interpreter.
-//     cbuf.set_inst_mark();
+//     cbuf.set_insts_mark();
 //     $$$emit8$primary;
-//     emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.code_end()) - 4),
+//     emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.insts_end()) - 4),
 //                 runtime_call_Relocation::spec(), RELOC_IMM32 );
 //   %}
 
@@ -2087,7 +2077,7 @@
   %}
 
   enc_class Opc_MemImm_F(immF src) %{
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     $$$emit8$primary;
     emit_rm(cbuf, 0x0, $secondary, 0x5);
     emit_float_constant(cbuf, $src$$constant);
@@ -2280,7 +2270,7 @@
   %}
 
   enc_class set_instruction_start( ) %{
-    cbuf.set_inst_mark();            // Mark start of opcode for reloc info in mem operand
+    cbuf.set_insts_mark();            // Mark start of opcode for reloc info in mem operand
   %}
 
   enc_class RegMem (eRegI ereg, memory mem) %{    // emit_reg_mem
@@ -2429,7 +2419,7 @@
       emit_opcode( cbuf, 0xD9 ); // FLD (i.e., push it)
       emit_d8( cbuf, 0xC0-1+$src$$reg );
     }
-    cbuf.set_inst_mark();       // Mark start of opcode for reloc info in mem operand
+    cbuf.set_insts_mark();       // Mark start of opcode for reloc info in mem operand
     emit_opcode(cbuf,$primary);
     encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_is_oop);
   %}
@@ -2474,7 +2464,7 @@
     emit_opcode(cbuf,0x1B);
     emit_rm(cbuf, 0x3, tmpReg, tmpReg);
     // AND $tmp,$y
-    cbuf.set_inst_mark();       // Mark start of opcode for reloc info in mem operand
+    cbuf.set_insts_mark();       // Mark start of opcode for reloc info in mem operand
     emit_opcode(cbuf,0x23);
     int reg_encoding = tmpReg;
     int base  = $mem$$base;
@@ -3157,9 +3147,9 @@
     // PUSH src2.lo
     emit_opcode(cbuf,               0x50+$src2$$reg  );
     // CALL directly to the runtime
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf,0xE8);       // Call into runtime
-    emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+    emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     // Restore stack
     emit_opcode(cbuf, 0x83); // add  SP, #framesize
     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
@@ -3176,9 +3166,9 @@
     // PUSH src2.lo
     emit_opcode(cbuf,               0x50+$src2$$reg  );
     // CALL directly to the runtime
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf,0xE8);       // Call into runtime
-    emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+    emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     // Restore stack
     emit_opcode(cbuf, 0x83); // add  SP, #framesize
     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
@@ -3824,9 +3814,9 @@
   %}
 
   enc_class enc_rethrow() %{
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0xE9);        // jmp    entry
-    emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.code_end())-4,
+    emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.insts_end())-4,
                    runtime_call_Relocation::spec(), RELOC_IMM32 );
   %}
 
@@ -3873,9 +3863,9 @@
     emit_opcode(cbuf,0xD9 );      // FLD     ST(i)
     emit_d8    (cbuf,0xC0-1+$src$$reg );
     // CALL directly to the runtime
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf,0xE8);       // Call into runtime
-    emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+    emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     // Carry on here...
   %}
 
@@ -3915,9 +3905,9 @@
     emit_opcode(cbuf,0xD9 );      // FLD     ST(i)
     emit_d8    (cbuf,0xC0-1+$src$$reg );
     // CALL directly to the runtime
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf,0xE8);       // Call into runtime
-    emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+    emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     // Carry on here...
   %}
 
@@ -3988,9 +3978,9 @@
     emit_d8(cbuf,0x04);
 
     // CALL directly to the runtime
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf,0xE8);       // Call into runtime
-    emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+    emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     // Carry on here...
   %}
 
@@ -4062,9 +4052,9 @@
     emit_d8(cbuf,0x08);
 
     // CALL directly to the runtime
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf,0xE8);      // Call into runtime
-    emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+    emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     // Carry on here...
   %}
 
@@ -4122,9 +4112,9 @@
     emit_d8(cbuf, $primary ? 0x8 : 0x4);
 
     // CALL directly to the runtime
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf,0xE8);       // Call into runtime
-    emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+    emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
 
     // Carry on here...
   %}
@@ -4321,7 +4311,7 @@
   // so the memory operand is used twice in the encoding.
   enc_class enc_storeL_volatile( memory mem, stackSlotL src ) %{
     store_to_stackslot( cbuf, 0x0DF, 0x05, $src$$disp );
-    cbuf.set_inst_mark();            // Mark start of FIST in case $mem has an oop
+    cbuf.set_insts_mark();            // Mark start of FIST in case $mem has an oop
     emit_opcode(cbuf,0xDF);
     int rm_byte_opcode = 0x07;
     int base     = $mem$$base;
@@ -4345,7 +4335,7 @@
       bool disp_is_oop = $src->disp_is_oop(); // disp-as-oop when working with static globals
       encode_RegMem(cbuf, $tmp$$reg, base, index, scale, displace, disp_is_oop);
     }
-    cbuf.set_inst_mark();            // Mark start of MOVSD in case $mem has an oop
+    cbuf.set_insts_mark();            // Mark start of MOVSD in case $mem has an oop
     { // MOVSD $mem,$tmp ! atomic long store
       emit_opcode(cbuf,0xF2);
       emit_opcode(cbuf,0x0F);
@@ -4378,7 +4368,7 @@
       emit_opcode(cbuf,0x62);
       emit_rm(cbuf, 0x3, $tmp$$reg, $tmp2$$reg);
     }
-    cbuf.set_inst_mark();            // Mark start of MOVSD in case $mem has an oop
+    cbuf.set_insts_mark();            // Mark start of MOVSD in case $mem has an oop
     { // MOVSD $mem,$tmp ! atomic long store
       emit_opcode(cbuf,0xF2);
       emit_opcode(cbuf,0x0F);
@@ -4399,7 +4389,7 @@
   // A better choice might be TESTB [spp + pagesize() - CacheLineSize()],0
 
   enc_class Safepoint_Poll() %{
-    cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0);
+    cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0);
     emit_opcode(cbuf,0x85);
     emit_rm (cbuf, 0x0, 0x7, 0x5);
     emit_d32(cbuf, (intptr_t)os::get_polling_page());
@@ -12932,7 +12922,7 @@
     bool ok = false;
     if ($cop$$cmpcode == Assembler::notEqual) {
        // the two jumps 6 bytes apart so the jump distances are too
-       parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
+       parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
     } else if ($cop$$cmpcode == Assembler::equal) {
        parity_disp = 6;
        ok = true;
@@ -12942,7 +12932,7 @@
     emit_d32(cbuf, parity_disp);
     $$$emit8$primary;
     emit_cc(cbuf, $secondary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
     emit_d32(cbuf, disp);
   %}
   ins_pipe(pipe_jcc);
@@ -13128,7 +13118,7 @@
     emit_cc(cbuf, $primary, Assembler::parity);
     int parity_disp = -1;
     if ($cop$$cmpcode == Assembler::notEqual) {
-      parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
+      parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
     } else if ($cop$$cmpcode == Assembler::equal) {
       parity_disp = 2;
     } else {
@@ -13136,7 +13126,7 @@
     }
     emit_d8(cbuf, parity_disp);
     emit_cc(cbuf, $primary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
     emit_d8(cbuf, disp);
     assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
     assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
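
Every emit helper in this file makes the same mechanical move: instead of storing through cbuf.code_end() and bumping the pointer by hand, emission goes through typed calls on the insts() section (the x86_64.ad hunks below repeat it). A sketch of the emitter shape under an assumed minimal CodeSection; using memcpy also sidesteps the unaligned-store casts the old code relied on:

#include <cstdint>
#include <cstring>

struct CodeSection {
  unsigned char* end;  // current emission point
  void emit_int8(uint8_t v)  { *end++ = v; }
  void emit_int16(int16_t v) { std::memcpy(end, &v, sizeof v); end += sizeof v; }
  void emit_int32(int32_t v) { std::memcpy(end, &v, sizeof v); end += sizeof v; }
  void emit_int64(int64_t v) { std::memcpy(end, &v, sizeof v); end += sizeof v; }
};
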
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Wed Jul 05 17:22:53 2017 +0200
@@ -619,62 +619,48 @@
 #endif
 
 // EMIT_RM()
-void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3)
-{
+void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
   unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
-  *(cbuf.code_end()) = c;
-  cbuf.set_code_end(cbuf.code_end() + 1);
+  cbuf.insts()->emit_int8(c);
 }
 
 // EMIT_CC()
-void emit_cc(CodeBuffer &cbuf, int f1, int f2)
-{
+void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
   unsigned char c = (unsigned char) (f1 | f2);
-  *(cbuf.code_end()) = c;
-  cbuf.set_code_end(cbuf.code_end() + 1);
+  cbuf.insts()->emit_int8(c);
 }
 
 // EMIT_OPCODE()
-void emit_opcode(CodeBuffer &cbuf, int code)
-{
-  *(cbuf.code_end()) = (unsigned char) code;
-  cbuf.set_code_end(cbuf.code_end() + 1);
+void emit_opcode(CodeBuffer &cbuf, int code) {
+  cbuf.insts()->emit_int8((unsigned char) code);
 }
 
 // EMIT_OPCODE() w/ relocation information
 void emit_opcode(CodeBuffer &cbuf,
                  int code, relocInfo::relocType reloc, int offset, int format)
 {
-  cbuf.relocate(cbuf.inst_mark() + offset, reloc, format);
+  cbuf.relocate(cbuf.insts_mark() + offset, reloc, format);
   emit_opcode(cbuf, code);
 }
 
 // EMIT_D8()
-void emit_d8(CodeBuffer &cbuf, int d8)
-{
-  *(cbuf.code_end()) = (unsigned char) d8;
-  cbuf.set_code_end(cbuf.code_end() + 1);
+void emit_d8(CodeBuffer &cbuf, int d8) {
+  cbuf.insts()->emit_int8((unsigned char) d8);
 }
 
 // EMIT_D16()
-void emit_d16(CodeBuffer &cbuf, int d16)
-{
-  *((short *)(cbuf.code_end())) = d16;
-  cbuf.set_code_end(cbuf.code_end() + 2);
+void emit_d16(CodeBuffer &cbuf, int d16) {
+  cbuf.insts()->emit_int16(d16);
 }
 
 // EMIT_D32()
-void emit_d32(CodeBuffer &cbuf, int d32)
-{
-  *((int *)(cbuf.code_end())) = d32;
-  cbuf.set_code_end(cbuf.code_end() + 4);
+void emit_d32(CodeBuffer &cbuf, int d32) {
+  cbuf.insts()->emit_int32(d32);
 }
 
 // EMIT_D64()
-void emit_d64(CodeBuffer &cbuf, int64_t d64)
-{
-  *((int64_t*) (cbuf.code_end())) = d64;
-  cbuf.set_code_end(cbuf.code_end() + 8);
+void emit_d64(CodeBuffer &cbuf, int64_t d64) {
+  cbuf.insts()->emit_int64(d64);
 }
 
 // emit 32 bit value and construct relocation entry from relocInfo::relocType
@@ -684,32 +670,24 @@
                     int format)
 {
   assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
-  cbuf.relocate(cbuf.inst_mark(), reloc, format);
-
-  *((int*) (cbuf.code_end())) = d32;
-  cbuf.set_code_end(cbuf.code_end() + 4);
+  cbuf.relocate(cbuf.insts_mark(), reloc, format);
+  cbuf.insts()->emit_int32(d32);
 }
 
 // emit 32 bit value and construct relocation entry from RelocationHolder
-void emit_d32_reloc(CodeBuffer& cbuf,
-                    int d32,
-                    RelocationHolder const& rspec,
-                    int format)
-{
+void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, int format) {
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
     assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
-  cbuf.relocate(cbuf.inst_mark(), rspec, format);
-
-  *((int* )(cbuf.code_end())) = d32;
-  cbuf.set_code_end(cbuf.code_end() + 4);
+  cbuf.relocate(cbuf.insts_mark(), rspec, format);
+  cbuf.insts()->emit_int32(d32);
 }
 
 void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
-  address next_ip = cbuf.code_end() + 4;
+  address next_ip = cbuf.insts_end() + 4;
   emit_d32_reloc(cbuf, (int) (addr - next_ip),
                  external_word_Relocation::spec(addr),
                  RELOC_DISP32);
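
The external-word overload shows the displacement convention in one place: x86 PC-relative operands are measured from the end of the instruction, hence next_ip is four bytes past the disp32 field. A standalone sketch of that arithmetic:

#include <cassert>
#include <cstdint>

// disp32 is chosen so that target == next_ip + disp.
int32_t rel32(const unsigned char* disp_field, const unsigned char* target) {
  const unsigned char* next_ip = disp_field + 4;  // end of the 4-byte field
  return static_cast<int32_t>(target - next_ip);
}

int main() {
  unsigned char code[64];
  assert(rel32(&code[10], &code[14]) == 0);   // fall through to next insn
  assert(rel32(&code[10], &code[30]) == 16);  // 16 bytes past next_ip
  return 0;
}
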
@@ -717,23 +695,13 @@
 
 
 // emit 64 bit value and construct relocation entry from relocInfo::relocType
-void emit_d64_reloc(CodeBuffer& cbuf,
-                    int64_t d64,
-                    relocInfo::relocType reloc,
-                    int format)
-{
-  cbuf.relocate(cbuf.inst_mark(), reloc, format);
-
-  *((int64_t*) (cbuf.code_end())) = d64;
-  cbuf.set_code_end(cbuf.code_end() + 8);
+void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, relocInfo::relocType reloc, int format) {
+  cbuf.relocate(cbuf.insts_mark(), reloc, format);
+  cbuf.insts()->emit_int64(d64);
 }
 
 // emit 64 bit value and construct relocation entry from RelocationHolder
-void emit_d64_reloc(CodeBuffer& cbuf,
-                    int64_t d64,
-                    RelocationHolder const& rspec,
-                    int format)
-{
+void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) {
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
@@ -741,10 +709,8 @@
            "cannot embed scavengable oops in code");
   }
 #endif
-  cbuf.relocate(cbuf.inst_mark(), rspec, format);
-
-  *((int64_t*) (cbuf.code_end())) = d64;
-  cbuf.set_code_end(cbuf.code_end() + 8);
+  cbuf.relocate(cbuf.insts_mark(), rspec, format);
+  cbuf.insts()->emit_int64(d64);
 }
 
 // Access stack slot for load or store
@@ -966,7 +932,7 @@
     }
   }
 
-  C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
+  C->set_frame_complete(cbuf.insts_size());
 
 #ifdef ASSERT
   if (VerifyStackAtCalls) {
@@ -1050,11 +1016,11 @@
   if (do_polling() && C->is_method_compilation()) {
     // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
     // XXX reg_mem doesn't support RIP-relative addressing yet
-    cbuf.set_inst_mark();
-    cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_return_type, 0); // XXX
+    cbuf.set_insts_mark();
+    cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_return_type, 0); // XXX
     emit_opcode(cbuf, 0x85); // testl
     emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
-    // cbuf.inst_mark() is beginning of instruction
+    // cbuf.insts_mark() is beginning of instruction
     emit_d32_reloc(cbuf, os::get_polling_page());
 //                    relocInfo::poll_return_type,
   }
@@ -1814,9 +1780,9 @@
   // movq rbx, 0
   // jmp -5 # to self
 
-  address mark = cbuf.inst_mark();  // get mark within main instrs section
-
-  // Note that the code buffer's inst_mark is always relative to insts.
+  address mark = cbuf.insts_mark();  // get mark within main instrs section
+
+  // Note that the code buffer's insts_mark is always relative to insts.
   // That's why we must use the macroassembler to generate a stub.
   MacroAssembler _masm(&cbuf);
 
@@ -1830,7 +1796,7 @@
   // This is recognized as unresolved by relocs/nativeinst/ic code
   __ jump(RuntimeAddress(__ pc()));
 
-  // Update current stubs pointer and restore code_end.
+  // Update current stubs pointer and restore insts_end.
   __ end_a_stub();
 }
 
@@ -1868,7 +1834,7 @@
 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 {
   MacroAssembler masm(&cbuf);
-  uint code_size = cbuf.code_size();
+  uint insts_size = cbuf.insts_size();
   if (UseCompressedOops) {
     masm.load_klass(rscratch1, j_rarg0);
     masm.cmpptr(rax, rscratch1);
@@ -1880,7 +1846,7 @@
 
   /* WARNING these NOPs are critical so that verified entry point is properly
      4 bytes aligned for patching by NativeJump::patch_verified_entry() */
-  int nops_cnt = 4 - ((cbuf.code_size() - code_size) & 0x3);
+  int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3);
   if (OptoBreakpoint) {
     // Leave space for int3
     nops_cnt -= 1;
@@ -1910,14 +1876,14 @@
 int emit_exception_handler(CodeBuffer& cbuf)
 {
 
-  // Note that the code buffer's inst_mark is always relative to insts.
+  // Note that the code buffer's insts_mark is always relative to insts.
   // That's why we must use the macroassembler to generate a handler.
   MacroAssembler _masm(&cbuf);
   address base =
   __ start_a_stub(size_exception_handler());
   if (base == NULL)  return 0;  // CodeBuffer::expand failed
   int offset = __ offset();
-  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
+  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
   __ end_a_stub();
   return offset;
@@ -1933,7 +1899,7 @@
 int emit_deopt_handler(CodeBuffer& cbuf)
 {
 
-  // Note that the code buffer's inst_mark is always relative to insts.
+  // Note that the code buffer's insts_mark is always relative to insts.
   // That's why we must use the macroassembler to generate a handler.
   MacroAssembler _masm(&cbuf);
   address base =
@@ -1962,7 +1928,7 @@
   address double_address = __ double_constant(x);
   cbuf.insts()->set_mark_off(mark);  // preserve mark across masm shift
   emit_d32_reloc(cbuf,
-                 (int) (double_address - cbuf.code_end() - 4),
+                 (int) (double_address - cbuf.insts_end() - 4),
                  internal_word_Relocation::spec(double_address),
                  RELOC_DISP32);
 }
@@ -1973,7 +1939,7 @@
   address float_address = __ float_constant(x);
   cbuf.insts()->set_mark_off(mark);  // preserve mark across masm shift
   emit_d32_reloc(cbuf,
-                 (int) (float_address - cbuf.code_end() - 4),
+                 (int) (float_address - cbuf.insts_end() - 4),
                  internal_word_Relocation::spec(float_address),
                  RELOC_DISP32);
 }
@@ -2481,14 +2447,14 @@
   %{
     // JMP, CALL
     Label* l = $labl$$label;
-    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
+    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0);
   %}
 
   enc_class LblShort(label labl)
   %{
     // JMP, CALL
     Label* l = $labl$$label;
-    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
     assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
     emit_d8(cbuf, disp);
   %}
@@ -2517,7 +2483,7 @@
     Label* l = $labl$$label;
     $$$emit8$primary;
     emit_cc(cbuf, $secondary, $cop$$cmpcode);
-    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
+    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0);
   %}
 
   enc_class JccShort (cmpOp cop, label labl)
@@ -2525,7 +2491,7 @@
   // JCC
     Label *l = $labl$$label;
     emit_cc(cbuf, $primary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
     assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
     emit_d8(cbuf, disp);
   %}
@@ -2609,22 +2575,22 @@
   %{
     // CALL Java_To_Interpreter
     // This is the instruction starting address for relocation info.
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     $$$emit8$primary;
     // CALL directly to the runtime
     emit_d32_reloc(cbuf,
-                   (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
+                   (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
   %}
 
   enc_class preserve_SP %{
-    debug_only(int off0 = cbuf.code_size());
+    debug_only(int off0 = cbuf.insts_size());
     MacroAssembler _masm(&cbuf);
     // RBP is preserved across all calls, even compiled calls.
     // Use it to preserve RSP in places where the callee might change the SP.
     __ movptr(rbp_mh_SP_save, rsp);
-    debug_only(int off1 = cbuf.code_size());
+    debug_only(int off1 = cbuf.insts_size());
     assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
   %}
 
@@ -2638,22 +2604,22 @@
     // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to
     // determine who we intended to call.
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     $$$emit8$primary;
 
     if (!_method) {
       emit_d32_reloc(cbuf,
-                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
+                     (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                      runtime_call_Relocation::spec(),
                      RELOC_DISP32);
     } else if (_optimized_virtual) {
       emit_d32_reloc(cbuf,
-                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
+                     (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                      opt_virtual_call_Relocation::spec(),
                      RELOC_DISP32);
     } else {
       emit_d32_reloc(cbuf,
-                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
+                     (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                      static_call_Relocation::spec(),
                      RELOC_DISP32);
     }
@@ -2669,7 +2635,7 @@
     // !!!!!
     // Generate  "movq rax, -1", placeholder instruction to load oop-info
     // emit_call_dynamic_prologue( cbuf );
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
 
     // movq rax, -1
     emit_opcode(cbuf, Assembler::REX_W);
@@ -2677,13 +2643,13 @@
     emit_d64_reloc(cbuf,
                    (int64_t) Universe::non_oop_word(),
                    oop_Relocation::spec_for_immediate(), RELOC_IMM64);
-    address virtual_call_oop_addr = cbuf.inst_mark();
+    address virtual_call_oop_addr = cbuf.insts_mark();
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     $$$emit8$primary;
     emit_d32_reloc(cbuf,
-                   (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
+                   (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                    virtual_call_Relocation::spec(virtual_call_oop_addr),
                    RELOC_DISP32);
   %}
@@ -2697,7 +2663,7 @@
     // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");
 
     // callq *disp(%rax)
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     $$$emit8$primary;
     if (disp < 0x80) {
       emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte
@@ -3729,10 +3695,10 @@
 
   enc_class enc_rethrow()
   %{
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0xE9); // jmp entry
     emit_d32_reloc(cbuf,
-                   (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4),
+                   (int) (OptoRuntime::rethrow_stub() - cbuf.insts_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
   %}
@@ -3742,7 +3708,7 @@
     int dstenc = $dst$$reg;
     address signmask_address = (address) StubRoutines::x86::float_sign_mask();
 
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     if (dstenc >= 8) {
       emit_opcode(cbuf, Assembler::REX_R);
       dstenc -= 8;
@@ -3759,7 +3725,7 @@
     int dstenc = $dst$$reg;
     address signmask_address = (address) StubRoutines::x86::double_sign_mask();
 
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0x66);
     if (dstenc >= 8) {
       emit_opcode(cbuf, Assembler::REX_R);
@@ -3777,7 +3743,7 @@
     int dstenc = $dst$$reg;
     address signflip_address = (address) StubRoutines::x86::float_sign_flip();
 
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     if (dstenc >= 8) {
       emit_opcode(cbuf, Assembler::REX_R);
       dstenc -= 8;
@@ -3794,7 +3760,7 @@
     int dstenc = $dst$$reg;
     address signflip_address = (address) StubRoutines::x86::double_sign_flip();
 
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0x66);
     if (dstenc >= 8) {
       emit_opcode(cbuf, Assembler::REX_R);
@@ -3846,11 +3812,11 @@
     encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
 
     // call f2i_fixup
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0xE8);
     emit_d32_reloc(cbuf,
                    (int)
-                   (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4),
+                   (StubRoutines::x86::f2i_fixup() - cbuf.insts_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
 
@@ -3870,7 +3836,7 @@
     address const_address = (address) StubRoutines::x86::double_sign_flip();
 
     // cmpq $dst, [0x8000000000000000]
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
     emit_opcode(cbuf, 0x39);
     // XXX reg_mem doesn't support RIP-relative addressing yet
@@ -3904,11 +3870,11 @@
     encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
 
     // call f2l_fixup
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0xE8);
     emit_d32_reloc(cbuf,
                    (int)
-                   (StubRoutines::x86::f2l_fixup() - cbuf.code_end() - 4),
+                   (StubRoutines::x86::f2l_fixup() - cbuf.insts_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
 
@@ -3960,11 +3926,11 @@
     encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
 
     // call d2i_fixup
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0xE8);
     emit_d32_reloc(cbuf,
                    (int)
-                   (StubRoutines::x86::d2i_fixup() - cbuf.code_end() - 4),
+                   (StubRoutines::x86::d2i_fixup() - cbuf.insts_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
 
@@ -3984,7 +3950,7 @@
     address const_address = (address) StubRoutines::x86::double_sign_flip();
 
     // cmpq $dst, [0x8000000000000000]
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
     emit_opcode(cbuf, 0x39);
     // XXX reg_mem doesn't support RIP-relative addressing yet
@@ -4018,11 +3984,11 @@
     encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
 
     // call d2l_fixup
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0xE8);
     emit_d32_reloc(cbuf,
                    (int)
-                   (StubRoutines::x86::d2l_fixup() - cbuf.code_end() - 4),
+                   (StubRoutines::x86::d2l_fixup() - cbuf.insts_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
 
@@ -4042,11 +4008,11 @@
   %{
     // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
     // XXX reg_mem doesn't support RIP-relative addressing yet
-    cbuf.set_inst_mark();
-    cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0); // XXX
+    cbuf.set_insts_mark();
+    cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0); // XXX
     emit_opcode(cbuf, 0x85); // testl
     emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
-    // cbuf.inst_mark() is beginning of instruction
+    // cbuf.insts_mark() is beginning of instruction
     emit_d32_reloc(cbuf, os::get_polling_page());
 //                    relocInfo::poll_type,
   %}
@@ -12304,7 +12270,7 @@
     int parity_disp = -1;
     if ($cop$$cmpcode == Assembler::notEqual) {
        // the two jumps are 6 bytes apart so the jump distances are too
-       parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
+       parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
     } else if ($cop$$cmpcode == Assembler::equal) {
        parity_disp = 6;
     } else {
@@ -12313,7 +12279,7 @@
     emit_d32(cbuf, parity_disp);
     $$$emit8$primary;
     emit_cc(cbuf, $secondary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
     emit_d32(cbuf, disp);
   %}
   ins_pipe(pipe_jcc);
@@ -12508,7 +12474,7 @@
     emit_cc(cbuf, $primary, Assembler::parity);
     int parity_disp = -1;
     if ($cop$$cmpcode == Assembler::notEqual) {
-      parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
+      parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
     } else if ($cop$$cmpcode == Assembler::equal) {
       parity_disp = 2;
     } else {
@@ -12516,7 +12482,7 @@
     }
     emit_d8(cbuf, parity_disp);
     emit_cc(cbuf, $primary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
     emit_d8(cbuf, disp);
     assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
     assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -2597,10 +2597,14 @@
 // where we're going to put our guard pages, truncate the mapping at
 // that point by munmap()ping it.  This ensures that when we later
 // munmap() the guard pages we don't leave a hole in the stack
-// mapping.
+// mapping. This only affects the main/initial thread, but we guard
+// against future OS changes.
 bool os::create_stack_guard_pages(char* addr, size_t size) {
   uintptr_t stack_extent, stack_base;
-  if (get_stack_bounds(&stack_extent, &stack_base)) {
+  bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
+  if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
+      assert(os::Linux::is_initial_thread(),
+           "growable stack in non-initial thread");
     if (stack_extent < (uintptr_t)addr)
       ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
   }
@@ -2609,10 +2613,15 @@
 }
 
 // If this is a growable mapping, remove the guard pages entirely by
-// munmap()ping them.  If not, just call uncommit_memory().
+// munmap()ping them.  If not, just call uncommit_memory(). This only
+// affects the main/initial thread, but we guard against future OS changes.
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
   uintptr_t stack_extent, stack_base;
-  if (get_stack_bounds(&stack_extent, &stack_base)) {
+  bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
+  if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
+      assert(os::Linux::is_initial_thread(),
+           "growable stack in non-initial thread");
+
     return ::munmap(addr, size) == 0;
   }
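
The chk_bounds initializer above leans on HotSpot's conditional-compilation macros: DEBUG_ONLY(x) expands to x only in debug (ASSERT) builds and NOT_DEBUG(x) only in product builds, so exactly one operand survives preprocessing. A minimal sketch of the expansion, with a hypothetical stand-in for os::Linux::is_initial_thread():

    #include <cstdio>

    // Build-dependent expansion, mirroring HotSpot's utilities/macros.hpp.
    #ifdef ASSERT
      #define DEBUG_ONLY(code) code
      #define NOT_DEBUG(code)
    #else
      #define DEBUG_ONLY(code)
      #define NOT_DEBUG(code) code
    #endif

    // Hypothetical stand-in for os::Linux::is_initial_thread().
    static bool is_initial_thread() { return true; }

    int main() {
      // Product builds: chk_bounds == is_initial_thread().
      // Debug builds:   chk_bounds == true, so the bounds check (and the
      // assert catching a growable stack in a non-initial thread) runs
      // for every thread.
      bool chk_bounds = NOT_DEBUG(is_initial_thread()) DEBUG_ONLY(true);
      std::printf("chk_bounds = %d\n", (int) chk_bounds);
      return 0;
    }
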
 
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -230,7 +230,8 @@
 
   GEN_OFFS(CodeBlob, _name);
   GEN_OFFS(CodeBlob, _header_size);
-  GEN_OFFS(CodeBlob, _instructions_offset);
+  GEN_OFFS(CodeBlob, _content_offset);
+  GEN_OFFS(CodeBlob, _code_offset);
   GEN_OFFS(CodeBlob, _data_offset);
   GEN_OFFS(CodeBlob, _frame_size);
   printf("\n");
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c	Wed Jul 05 17:22:53 2017 +0200
@@ -124,7 +124,7 @@
   uint64_t pc_desc;
 
   int32_t  orig_pc_offset;      /* _orig_pc_offset */
-  int32_t  instrs_beg;          /* _instructions_offset */
+  int32_t  instrs_beg;          /* _code_offset */
   int32_t  instrs_end;
   int32_t  deopt_beg;           /* _deoptimize_offset */
   int32_t  scopes_data_beg;     /* _scopes_data_offset */
@@ -587,7 +587,7 @@
       fprintf(stderr, "\t nmethod_info: BEGIN \n");
 
   /* Instructions */
-  err = ps_pread(J->P, nm + OFFSET_CodeBlob_instructions_offset, &N->instrs_beg, SZ32);
+  err = ps_pread(J->P, nm + OFFSET_CodeBlob_code_offset, &N->instrs_beg, SZ32);
   CHECK_FAIL(err);
   err = ps_pread(J->P, nm + OFFSET_CodeBlob_data_offset, &N->instrs_end, SZ32);
   CHECK_FAIL(err);
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -149,8 +149,8 @@
   // If we are using Vectored Exceptions we don't need this registration
   if (UseVectoredExceptions) return true;
 
-  BufferBlob* b = BufferBlob::create("CodeCache Exception Handler", sizeof (DynamicCodeData));
-  CodeBuffer cb(b->instructions_begin(), b->instructions_size());
+  BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
+  CodeBuffer cb(blob);
   MacroAssembler* masm = new MacroAssembler(&cb);
   pDCD = (pDynamicCodeData) masm->pc();
 
--- a/hotspot/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -141,8 +141,7 @@
 
 // emit an interrupt that is caught by the debugger
 void emit_break(CodeBuffer &cbuf) {
-  *(cbuf.code_end()) = (unsigned char)(0xcc);
-  cbuf.set_code_end(cbuf.code_end() + 1);
+  cbuf.insts()->emit_int8((unsigned char) 0xcc);
 }
 
 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
--- a/hotspot/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -144,8 +144,7 @@
 
 // emit an interrupt that is caught by the debugger
 void emit_break(CodeBuffer &cbuf) {
-  *(cbuf.code_end()) = (unsigned char)(0xcc);
-  cbuf.set_code_end(cbuf.code_end() + 1);
+  cbuf.insts()->emit_int8((unsigned char) 0xcc);
 }
 
 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -2482,7 +2482,7 @@
   }
 
   // Save current instruction's starting address (helps with relocation).
-  fprintf( fp, "    cbuf.set_inst_mark();\n");
+  fprintf(fp, "    cbuf.set_insts_mark();\n");
 
   // // // idx0 is only needed for syntactic purposes and only by "storeSSI"
   // fprintf( fp, "    unsigned idx0  = 0;\n");
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,12 +74,11 @@
 
 typedef CodeBuffer::csize_t csize_t;  // file-local definition
 
-// external buffer, in a predefined CodeBlob or other buffer area
+// External buffer, in a predefined CodeBlob.
 // Important: The code_start must be taken exactly, and not realigned.
-CodeBuffer::CodeBuffer(address code_start, csize_t code_size) {
-  assert(code_start != NULL, "sanity");
+CodeBuffer::CodeBuffer(CodeBlob* blob) {
   initialize_misc("static buffer");
-  initialize(code_start, code_size);
+  initialize(blob->content_begin(), blob->content_size());
   assert(verify_section_allocation(), "initial use of buffer OK");
 }
 
@@ -99,7 +98,7 @@
   // Set up various pointers into the blob.
   initialize(_total_start, _total_size);
 
-  assert((uintptr_t)code_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");
+  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");
 
   pd_initialize();
 
@@ -144,13 +143,6 @@
 
 void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
   assert(cs != &_insts, "insts is the memory provider, not the consumer");
-#ifdef ASSERT
-  for (int n = (int)SECT_INSTS+1; n < (int)SECT_LIMIT; n++) {
-    CodeSection* prevCS = code_section(n);
-    if (prevCS == cs)  break;
-    assert(!prevCS->is_allocated(), "section allocation must be in reverse order");
-  }
-#endif
   csize_t slop = CodeSection::end_slop();  // margin between sections
   int align = cs->alignment();
   assert(is_power_of_2(align), "sanity");
@@ -192,21 +184,21 @@
 void CodeBuffer::set_blob(BufferBlob* blob) {
   _blob = blob;
   if (blob != NULL) {
-    address start = blob->instructions_begin();
-    address end   = blob->instructions_end();
+    address start = blob->content_begin();
+    address end   = blob->content_end();
     // Round up the starting address.
     int align = _insts.alignment();
     start += (-(intptr_t)start) & (align-1);
     _total_start = start;
     _total_size  = end - start;
   } else {
-    #ifdef ASSERT
+#ifdef ASSERT
     // Clean out dangling pointers.
     _total_start    = badAddress;
+    _consts._start  = _consts._end  = badAddress;
     _insts._start   = _insts._end   = badAddress;
     _stubs._start   = _stubs._end   = badAddress;
-    _consts._start  = _consts._end  = badAddress;
-    #endif //ASSERT
+#endif //ASSERT
   }
 }
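
The round-up in set_blob uses the standard power-of-two trick: (-(intptr_t)start) & (align-1) is the number of bytes that brings start up to the next multiple of align, and zero when start is already aligned. A quick self-check of the idiom:

    #include <cassert>
    #include <cstdint>

    // Distance from p up to the next multiple of 'align' (a power of two).
    static uintptr_t round_up_delta(uintptr_t p, uintptr_t align) {
      return (uintptr_t)(-(intptr_t)p) & (align - 1);
    }

    int main() {
      assert(round_up_delta(0x1000, 16) == 0);   // already aligned
      assert(round_up_delta(0x1001, 16) == 15);  // 0x1001 + 15 == 0x1010
      assert(round_up_delta(0x100f, 16) == 1);   // 0x100f + 1  == 0x1010
      return 0;
    }
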
 
@@ -222,9 +214,9 @@
   return NULL;
 #else //PRODUCT
   switch (n) {
+  case SECT_CONSTS:            return "consts";
   case SECT_INSTS:             return "insts";
   case SECT_STUBS:             return "stubs";
-  case SECT_CONSTS:            return "consts";
   default:                     return NULL;
   }
 #endif //PRODUCT
@@ -422,21 +414,21 @@
 /// The pattern is the same for all functions.
 /// We iterate over all the sections, padding each to alignment.
 
-csize_t CodeBuffer::total_code_size() const {
-  csize_t code_size_so_far = 0;
+csize_t CodeBuffer::total_content_size() const {
+  csize_t size_so_far = 0;
   for (int n = 0; n < (int)SECT_LIMIT; n++) {
     const CodeSection* cs = code_section(n);
     if (cs->is_empty())  continue;  // skip trivial section
-    code_size_so_far = cs->align_at_start(code_size_so_far);
-    code_size_so_far += cs->size();
+    size_so_far = cs->align_at_start(size_so_far);
+    size_so_far += cs->size();
   }
-  return code_size_so_far;
+  return size_so_far;
 }
 
 void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
   address buf = dest->_total_start;
   csize_t buf_offset = 0;
-  assert(dest->_total_size >= total_code_size(), "must be big enough");
+  assert(dest->_total_size >= total_content_size(), "must be big enough");
 
   {
     // not sure why this is here, but why not...
@@ -446,12 +438,11 @@
 
   const CodeSection* prev_cs      = NULL;
   CodeSection*       prev_dest_cs = NULL;
-  for (int n = 0; n < (int)SECT_LIMIT; n++) {
+
+  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
     // figure compact layout of each section
     const CodeSection* cs = code_section(n);
-    address cstart = cs->start();
-    address cend   = cs->end();
-    csize_t csize  = cend - cstart;
+    csize_t csize = cs->size();
 
     CodeSection* dest_cs = dest->code_section(n);
     if (!cs->is_empty()) {
@@ -464,7 +455,7 @@
         prev_dest_cs->_limit += padding;
       }
       #ifdef ASSERT
-      if (prev_cs != NULL && prev_cs->is_frozen() && n < SECT_CONSTS) {
+      if (prev_cs != NULL && prev_cs->is_frozen() && n < (SECT_LIMIT - 1)) {
         // Make sure the ends still match up.
         // This is important because a branch in a frozen section
         // might target code in a following section, via a Label,
@@ -489,33 +480,29 @@
   }
 
   // Done calculating sections; did it come out to the right end?
-  assert(buf_offset == total_code_size(), "sanity");
+  assert(buf_offset == total_content_size(), "sanity");
   assert(dest->verify_section_allocation(), "final configuration works");
 }
 
-csize_t CodeBuffer::total_offset_of(address addr) const {
-  csize_t code_size_so_far = 0;
-  for (int n = 0; n < (int)SECT_LIMIT; n++) {
-    const CodeSection* cs = code_section(n);
-    if (!cs->is_empty()) {
-      code_size_so_far = cs->align_at_start(code_size_so_far);
+csize_t CodeBuffer::total_offset_of(CodeSection* cs) const {
+  csize_t size_so_far = 0;
+  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
+    const CodeSection* cur_cs = code_section(n);
+    if (!cur_cs->is_empty()) {
+      size_so_far = cur_cs->align_at_start(size_so_far);
     }
-    if (cs->contains2(addr)) {
-      return code_size_so_far + (addr - cs->start());
+    if (cur_cs->index() == cs->index()) {
+      return size_so_far;
     }
-    code_size_so_far += cs->size();
+    size_so_far += cur_cs->size();
   }
-#ifndef PRODUCT
-  tty->print_cr("Dangling address " PTR_FORMAT " in:", addr);
-  ((CodeBuffer*)this)->print();
-#endif
   ShouldNotReachHere();
   return -1;
 }
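
total_offset_of now answers where a whole section will start in the final blob, accumulating each earlier section's aligned size instead of searching for a containing address. A tiny numeric sketch of that accumulation, with invented sizes and alignments:

    #include <cstdio>

    // Round 'off' up to the next multiple of 'align' (a power of two).
    static int align_up(int off, int align) {
      return (off + align - 1) & -align;
    }

    int main() {
      // Hypothetical final layout order: consts, insts, stubs.
      const char* names[3]  = { "consts", "insts", "stubs" };
      int         sizes[3]  = { 24, 100, 40 };  // bytes emitted per section
      int         aligns[3] = { 8, 16, 8 };     // start alignment per section

      int size_so_far = 0;
      for (int n = 0; n < 3; n++) {
        size_so_far = align_up(size_so_far, aligns[n]);
        std::printf("%s starts at offset %d\n", names[n], size_so_far);
        size_so_far += sizes[n];
      }
      // consts at 0; insts at 32 (24 rounded up to a 16-byte boundary);
      // stubs at 136 (132 rounded up to 8).
      return 0;
    }
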
 
 csize_t CodeBuffer::total_relocation_size() const {
   csize_t lsize = copy_relocations_to(NULL);  // dry run only
-  csize_t csize = total_code_size();
+  csize_t csize = total_content_size();
   csize_t total = RelocIterator::locs_and_index_size(csize, lsize);
   return (csize_t) align_size_up(total, HeapWordSize);
 }
@@ -534,7 +521,7 @@
 
   csize_t code_end_so_far = 0;
   csize_t code_point_so_far = 0;
-  for (int n = 0; n < (int)SECT_LIMIT; n++) {
+  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
     // pull relocs out of each section
     const CodeSection* cs = code_section(n);
     assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
@@ -601,7 +588,7 @@
     buf_offset += sizeof(relocInfo);
   }
 
-  assert(code_end_so_far == total_code_size(), "sanity");
+  assert(code_end_so_far == total_content_size(), "sanity");
 
   // Account for index:
   if (buf != NULL) {
@@ -621,9 +608,8 @@
   }
 #endif //PRODUCT
 
-  CodeBuffer dest(dest_blob->instructions_begin(),
-                  dest_blob->instructions_size());
-  assert(dest_blob->instructions_size() >= total_code_size(), "good sizing");
+  CodeBuffer dest(dest_blob);
+  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
   this->compute_final_layout(&dest);
   relocate_code_to(&dest);
 
@@ -631,18 +617,20 @@
   dest_blob->set_comments(_comments);
 
   // Done moving code bytes; were they the right size?
-  assert(round_to(dest.total_code_size(), oopSize) == dest_blob->instructions_size(), "sanity");
+  assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");
 
   // Flush generated code
-  ICache::invalidate_range(dest_blob->instructions_begin(),
-                           dest_blob->instructions_size());
+  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
 }
 
-// Move all my code into another code buffer.
-// Consult applicable relocs to repair embedded addresses.
+// Move all my code into another code buffer.  Consult applicable
+// relocs to repair embedded addresses.  The layout in the destination
+// CodeBuffer is different from the source CodeBuffer: the destination
+// CodeBuffer gets the final layout (consts, insts, stubs in order of
+// ascending address).
 void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
   DEBUG_ONLY(address dest_end = dest->_total_start + dest->_total_size);
-  for (int n = 0; n < (int)SECT_LIMIT; n++) {
+  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
     // pull code out of each section
     const CodeSection* cs = code_section(n);
     if (cs->is_empty())  continue;  // skip trivial section
@@ -684,20 +672,19 @@
                                                csize_t* new_capacity) {
   csize_t new_total_cap = 0;
 
-  int prev_n = -1;
-  for (int n = 0; n < (int)SECT_LIMIT; n++) {
+  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
     const CodeSection* sect = code_section(n);
 
     if (!sect->is_empty()) {
-      // Compute initial padding; assign it to the previous non-empty guy.
-      // Cf. compute_final_layout.
+      // Compute initial padding; assign it to the previous section,
+      // even if it's empty (e.g. consts section can be empty).
+      // Cf. compute_final_layout.
       csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
       if (padding != 0) {
         new_total_cap += padding;
-        assert(prev_n >= 0, "sanity");
-        new_capacity[prev_n] += padding;
+        assert(n - 1 >= SECT_FIRST, "sanity");
+        new_capacity[n - 1] += padding;
       }
-      prev_n = n;
     }
 
     csize_t exp = sect->size();  // 100% increase
@@ -777,11 +764,11 @@
   this->_before_expand = bxp;
 
   // Give each section its required (expanded) capacity.
-  for (int n = (int)SECT_LIMIT-1; n >= SECT_INSTS; n--) {
+  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
     CodeSection* cb_sect   = cb.code_section(n);
     CodeSection* this_sect = code_section(n);
     if (new_capacity[n] == 0)  continue;  // already nulled out
-    if (n > SECT_INSTS) {
+    if (n != SECT_INSTS) {
       cb.initialize_section_size(cb_sect, new_capacity[n]);
     }
     assert(cb_sect->capacity() >= new_capacity[n], "big enough");
@@ -844,20 +831,25 @@
   if (tstart == badAddress)  return true;  // smashed by set_blob(NULL)
   address tend   = tstart + _total_size;
   if (_blob != NULL) {
-    assert(tstart >= _blob->instructions_begin(), "sanity");
-    assert(tend   <= _blob->instructions_end(),   "sanity");
+    assert(tstart >= _blob->content_begin(), "sanity");
+    assert(tend   <= _blob->content_end(),   "sanity");
   }
-  address tcheck = tstart;  // advancing pointer to verify disjointness
-  for (int n = 0; n < (int)SECT_LIMIT; n++) {
+  // Verify disjointness.
+  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
     CodeSection* sect = code_section(n);
-    if (!sect->is_allocated())  continue;
-    assert(sect->start() >= tcheck, "sanity");
-    tcheck = sect->start();
-    assert((intptr_t)tcheck % sect->alignment() == 0
+    if (!sect->is_allocated() || sect->is_empty())  continue;
+    assert((intptr_t)sect->start() % sect->alignment() == 0
            || sect->is_empty() || _blob == NULL,
            "start is aligned");
-    assert(sect->end()   >= tcheck, "sanity");
-    assert(sect->end()   <= tend,   "sanity");
+    for (int m = (int) SECT_FIRST; m < (int) SECT_LIMIT; m++) {
+      CodeSection* other = code_section(m);
+      if (!other->is_allocated() || other == sect)  continue;
+      assert(!other->contains(sect->start()    ), "sanity");
+      // limit is an exclusive address and can be the start of another
+      // section.
+      assert(!other->contains(sect->limit() - 1), "sanity");
+    }
+    assert(sect->end() <= tend, "sanity");
   }
   return true;
 }
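
Since only the destination buffer gets the final ascending consts/insts/stubs layout, the source buffer's sections can sit in any address order, so the check above verifies pairwise disjointness instead of walking an advancing pointer. Two half-open ranges are disjoint exactly when neither contains the other's first or last byte, which is why the loop probes limit() - 1. A minimal model:

    #include <cassert>

    // Half-open ranges [start, limit).  One range's limit may equal
    // another's start, which is why the probe uses limit - 1.
    struct Range { int start, limit; };

    static bool contains(const Range& r, int p) {
      return r.start <= p && p < r.limit;
    }
    static bool disjoint(const Range& a, const Range& b) {
      return !contains(b, a.start) && !contains(b, a.limit - 1) &&
             !contains(a, b.start) && !contains(a, b.limit - 1);
    }

    int main() {
      Range consts = {0, 32}, insts = {32, 132};
      assert(disjoint(consts, insts));  // touching at 32 is still disjoint
      Range bad = {16, 48};
      assert(!disjoint(consts, bad));   // overlaps [16, 32)
      return 0;
    }
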
@@ -981,13 +973,13 @@
 
 
 void CodeBuffer::decode() {
-  Disassembler::decode(decode_begin(), code_end());
-  _decode_begin = code_end();
+  Disassembler::decode(decode_begin(), insts_end());
+  _decode_begin = insts_end();
 }
 
 
 void CodeBuffer::skip_decode() {
-  _decode_begin = code_end();
+  _decode_begin = insts_end();
 }
 
 
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -186,6 +186,12 @@
     _locs_point = pc;
   }
 
+  // Code emission
+  void emit_int8 (int8_t  x) { *((int8_t*)  end()) = x; set_end(end() + 1); }
+  void emit_int16(int16_t x) { *((int16_t*) end()) = x; set_end(end() + 2); }
+  void emit_int32(int32_t x) { *((int32_t*) end()) = x; set_end(end() + 4); }
+  void emit_int64(int64_t x) { *((int64_t*) end()) = x; set_end(end() + 8); }
+
   // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
   void initialize_shared_locs(relocInfo* buf, int length);
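
These helpers capture the store-then-advance pattern that previously appeared inline in the .ad files (compare the emit_break hunks above): write the value at the section's current end, then bump the end pointer by the value's width. A stripped-down stand-in showing the shape; MiniSection is invented, and the direct unaligned store mirrors the real helpers (fine on x86):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    class MiniSection {
      uint8_t* _end;    // next free byte
      uint8_t* _limit;  // one past the last usable byte
     public:
      MiniSection(uint8_t* buf, size_t cap) : _end(buf), _limit(buf + cap) {}
      uint8_t* end() const     { return _end; }
      void set_end(uint8_t* e) { assert(e <= _limit); _end = e; }

      // Same shape as CodeSection::emit_int8/16/32/64 above.
      void emit_int8 (int8_t  x) { *((int8_t*)  end()) = x; set_end(end() + 1); }
      void emit_int32(int32_t x) { *((int32_t*) end()) = x; set_end(end() + 4); }
    };

    int main() {
      uint8_t buf[16];
      MiniSection s(buf, sizeof(buf));
      s.emit_int8((int8_t) 0xcc);  // e.g. the x86 INT3 byte from emit_break
      s.emit_int32(0x12345678);
      assert(s.end() == buf + 5);
      return 0;
    }
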
 
@@ -283,10 +289,12 @@
  public:
   typedef int csize_t;  // code size type; would be size_t except for history
   enum {
-    // Here is the list of all possible sections, in order of ascending address.
+    // Here is the list of all possible sections.  The order reflects
+    // the final layout.
+    SECT_FIRST = 0,
+    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
     SECT_INSTS,               // Executable instructions.
     SECT_STUBS,               // Outbound trampolines for supporting call sites.
-    SECT_CONSTS,              // Non-instruction data:  Floats, jump tables, etc.
     SECT_LIMIT, SECT_NONE = -1
   };
 
@@ -298,9 +306,9 @@
 
   const char*  _name;
 
+  CodeSection  _consts;             // constants, jump tables
   CodeSection  _insts;              // instructions (the main section)
   CodeSection  _stubs;              // stubs (call site support), deopt, exception handling
-  CodeSection  _consts;             // constants, jump tables
 
   CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion
 
@@ -328,9 +336,9 @@
   }
 
   void initialize(address code_start, csize_t code_size) {
+    _consts.initialize_outer(this,  SECT_CONSTS);
     _insts.initialize_outer(this,   SECT_INSTS);
     _stubs.initialize_outer(this,   SECT_STUBS);
-    _consts.initialize_outer(this,  SECT_CONSTS);
     _total_start = code_start;
     _total_size  = code_size;
     // Initialize the main section:
@@ -374,9 +382,17 @@
 
  public:
   // (1) code buffer referring to pre-allocated instruction memory
-  CodeBuffer(address code_start, csize_t code_size);
+  CodeBuffer(address code_start, csize_t code_size) {
+    assert(code_start != NULL, "sanity");
+    initialize_misc("static buffer");
+    initialize(code_start, code_size);
+    assert(verify_section_allocation(), "initial use of buffer OK");
+  }
 
-  // (2) code buffer allocating codeBlob memory for code & relocation
+  // (2) CodeBuffer referring to pre-allocated CodeBlob.
+  CodeBuffer(CodeBlob* blob);
+
+  // (3) code buffer allocating codeBlob memory for code & relocation
   // info but with lazy initialization.  The name must be something
   // informative.
   CodeBuffer(const char* name) {
@@ -384,7 +400,7 @@
   }
 
 
-  // (3) code buffer allocating codeBlob memory for code & relocation
+  // (4) code buffer allocating codeBlob memory for code & relocation
   // info.  The name must be something informative and code_size must
   // include both code and stubs sizes.
   CodeBuffer(const char* name, csize_t code_size, csize_t locs_size) {
@@ -394,22 +410,22 @@
 
   ~CodeBuffer();
 
-  // Initialize a CodeBuffer constructed using constructor 2.  Using
-  // constructor 3 is equivalent to calling constructor 2 and then
+  // Initialize a CodeBuffer constructed using constructor 3.  Using
+  // constructor 4 is equivalent to calling constructor 3 and then
   // calling this method.  It's been factored out for convenience of
   // construction.
   void initialize(csize_t code_size, csize_t locs_size);
 
+  CodeSection* consts()            { return &_consts; }
   CodeSection* insts()             { return &_insts; }
   CodeSection* stubs()             { return &_stubs; }
-  CodeSection* consts()            { return &_consts; }
 
-  // present sections in order; return NULL at end; insts is #0, etc.
+  // present sections in order; return NULL at end; consts is #0, etc.
   CodeSection* code_section(int n) {
-    // This makes the slightly questionable but portable assumption that
-    // the various members (_insts, _stubs, etc.) are adjacent in the
-    // layout of CodeBuffer.
-    CodeSection* cs = &_insts + n;
+    // This makes the slightly questionable but portable assumption
+    // that the various members (_consts, _insts, _stubs, etc.) are
+    // adjacent in the layout of CodeBuffer.
+    CodeSection* cs = &_consts + n;
     assert(cs->index() == n || !cs->is_allocated(), "sanity");
     return cs;
   }
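
The accessor works only because the CodeSection members are declared in exactly the SECT_* order, which is why _consts had to move ahead of _insts in both the enum and the field list. A sketch of that dependency; as the comment says, &first_member + n is "slightly questionable" (outside the C++ standard's pointer-arithmetic guarantees) but portable in practice:

    #include <cassert>

    struct Section { int _index; };

    struct MiniBuffer {
      enum { SECT_CONSTS = 0, SECT_INSTS, SECT_STUBS, SECT_LIMIT };
      // Must stay adjacent and in enum order for code_section() to work.
      Section _consts, _insts, _stubs;

      MiniBuffer() {
        _consts._index = SECT_CONSTS;
        _insts._index  = SECT_INSTS;
        _stubs._index  = SECT_STUBS;
      }
      Section* code_section(int n) {
        Section* cs = &_consts + n;  // relies on member adjacency
        assert(cs->_index == n);
        return cs;
      }
    };

    int main() {
      MiniBuffer cb;
      for (int n = 0; n < MiniBuffer::SECT_LIMIT; n++) {
        assert(cb.code_section(n)->_index == n);
      }
      return 0;
    }
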
@@ -438,40 +454,41 @@
   void   free_blob();                       // Free the blob, if we own one.
 
   // Properties relative to the insts section:
-  address code_begin() const            { return _insts.start(); }
-  address code_end() const              { return _insts.end();   }
-  void set_code_end(address end)        { _insts.set_end(end); }
-  address code_limit() const            { return _insts.limit(); }
-  address inst_mark() const             { return _insts.mark(); }
-  void set_inst_mark()                  { _insts.set_mark(); }
-  void clear_inst_mark()                { _insts.clear_mark(); }
+  address       insts_begin() const      { return _insts.start();      }
+  address       insts_end() const        { return _insts.end();        }
+  void      set_insts_end(address end)   {        _insts.set_end(end); }
+  address       insts_limit() const      { return _insts.limit();      }
+  address       insts_mark() const       { return _insts.mark();       }
+  void      set_insts_mark()             {        _insts.set_mark();   }
+  void    clear_insts_mark()             {        _insts.clear_mark(); }
 
   // is there anything in the buffer other than the current section?
-  bool    is_pure() const               { return code_size() == total_code_size(); }
+  bool    is_pure() const                { return insts_size() == total_content_size(); }
 
   // size in bytes of output so far in the insts sections
-  csize_t code_size() const             { return _insts.size(); }
+  csize_t insts_size() const             { return _insts.size(); }
 
-  // same as code_size(), except that it asserts there is no non-code here
-  csize_t pure_code_size() const        { assert(is_pure(), "no non-code");
-                                          return code_size(); }
+  // same as insts_size(), except that it asserts there is no non-code here
+  csize_t pure_insts_size() const        { assert(is_pure(), "no non-code");
+                                           return insts_size(); }
   // capacity in bytes of the insts sections
-  csize_t code_capacity() const         { return _insts.capacity(); }
+  csize_t insts_capacity() const         { return _insts.capacity(); }
 
   // number of bytes remaining in the insts section
-  csize_t code_remaining() const        { return _insts.remaining(); }
+  csize_t insts_remaining() const        { return _insts.remaining(); }
 
   // is a given address in the insts section?  (2nd version is end-inclusive)
-  bool code_contains(address pc) const  { return _insts.contains(pc); }
-  bool code_contains2(address pc) const { return _insts.contains2(pc); }
+  bool insts_contains(address pc) const  { return _insts.contains(pc); }
+  bool insts_contains2(address pc) const { return _insts.contains2(pc); }
 
-  // allocated size of code in all sections, when aligned and concatenated
-  // (this is the eventual state of the code in its final CodeBlob)
-  csize_t total_code_size() const;
+  // Allocated size in all sections, when aligned and concatenated
+  // (this is the eventual state of the content in its final
+  // CodeBlob).
+  csize_t total_content_size() const;
 
-  // combined offset (relative to start of insts) of given address,
-  // as eventually found in the final CodeBlob
-  csize_t total_offset_of(address addr) const;
+  // Combined offset (relative to start of first section) of given
+  // section, as eventually found in the final CodeBlob.
+  csize_t total_offset_of(CodeSection* cs) const;
 
   // allocated size of all relocation data, including index, rounded up
   csize_t total_relocation_size() const;
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -652,10 +652,20 @@
         else if (lss_sux == gtr_sux) { cond = If::neq; tsux = lss_sux; fsux = eql_sux; }
         else if (eql_sux == gtr_sux) { cond = If::geq; tsux = eql_sux; fsux = lss_sux; }
         else                         { ShouldNotReachHere();                           }
-           If* canon = new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint());
+        If* canon = new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint());
         if (cmp->x() == cmp->y()) {
           do_If(canon);
         } else {
+          if (compilation()->profile_branches()) {
+            // TODO: If profiling, leave floating point comparisons unoptimized.
+            // We currently do not support profiling of the unordered case.
+            switch(cmp->op()) {
+              case Bytecodes::_fcmpl: case Bytecodes::_fcmpg:
+              case Bytecodes::_dcmpl: case Bytecodes::_dcmpg:
+                set_canonical(x);
+                return;
+            }
+          }
           set_canonical(canon);
           set_bci(cmp->bci());
         }
@@ -663,6 +673,8 @@
     } else if (l->as_InstanceOf() != NULL) {
       // NOTE: Code permanently disabled for now since it leaves the old InstanceOf
       //       instruction in the graph (it is pinned). Need to fix this at some point.
+      //       It should also be left in the graph when generating a profiled method version, or Goto
+      //       has to know that it was an InstanceOf.
       return;
       // pattern: If ((obj instanceof klass) cond rc) => simplify to: IfInstanceOf or: Goto
       InstanceOf* inst = l->as_InstanceOf();
@@ -881,4 +893,5 @@
 void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
 void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
 void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
-void Canonicalizer::do_ProfileCounter(ProfileCounter* x) {}
+void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
+
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,9 +24,11 @@
 
 class Canonicalizer: InstructionVisitor {
  private:
+  Compilation *_compilation;
   Instruction* _canonical;
   int _bci;
 
+  Compilation *compilation()                     { return _compilation; }
   void set_canonical(Value x);
   void set_bci(int bci)                          { _bci = bci; }
   void set_constant(jint x)                      { set_canonical(new Constant(new IntConstant(x))); }
@@ -43,7 +45,9 @@
                         int* scale);
 
  public:
-  Canonicalizer(Value x, int bci)                { _canonical = x; _bci = bci; if (CanonicalizeNodes) x->visit(this); }
+  Canonicalizer(Compilation* c, Value x, int bci) : _compilation(c), _canonical(x), _bci(bci) {
+    if (CanonicalizeNodes) x->visit(this);
+  }
   Value canonical() const                        { return _canonical; }
   int bci() const                                { return _bci; }
 
@@ -92,5 +96,5 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   virtual void do_ProfileCall    (ProfileCall*     x);
-  virtual void do_ProfileCounter (ProfileCounter*  x);
+  virtual void do_ProfileInvoke  (ProfileInvoke*   x);
 };
--- a/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -80,20 +80,21 @@
   }
 };
 
-#ifdef TIERED
 class CounterOverflowStub: public CodeStub {
  private:
   CodeEmitInfo* _info;
   int           _bci;
+  LIR_Opr       _method;
 
 public:
-  CounterOverflowStub(CodeEmitInfo* info, int bci) : _info(info), _bci(bci) {
+  CounterOverflowStub(CodeEmitInfo* info, int bci, LIR_Opr method) : _info(info), _bci(bci), _method(method) {
   }
 
   virtual void emit_code(LIR_Assembler* e);
 
   virtual void visit(LIR_OpVisitState* visitor) {
     visitor->do_slow_case(_info);
+    visitor->do_input(_method);
   }
 
 #ifndef PRODUCT
@@ -101,7 +102,6 @@
 #endif // PRODUCT
 
 };
-#endif // TIERED
 
 class ConversionStub: public CodeStub {
  private:
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -290,9 +290,13 @@
 
   CHECK_BAILOUT_(no_frame_size);
 
+  if (is_profiling()) {
+    method()->build_method_data();
+  }
+
   {
     PhaseTraceTime timeit(_t_buildIR);
-  build_hir();
+    build_hir();
   }
   if (BailoutAfterHIR) {
     BAILOUT_("Bailing out because of -XX:+BailoutAfterHIR", no_frame_size);
@@ -447,6 +451,7 @@
 , _masm(NULL)
 , _has_exception_handlers(false)
 , _has_fpu_code(true)   // pessimistic assumption
+, _would_profile(false)
 , _has_unsafe_access(false)
 , _has_method_handle_invokes(false)
 , _bailout_msg(NULL)
@@ -454,20 +459,23 @@
 , _allocator(NULL)
 , _next_id(0)
 , _next_block_id(0)
-, _code(buffer_blob->instructions_begin(),
-        buffer_blob->instructions_size())
+, _code(buffer_blob)
 , _current_instruction(NULL)
 #ifndef PRODUCT
 , _last_instruction_printed(NULL)
 #endif // PRODUCT
 {
   PhaseTraceTime timeit(_t_compile);
-
   _arena = Thread::current()->resource_area();
   _env->set_compiler_data(this);
   _exception_info_list = new ExceptionInfoList();
   _implicit_exception_table.set_size(0);
   compile_method();
+  if (is_profiling() && _would_profile) {
+    ciMethodData *md = method->method_data();
+    assert (md != NULL, "Should have MDO");
+    md->set_would_profile(_would_profile);
+  }
 }
 
 Compilation::~Compilation() {
--- a/hotspot/src/share/vm/c1/c1_Compilation.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -69,6 +69,7 @@
   bool               _has_exception_handlers;
   bool               _has_fpu_code;
   bool               _has_unsafe_access;
+  bool               _would_profile;
   bool               _has_method_handle_invokes;  // True if this method has MethodHandle invokes.
   const char*        _bailout_msg;
   ExceptionInfoList* _exception_info_list;
@@ -143,6 +144,7 @@
   void set_has_exception_handlers(bool f)        { _has_exception_handlers = f; }
   void set_has_fpu_code(bool f)                  { _has_fpu_code = f; }
   void set_has_unsafe_access(bool f)             { _has_unsafe_access = f; }
+  void set_would_profile(bool f)                 { _would_profile = f; }
   // Add a set of exception handlers covering the given PC offset
   void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers);
   // Statistics gathering
@@ -202,6 +204,30 @@
   void compile_only_this_scope(outputStream* st, IRScope* scope);
   void exclude_this_method();
 #endif // PRODUCT
+
+  bool is_profiling() {
+    return env()->comp_level() == CompLevel_full_profile ||
+           env()->comp_level() == CompLevel_limited_profile;
+  }
+  bool count_invocations() { return is_profiling(); }
+  bool count_backedges()   { return is_profiling(); }
+
+  // Helpers for generation of profile information
+  bool profile_branches() {
+    return env()->comp_level() == CompLevel_full_profile &&
+      C1UpdateMethodData && C1ProfileBranches;
+  }
+  bool profile_calls() {
+    return env()->comp_level() == CompLevel_full_profile &&
+      C1UpdateMethodData && C1ProfileCalls;
+  }
+  bool profile_inlined_calls() {
+    return profile_calls() && C1ProfileInlinedCalls;
+  }
+  bool profile_checkcasts() {
+    return env()->comp_level() == CompLevel_full_profile &&
+      C1UpdateMethodData && C1ProfileCheckcasts;
+  }
 };
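
These predicates centralize the tiered policy: levels 2 (limited profile) and 3 (full profile) both count invocations and backedges, but only level 3 gathers the richer branch/call/checkcast profiles, each further gated by a C1Profile* flag. A standalone sketch of the gating; the enum values and flags mirror HotSpot's names but are redefined here for illustration:

    #include <cstdio>

    enum CompLevel {
      CompLevel_simple            = 1,
      CompLevel_limited_profile   = 2,
      CompLevel_full_profile      = 3,
      CompLevel_full_optimization = 4
    };

    // Stand-ins for the real -XX flags.
    static const bool C1UpdateMethodData = true;
    static const bool C1ProfileBranches  = true;

    static bool is_profiling(CompLevel l) {
      return l == CompLevel_full_profile || l == CompLevel_limited_profile;
    }
    static bool profile_branches(CompLevel l) {
      return l == CompLevel_full_profile && C1UpdateMethodData && C1ProfileBranches;
    }

    int main() {
      for (int l = CompLevel_simple; l <= CompLevel_full_optimization; l++) {
        std::printf("level %d: profiling=%d branches=%d\n", l,
                    (int) is_profiling((CompLevel) l),
                    (int) profile_branches((CompLevel) l));
      }
      return 0;
    }
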
 
 
--- a/hotspot/src/share/vm/c1/c1_Compiler.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compiler.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -39,9 +39,7 @@
   // Name of this compiler
   virtual const char* name()                     { return "C1"; }
 
-#ifdef TIERED
-  virtual bool is_c1() { return true; };
-#endif // TIERED
+  virtual bool is_c1()                           { return true; };
 
   BufferBlob* build_buffer_blob();
 
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -967,6 +967,17 @@
   StoreIndexed* result = new StoreIndexed(array, index, length, type, value, lock_stack());
   append(result);
   _memory->store_value(value);
+
+  if (type == T_OBJECT && is_profiling()) {
+    // Note that we'd collect profile data in this method if we wanted it.
+    compilation()->set_would_profile(true);
+
+    if (profile_checkcasts()) {
+      result->set_profiled_method(method());
+      result->set_profiled_bci(bci());
+      result->set_should_profile(true);
+    }
+  }
 }
 
 
@@ -1144,8 +1155,16 @@
 
 
 void GraphBuilder::_goto(int from_bci, int to_bci) {
-  profile_bci(from_bci);
-  append(new Goto(block_at(to_bci), to_bci <= from_bci));
+  Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
+  if (is_profiling()) {
+    compilation()->set_would_profile(true);
+  }
+  if (profile_branches()) {
+    x->set_profiled_method(method());
+    x->set_profiled_bci(bci());
+    x->set_should_profile(true);
+  }
+  append(x);
 }
 
 
@@ -1153,11 +1172,45 @@
   BlockBegin* tsux = block_at(stream()->get_dest());
   BlockBegin* fsux = block_at(stream()->next_bci());
   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
-  If* if_node = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb))->as_If();
-  if (profile_branches() && (if_node != NULL)) {
-    if_node->set_profiled_method(method());
-    if_node->set_profiled_bci(bci());
-    if_node->set_should_profile(true);
+  Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));
+
+  if (is_profiling()) {
+    If* if_node = i->as_If();
+    if (if_node != NULL) {
+      // Note that we'd collect profile data in this method if we wanted it.
+      compilation()->set_would_profile(true);
+      // At level 2 we need the proper bci to count backedges
+      if_node->set_profiled_bci(bci());
+      if (profile_branches()) {
+        // Successors can be rotated by the canonicalizer, check for this case.
+        if_node->set_profiled_method(method());
+        if_node->set_should_profile(true);
+        if (if_node->tsux() == fsux) {
+          if_node->set_swapped(true);
+        }
+      }
+      return;
+    }
+
+    // Check if this If was reduced to Goto.
+    Goto *goto_node = i->as_Goto();
+    if (goto_node != NULL) {
+      compilation()->set_would_profile(true);
+      if (profile_branches()) {
+        goto_node->set_profiled_method(method());
+        goto_node->set_profiled_bci(bci());
+        goto_node->set_should_profile(true);
+        // Find out which successor is used.
+        if (goto_node->default_sux() == tsux) {
+          goto_node->set_direction(Goto::taken);
+        } else if (goto_node->default_sux() == fsux) {
+          goto_node->set_direction(Goto::not_taken);
+        } else {
+          ShouldNotReachHere();
+        }
+      }
+      return;
+    }
   }
 }
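
The swapped flag records that the canonicalizer rotated the If's successors, so whoever later bumps the MDO branch counters must credit the opposite arm. A schematic of that bookkeeping, with types reduced to bare essentials (this illustrates the invariant, not the actual LIR emission code):

    #include <cstdio>

    struct BranchProfile { long taken; long not_taken; };

    // If the successors were rotated (if_node->set_swapped(true) above),
    // the arm that executes as "taken" is the source program's not-taken
    // arm, so the counter update is inverted.
    static void count_branch(BranchProfile* p, bool taken_at_runtime, bool swapped) {
      bool logical_taken = swapped ? !taken_at_runtime : taken_at_runtime;
      if (logical_taken) p->taken++; else p->not_taken++;
    }

    int main() {
      BranchProfile p = {0, 0};
      count_branch(&p, /*taken_at_runtime=*/ true, /*swapped=*/ true);
      std::printf("taken=%ld not_taken=%ld\n", p.taken, p.not_taken);  // 0 1
      return 0;
    }
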
 
@@ -1698,8 +1751,7 @@
 
   if (recv != NULL &&
       (code == Bytecodes::_invokespecial ||
-       !is_loaded || target->is_final() ||
-       profile_calls())) {
+       !is_loaded || target->is_final())) {
     // invokespecial always needs a NULL check.  invokevirtual where
     // the target is final or where it's not known whether the
     // target is final requires a NULL check.  Otherwise normal
@@ -1709,15 +1761,23 @@
     null_check(recv);
   }
 
-  if (profile_calls()) {
-    assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
-    ciKlass* target_klass = NULL;
-    if (cha_monomorphic_target != NULL) {
-      target_klass = cha_monomorphic_target->holder();
-    } else if (exact_target != NULL) {
-      target_klass = exact_target->holder();
+  if (is_profiling()) {
+    if (recv != NULL && profile_calls()) {
+      null_check(recv);
     }
-    profile_call(recv, target_klass);
+    // Note that we'd collect profile data in this method if we wanted it.
+    compilation()->set_would_profile(true);
+
+    if (profile_calls()) {
+      assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
+      ciKlass* target_klass = NULL;
+      if (cha_monomorphic_target != NULL) {
+        target_klass = cha_monomorphic_target->holder();
+      } else if (exact_target != NULL) {
+        target_klass = exact_target->holder();
+      }
+      profile_call(recv, target_klass);
+    }
   }
 
   Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
@@ -1782,10 +1842,16 @@
   CheckCast* c = new CheckCast(klass, apop(), state_before);
   apush(append_split(c));
   c->set_direct_compare(direct_compare(klass));
-  if (profile_checkcasts()) {
-    c->set_profiled_method(method());
-    c->set_profiled_bci(bci());
-    c->set_should_profile(true);
+
+  if (is_profiling()) {
+    // Note that we'd collect profile data in this method if we wanted it.
+    compilation()->set_would_profile(true);
+
+    if (profile_checkcasts()) {
+      c->set_profiled_method(method());
+      c->set_profiled_bci(bci());
+      c->set_should_profile(true);
+    }
   }
 }
 
@@ -1797,6 +1863,17 @@
   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
   ipush(append_split(i));
   i->set_direct_compare(direct_compare(klass));
+
+  if (is_profiling()) {
+    // Note that we'd collect profile data in this method if we wanted it.
+    compilation()->set_would_profile(true);
+
+    if (profile_checkcasts()) {
+      i->set_profiled_method(method());
+      i->set_profiled_bci(bci());
+      i->set_should_profile(true);
+    }
+  }
 }
 
 
@@ -1868,7 +1945,7 @@
 
 
 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
-  Canonicalizer canon(instr, bci);
+  Canonicalizer canon(compilation(), instr, bci);
   Instruction* i1 = canon.canonical();
   if (i1->bci() != -99) {
     // Canonicalizer returned an instruction which was already
@@ -2651,18 +2728,6 @@
   h->set_depth_first_number(0);
 
   Value l = h;
-  if (profile_branches()) {
-    // Increment the invocation count on entry to the method.  We
-    // can't use profile_invocation here because append isn't setup to
-    // work properly at this point.  The instruction have to be
-    // appended to the instruction stream by hand.
-    Value m = new Constant(new ObjectConstant(compilation()->method()));
-    h->set_next(m, 0);
-    Value p = new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1);
-    m->set_next(p, 0);
-    l = p;
-  }
-
   BlockEnd* g = new Goto(entry, false);
   l->set_next(g, entry->bci());
   h->set_end(g);
@@ -2688,10 +2753,10 @@
   // also necessary when profiling so that there's a single block that
   // can increment the interpreter_invocation_count.
   BlockBegin* new_header_block;
-  if (std_entry->number_of_preds() == 0 && !profile_branches()) {
+  if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) {
+    new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
+  } else {
     new_header_block = std_entry;
-  } else {
-    new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
   }
 
   // setup start block (root for the IR graph)
@@ -3115,16 +3180,21 @@
 
   Values* args = state()->pop_arguments(callee->arg_size());
   ValueStack* locks = lock_stack();
-  if (profile_calls()) {
+
+  if (is_profiling()) {
     // Don't profile in the special case where the root method
     // is the intrinsic
     if (callee != method()) {
-      Value recv = NULL;
-      if (has_receiver) {
-        recv = args->at(0);
-        null_check(recv);
+      // Note that we'd collect profile data in this method if we wanted it.
+      compilation()->set_would_profile(true);
+      if (profile_calls()) {
+        Value recv = NULL;
+        if (has_receiver) {
+          recv = args->at(0);
+          null_check(recv);
+        }
+        profile_call(recv, NULL);
       }
-      profile_call(recv, NULL);
     }
   }
 
@@ -3296,7 +3366,9 @@
 
 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
   assert(!callee->is_native(), "callee must not be native");
-
+  if (count_backedges() && callee->has_loops()) {
+    INLINE_BAILOUT("too complex for tiered");
+  }
   // first perform tests of things it's not possible to inline
   if (callee->has_exception_handlers() &&
       !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
@@ -3365,12 +3437,19 @@
     null_check(recv);
   }
 
-  if (profile_inlined_calls()) {
-    profile_call(recv, holder_known ? callee->holder() : NULL);
+  if (is_profiling()) {
+    // Note that we'd collect profile data in this method if we wanted it.
+    // This may be redundant here...
+    compilation()->set_would_profile(true);
+
+    if (profile_calls()) {
+      profile_call(recv, holder_known ? callee->holder() : NULL);
+    }
+    if (profile_inlined_calls()) {
+      profile_invocation(callee, state(), 0);
+    }
   }
 
-  profile_invocation(callee);
-
   // Introduce a new callee continuation point - if the callee has
   // more than one return instruction or the return does not allow
   // fall-through of control flow, all return instructions of the
@@ -3755,30 +3834,10 @@
 }
 #endif // PRODUCT
 
-
 void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
   append(new ProfileCall(method(), bci(), recv, known_holder));
 }
 
-
-void GraphBuilder::profile_invocation(ciMethod* callee) {
-  if (profile_calls()) {
-    // increment the interpreter_invocation_count for the inlinee
-    Value m = append(new Constant(new ObjectConstant(callee)));
-    append(new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1));
-  }
+void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state, int bci) {
+  append(new ProfileInvoke(callee, state, bci));
 }
-
-
-void GraphBuilder::profile_bci(int bci) {
-  if (profile_branches()) {
-    ciMethodData* md = method()->method_data();
-    if (md == NULL) {
-      BAILOUT("out of memory building methodDataOop");
-    }
-    ciProfileData* data = md->bci_to_data(bci);
-    assert(data != NULL && data->is_JumpData(), "need JumpData for goto");
-    Value mdo = append(new Constant(new ObjectConstant(md)));
-    append(new ProfileCounter(mdo, md->byte_offset_of_slot(data, JumpData::taken_offset()), 1));
-  }
-}
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -342,27 +342,17 @@
 
   NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);)
 
-  // methodDataOop profiling helpers
   void profile_call(Value recv, ciKlass* predicted_holder);
-  void profile_invocation(ciMethod* method);
-  void profile_bci(int bci);
+  void profile_invocation(ciMethod* inlinee, ValueStack* state, int bci);
 
-  // Helpers for generation of profile information
-  bool profile_branches() {
-    return _compilation->env()->comp_level() == CompLevel_fast_compile &&
-      Tier1UpdateMethodData && Tier1ProfileBranches;
-  }
-  bool profile_calls() {
-    return _compilation->env()->comp_level() == CompLevel_fast_compile &&
-      Tier1UpdateMethodData && Tier1ProfileCalls;
-  }
-  bool profile_inlined_calls() {
-    return profile_calls() && Tier1ProfileInlinedCalls;
-  }
-  bool profile_checkcasts() {
-    return _compilation->env()->comp_level() == CompLevel_fast_compile &&
-      Tier1UpdateMethodData && Tier1ProfileCheckcasts;
-  }
+  // Shortcuts to profiling control.
+  bool is_profiling()          { return _compilation->is_profiling();          }
+  bool count_invocations()     { return _compilation->count_invocations();     }
+  bool count_backedges()       { return _compilation->count_backedges();       }
+  bool profile_branches()      { return _compilation->profile_branches();      }
+  bool profile_calls()         { return _compilation->profile_calls();         }
+  bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
+  bool profile_checkcasts()    { return _compilation->profile_checkcasts();    }
 
  public:
   NOT_PRODUCT(void print_stats();)
--- a/hotspot/src/share/vm/c1/c1_IR.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_IR.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -296,19 +296,21 @@
 
 void IR::optimize() {
   Optimizer opt(this);
-  if (DoCEE) {
-    opt.eliminate_conditional_expressions();
+  if (!compilation()->profile_branches()) {
+    if (DoCEE) {
+      opt.eliminate_conditional_expressions();
 #ifndef PRODUCT
-    if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); }
-    if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); }
+      if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); }
+      if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); }
 #endif
-  }
-  if (EliminateBlocks) {
-    opt.eliminate_blocks();
+    }
+    if (EliminateBlocks) {
+      opt.eliminate_blocks();
 #ifndef PRODUCT
-    if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); }
-    if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); }
+      if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); }
+      if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); }
 #endif
+    }
   }
   if (EliminateNullChecks) {
     opt.eliminate_null_checks();
@@ -484,6 +486,8 @@
   BitMap2D   _loop_map;            // two-dimensional bit set: a bit is set if a block is contained in a loop
   BlockList  _work_list;           // temporary list (used in mark_loops and compute_order)
 
+  Compilation* _compilation;
+
   // accessors for _visited_blocks and _active_blocks
   void init_visited()                     { _active_blocks.clear(); _visited_blocks.clear(); }
   bool is_visited(BlockBegin* b) const    { return _visited_blocks.at(b->block_id()); }
@@ -526,8 +530,9 @@
   NOT_PRODUCT(void print_blocks();)
   DEBUG_ONLY(void verify();)
 
+  Compilation* compilation() const { return _compilation; }
  public:
-  ComputeLinearScanOrder(BlockBegin* start_block);
+  ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block);
 
   // accessors for final result
   BlockList* linear_scan_order() const    { return _linear_scan_order; }
@@ -535,7 +540,7 @@
 };
 
 
-ComputeLinearScanOrder::ComputeLinearScanOrder(BlockBegin* start_block) :
+ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block) :
   _max_block_id(BlockBegin::number_of_blocks()),
   _num_blocks(0),
   _num_loops(0),
@@ -547,13 +552,18 @@
   _loop_end_blocks(8),
   _work_list(8),
   _linear_scan_order(NULL), // initialized later with correct size
-  _loop_map(0, 0)           // initialized later with correct size
+  _loop_map(0, 0),          // initialized later with correct size
+  _compilation(c)
 {
   TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");
 
   init_visited();
   count_edges(start_block, NULL);
 
+  if (compilation()->is_profiling()) {
+    compilation()->method()->method_data()->set_compilation_stats(_num_loops, _num_blocks);
+  }
+
   if (_num_loops > 0) {
     mark_loops();
     clear_non_natural_loops(start_block);
@@ -1130,7 +1140,7 @@
 void IR::compute_code() {
   assert(is_valid(), "IR must be valid");
 
-  ComputeLinearScanOrder compute_order(start());
+  ComputeLinearScanOrder compute_order(compilation(), start());
   _num_loops = compute_order.num_loops();
   _code = compute_order.linear_scan_order();
 }
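
Passing Compilation into ComputeLinearScanOrder lets the pass record the loop and block counts it computes anyway into the method's profile data when profiling is on. A small standalone model of that flow; the types here are illustrative:

    // Sketch (illustrative types): count_edges() discovers the counts;
    // when profiling, they are stored on the method's profile data.
    #include <cstdio>

    struct MethodData {
      int num_loops, num_blocks;
      void set_compilation_stats(int loops, int blocks) {
        num_loops = loops; num_blocks = blocks;
      }
    };

    struct Compilation {
      bool profiling;
      MethodData md;
      bool is_profiling() const { return profiling; }
    };

    void compute_linear_scan_order(Compilation* c) {
      int num_blocks = 12, num_loops = 2;  // pretend count_edges() found these
      if (c->is_profiling())
        c->md.set_compilation_stats(num_loops, num_blocks);
    }

    int main() {
      Compilation c = { true, { 0, 0 } };
      compute_linear_scan_order(&c);
      std::printf("loops=%d blocks=%d\n", c.md.num_loops, c.md.num_blocks);
    }
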
--- a/hotspot/src/share/vm/c1/c1_Instruction.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Instruction.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -740,9 +740,9 @@
 
 
 #ifndef PRODUCT
-  #define TRACE_PHI(code) if (PrintPhiFunctions) { code; }
+   #define TRACE_PHI(code) if (PrintPhiFunctions) { code; }
 #else
-  #define TRACE_PHI(coce)
+   #define TRACE_PHI(code)
 #endif
 
 
@@ -1011,3 +1011,7 @@
 void Throw::state_values_do(ValueVisitor* f) {
   BlockEnd::state_values_do(f);
 }
+
+void ProfileInvoke::state_values_do(ValueVisitor* f) {
+  if (state() != NULL) state()->values_do(f);
+}
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -98,7 +98,7 @@
 class         UnsafePrefetchRead;
 class         UnsafePrefetchWrite;
 class   ProfileCall;
-class   ProfileCounter;
+class   ProfileInvoke;
 
 // A Value is a reference to the instruction creating the value
 typedef Instruction* Value;
@@ -195,7 +195,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) = 0;
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
   virtual void do_ProfileCall    (ProfileCall*     x) = 0;
-  virtual void do_ProfileCounter (ProfileCounter*  x) = 0;
+  virtual void do_ProfileInvoke  (ProfileInvoke*   x) = 0;
 };
 
 
@@ -906,11 +906,13 @@
  private:
   Value       _value;
 
+  ciMethod* _profiled_method;
+  int       _profiled_bci;
  public:
   // creation
   StoreIndexed(Value array, Value index, Value length, BasicType elt_type, Value value, ValueStack* lock_stack)
   : AccessIndexed(array, index, length, elt_type, lock_stack)
-  , _value(value)
+  , _value(value), _profiled_method(NULL), _profiled_bci(0)
   {
     set_flag(NeedsWriteBarrierFlag, (as_ValueType(elt_type)->is_object()));
     set_flag(NeedsStoreCheckFlag, (as_ValueType(elt_type)->is_object()));
@@ -923,7 +925,13 @@
   IRScope* scope() const;                        // the state's scope
   bool needs_write_barrier() const               { return check_flag(NeedsWriteBarrierFlag); }
   bool needs_store_check() const                 { return check_flag(NeedsStoreCheckFlag); }
-
+  // Helpers for methodDataOop profiling
+  void set_should_profile(bool value)                { set_flag(ProfileMDOFlag, value); }
+  void set_profiled_method(ciMethod* method)         { _profiled_method = method;   }
+  void set_profiled_bci(int bci)                     { _profiled_bci = bci;         }
+  bool      should_profile() const                   { return check_flag(ProfileMDOFlag); }
+  ciMethod* profiled_method() const                  { return _profiled_method;     }
+  int       profiled_bci() const                     { return _profiled_bci;        }
   // generic
   virtual void input_values_do(ValueVisitor* f)   { AccessIndexed::input_values_do(f); f->visit(&_value); }
 };
@@ -1297,9 +1305,14 @@
   Value       _obj;
   ValueStack* _state_before;
 
+  ciMethod* _profiled_method;
+  int       _profiled_bci;
+
  public:
   // creation
-  TypeCheck(ciKlass* klass, Value obj, ValueType* type, ValueStack* state_before) : StateSplit(type), _klass(klass), _obj(obj), _state_before(state_before) {
+  TypeCheck(ciKlass* klass, Value obj, ValueType* type, ValueStack* state_before)
+  : StateSplit(type), _klass(klass), _obj(obj), _state_before(state_before),
+    _profiled_method(NULL), _profiled_bci(0) {
     ASSERT_VALUES
     set_direct_compare(false);
   }
@@ -1318,20 +1331,22 @@
   virtual bool can_trap() const                  { return true; }
   virtual void input_values_do(ValueVisitor* f)   { StateSplit::input_values_do(f); f->visit(&_obj); }
   virtual void other_values_do(ValueVisitor* f);
+
+  // Helpers for methodDataOop profiling
+  void set_should_profile(bool value)                { set_flag(ProfileMDOFlag, value); }
+  void set_profiled_method(ciMethod* method)         { _profiled_method = method;   }
+  void set_profiled_bci(int bci)                     { _profiled_bci = bci;         }
+  bool      should_profile() const                   { return check_flag(ProfileMDOFlag); }
+  ciMethod* profiled_method() const                  { return _profiled_method;     }
+  int       profiled_bci() const                     { return _profiled_bci;        }
 };
 
 
 LEAF(CheckCast, TypeCheck)
- private:
-  ciMethod* _profiled_method;
-  int       _profiled_bci;
-
  public:
   // creation
   CheckCast(ciKlass* klass, Value obj, ValueStack* state_before)
-  : TypeCheck(klass, obj, objectType, state_before)
-  , _profiled_method(NULL)
-  , _profiled_bci(0) {}
+  : TypeCheck(klass, obj, objectType, state_before) {}
 
   void set_incompatible_class_change_check() {
     set_flag(ThrowIncompatibleClassChangeErrorFlag, true);
@@ -1340,17 +1355,8 @@
     return check_flag(ThrowIncompatibleClassChangeErrorFlag);
   }
 
-  // Helpers for methodDataOop profiling
-  void set_should_profile(bool value)                { set_flag(ProfileMDOFlag, value); }
-  void set_profiled_method(ciMethod* method)         { _profiled_method = method;   }
-  void set_profiled_bci(int bci)                     { _profiled_bci = bci;         }
-  bool      should_profile() const                   { return check_flag(ProfileMDOFlag); }
-  ciMethod* profiled_method() const                  { return _profiled_method;     }
-  int       profiled_bci() const                     { return _profiled_bci;        }
-
   ciType* declared_type() const;
   ciType* exact_type() const;
-
 };
 
 
@@ -1734,19 +1740,44 @@
 
 LEAF(Goto, BlockEnd)
  public:
+  enum Direction {
+    none,            // Just a regular goto
+    taken, not_taken // Goto produced from If
+  };
+ private:
+  ciMethod*   _profiled_method;
+  int         _profiled_bci;
+  Direction   _direction;
+ public:
   // creation
-  Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false) : BlockEnd(illegalType, state_before, is_safepoint) {
+  Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false)
+    : BlockEnd(illegalType, state_before, is_safepoint)
+    , _profiled_method(NULL)
+    , _profiled_bci(0)
+    , _direction(none) {
     BlockList* s = new BlockList(1);
     s->append(sux);
     set_sux(s);
   }
 
-  Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint) {
+  Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint)
+                                           , _profiled_method(NULL)
+                                           , _profiled_bci(0)
+                                           , _direction(none) {
     BlockList* s = new BlockList(1);
     s->append(sux);
     set_sux(s);
   }
 
+  bool should_profile() const                    { return check_flag(ProfileMDOFlag); }
+  ciMethod* profiled_method() const              { return _profiled_method; } // set only for profiled branches
+  int profiled_bci() const                       { return _profiled_bci; }
+  Direction direction() const                    { return _direction; }
+
+  void set_should_profile(bool value)            { set_flag(ProfileMDOFlag, value); }
+  void set_profiled_method(ciMethod* method)     { _profiled_method = method; }
+  void set_profiled_bci(int bci)                 { _profiled_bci = bci; }
+  void set_direction(Direction d)                { _direction = d; }
 };
 
 
@@ -1757,6 +1788,8 @@
   Value       _y;
   ciMethod*   _profiled_method;
   int         _profiled_bci; // Canonicalizer may alter bci of If node
+  bool        _swapped;      // Is the order reversed with respect to the original If in the
+                             // bytecode stream?
  public:
   // creation
   // unordered_is_true is valid for float/double compares only
@@ -1767,6 +1800,7 @@
   , _y(y)
   , _profiled_method(NULL)
   , _profiled_bci(0)
+  , _swapped(false)
   {
     ASSERT_VALUES
     set_flag(UnorderedIsTrueFlag, unordered_is_true);
@@ -1788,7 +1822,8 @@
   BlockBegin* usux() const                       { return sux_for(unordered_is_true()); }
   bool should_profile() const                    { return check_flag(ProfileMDOFlag); }
   ciMethod* profiled_method() const              { return _profiled_method; } // set only for profiled branches
-  int profiled_bci() const                       { return _profiled_bci; }    // set only for profiled branches
+  int profiled_bci() const                       { return _profiled_bci; }    // set for profiled branches and tiered
+  bool is_swapped() const                        { return _swapped; }
 
   // manipulation
   void swap_operands() {
@@ -1807,7 +1842,7 @@
   void set_should_profile(bool value)             { set_flag(ProfileMDOFlag, value); }
   void set_profiled_method(ciMethod* method)      { _profiled_method = method; }
   void set_profiled_bci(int bci)                  { _profiled_bci = bci;       }
-
+  void set_swapped(bool value)                    { _swapped = value;         }
   // generic
   virtual void input_values_do(ValueVisitor* f)   { BlockEnd::input_values_do(f); f->visit(&_x); f->visit(&_y); }
 };
@@ -2235,7 +2270,6 @@
   }
 };
 
-
 LEAF(ProfileCall, Instruction)
  private:
   ciMethod* _method;
@@ -2263,35 +2297,32 @@
   virtual void input_values_do(ValueVisitor* f)   { if (_recv != NULL) f->visit(&_recv); }
 };
 
+// Used to trip the invocation counter of an inlined method
 
-//
-// Simple node representing a counter update generally used for updating MDOs
-//
-LEAF(ProfileCounter, Instruction)
+LEAF(ProfileInvoke, Instruction)
  private:
-  Value     _mdo;
-  int       _offset;
-  int       _increment;
+  ciMethod*   _inlinee;
+  ValueStack* _state;
+  int         _bci_of_invoke;
 
  public:
-  ProfileCounter(Value mdo, int offset, int increment = 1)
+  ProfileInvoke(ciMethod* inlinee,  ValueStack* state, int bci)
     : Instruction(voidType)
-    , _mdo(mdo)
-    , _offset(offset)
-    , _increment(increment)
+    , _inlinee(inlinee)
+    , _state(state)
+    , _bci_of_invoke(bci)
   {
-    // The ProfileCounter has side-effects and must occur precisely where located
+    // The ProfileInvoke has side-effects and must occur precisely where located
     pin();
   }
 
-  Value mdo()      { return _mdo; }
-  int offset()     { return _offset; }
-  int increment()  { return _increment; }
-
-  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_mdo); }
+  ciMethod* inlinee()      { return _inlinee; }
+  ValueStack* state()      { return _state; }
+  int bci_of_invoke()      { return _bci_of_invoke; }
+  virtual void input_values_do(ValueVisitor*)   {}
+  virtual void state_values_do(ValueVisitor*);
 };
 
-
 class BlockPair: public CompilationResourceObj {
  private:
   BlockBegin* _from;
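
The Direction enum on Goto and the _swapped bit on If exist so branch profiling can still credit the right BranchData cell after the canonicalizer folds an If into a Goto or swaps an If's operands. A standalone model of the counter selection; in HotSpot the real logic is split between Goto lowering and profile_branch in c1_LIRGenerator.cpp, and the names here are illustrative:

    // Model: which profile counter a folded/swapped branch should bump.
    #include <cstdio>

    enum Direction { none, taken, not_taken };

    const char* counter_for(Direction d, bool swapped) {
      if (d == none) return "JumpData::taken";   // plain goto
      bool t = (d == taken);
      if (swapped) t = !t;                       // operands were exchanged
      return t ? "BranchData::taken" : "BranchData::not_taken";
    }

    int main() {
      // A taken branch whose operands were swapped credits the other cell.
      std::printf("%s\n", counter_for(taken, true)); // BranchData::not_taken
    }
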
--- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -819,7 +819,6 @@
   output()->put(')');
 }
 
-
 void InstructionPrinter::do_ProfileCall(ProfileCall* x) {
   output()->print("profile ");
   print_value(x->recv());
@@ -831,20 +830,11 @@
   output()->put(')');
 }
 
-
-void InstructionPrinter::do_ProfileCounter(ProfileCounter* x) {
+void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
+  output()->print("profile_invoke ");
+  output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
+  output()->put(')');
 
-  ObjectConstant* oc = x->mdo()->type()->as_ObjectConstant();
-  if (oc != NULL && oc->value()->is_method() &&
-      x->offset() == methodOopDesc::interpreter_invocation_counter_offset_in_bytes()) {
-    print_value(x->mdo());
-    output()->print(".interpreter_invocation_count += %d", x->increment());
-  } else {
-    output()->print("counter [");
-    print_value(x->mdo());
-    output()->print(" + %d] += %d", x->offset(), x->increment());
-  }
 }
 
-
 #endif // PRODUCT
--- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -123,6 +123,6 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   virtual void do_ProfileCall    (ProfileCall*     x);
-  virtual void do_ProfileCounter (ProfileCounter*  x);
+  virtual void do_ProfileInvoke  (ProfileInvoke*   x);
 };
 #endif // PRODUCT
--- a/hotspot/src/share/vm/c1/c1_LIR.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIR.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -345,9 +345,8 @@
 LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
                                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3,
                                  bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch,
-                                 CodeStub* stub,
-                                 ciMethod* profiled_method,
-                                 int profiled_bci)
+                                 CodeStub* stub)
+
   : LIR_Op(code, result, NULL)
   , _object(object)
   , _array(LIR_OprFact::illegalOpr)
@@ -359,8 +358,10 @@
   , _stub(stub)
   , _info_for_patch(info_for_patch)
   , _info_for_exception(info_for_exception)
-  , _profiled_method(profiled_method)
-  , _profiled_bci(profiled_bci) {
+  , _profiled_method(NULL)
+  , _profiled_bci(-1)
+  , _should_profile(false)
+{
   if (code == lir_checkcast) {
     assert(info_for_exception != NULL, "checkcast throws exceptions");
   } else if (code == lir_instanceof) {
@@ -372,7 +373,7 @@
 
 
 
-LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci)
+LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception)
   : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)
   , _object(object)
   , _array(array)
@@ -384,8 +385,10 @@
   , _stub(NULL)
   , _info_for_patch(NULL)
   , _info_for_exception(info_for_exception)
-  , _profiled_method(profiled_method)
-  , _profiled_bci(profiled_bci) {
+  , _profiled_method(NULL)
+  , _profiled_bci(-1)
+  , _should_profile(false)
+{
   if (code == lir_store_check) {
     _stub = new ArrayStoreExceptionStub(info_for_exception);
     assert(info_for_exception != NULL, "store_check throws exceptions");
@@ -495,6 +498,8 @@
     case lir_monaddr:        // input and result always valid, info always invalid
     case lir_null_check:     // input and info always valid, result always invalid
     case lir_move:           // input and result always valid, may have info
+    case lir_pack64:         // input and result always valid
+    case lir_unpack64:       // input and result always valid
     case lir_prefetchr:      // input always valid, result and info always invalid
     case lir_prefetchw:      // input always valid, result and info always invalid
     {
@@ -903,7 +908,6 @@
       assert(opProfileCall->_tmp1->is_valid(), "used");  do_temp(opProfileCall->_tmp1);
       break;
     }
-
   default:
     ShouldNotReachHere();
   }
@@ -1041,12 +1045,10 @@
   masm->emit_delay(this);
 }
 
-
 void LIR_OpProfileCall::emit_code(LIR_Assembler* masm) {
   masm->emit_profile_call(this);
 }
 
-
 // LIR_List
 LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
   : _operations(8)
@@ -1364,19 +1366,29 @@
                           LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
                           CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
                           ciMethod* profiled_method, int profiled_bci) {
-  append(new LIR_OpTypeCheck(lir_checkcast, result, object, klass,
-                             tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub,
-                             profiled_method, profiled_bci));
+  LIR_OpTypeCheck* c = new LIR_OpTypeCheck(lir_checkcast, result, object, klass,
+                                           tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub);
+  if (profiled_method != NULL) {
+    c->set_profiled_method(profiled_method);
+    c->set_profiled_bci(profiled_bci);
+    c->set_should_profile(true);
+  }
+  append(c);
 }
 
-
-void LIR_List::instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch) {
-  append(new LIR_OpTypeCheck(lir_instanceof, result, object, klass, tmp1, tmp2, tmp3, fast_check, NULL, info_for_patch, NULL, NULL, 0));
+void LIR_List::instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci) {
+  LIR_OpTypeCheck* c = new LIR_OpTypeCheck(lir_instanceof, result, object, klass, tmp1, tmp2, tmp3, fast_check, NULL, info_for_patch, NULL);
+  if (profiled_method != NULL) {
+    c->set_profiled_method(profiled_method);
+    c->set_profiled_bci(profiled_bci);
+    c->set_should_profile(true);
+  }
+  append(c);
 }
 
 
 void LIR_List::store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception) {
-  append(new LIR_OpTypeCheck(lir_store_check, object, array, tmp1, tmp2, tmp3, info_for_exception, NULL, 0));
+  append(new LIR_OpTypeCheck(lir_store_check, object, array, tmp1, tmp2, tmp3, info_for_exception));
 }
 
 
@@ -1611,6 +1623,8 @@
      case lir_convert:               s = "convert";       break;
      case lir_alloc_object:          s = "alloc_obj";     break;
      case lir_monaddr:               s = "mon_addr";      break;
+     case lir_pack64:                s = "pack64";        break;
+     case lir_unpack64:              s = "unpack64";      break;
      // LIR_Op2
      case lir_cmp:                   s = "cmp";           break;
      case lir_cmp_l2i:               s = "cmp_l2i";       break;
@@ -1664,7 +1678,6 @@
      case lir_cas_int:               s = "cas_int";      break;
      // LIR_OpProfileCall
      case lir_profile_call:          s = "profile_call";  break;
-
      case lir_none:                  ShouldNotReachHere();break;
     default:                         s = "illegal_op";    break;
   }
@@ -1922,7 +1935,6 @@
   tmp1()->print(out);          out->print(" ");
 }
 
-
 #endif // PRODUCT
 
 // Implementation of LIR_InsertionBuffer
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -849,6 +849,8 @@
       , lir_monaddr
       , lir_roundfp
       , lir_safepoint
+      , lir_pack64
+      , lir_unpack64
       , lir_unwind
   , end_op1
   , begin_op2
@@ -1464,18 +1466,16 @@
   CodeEmitInfo* _info_for_patch;
   CodeEmitInfo* _info_for_exception;
   CodeStub*     _stub;
-  // Helpers for Tier1UpdateMethodData
   ciMethod*     _profiled_method;
   int           _profiled_bci;
+  bool          _should_profile;
 
 public:
   LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
-                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
-                  ciMethod* profiled_method, int profiled_bci);
+                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
   LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
-                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception,
-                  ciMethod* profiled_method, int profiled_bci);
+                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
 
   LIR_Opr object() const                         { return _object;         }
   LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
@@ -1489,8 +1489,12 @@
   CodeStub* stub() const                         { return _stub;           }
 
   // methodDataOop profiling
-  ciMethod* profiled_method()                    { return _profiled_method; }
-  int       profiled_bci()                       { return _profiled_bci; }
+  void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
+  void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
+  void set_should_profile(bool b)                { _should_profile = b;       }
+  ciMethod* profiled_method() const              { return _profiled_method;   }
+  int       profiled_bci() const                 { return _profiled_bci;      }
+  bool      should_profile() const               { return _should_profile;    }
 
   virtual void emit_code(LIR_Assembler* masm);
   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
@@ -1771,7 +1775,6 @@
   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
 };
 
-
 class LIR_InsertionBuffer;
 
 //--------------------------------LIR_List---------------------------------------------------
@@ -1835,6 +1838,7 @@
   //---------- mutators ---------------
   void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
   void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
+  void remove_at(int i)                          { _operations.remove_at(i); }
 
   //---------- printing -------------
   void print_instructions() PRODUCT_RETURN;
@@ -1908,6 +1912,9 @@
   void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
 
+  void   pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64,   src, dst, T_LONG, lir_patch_none, NULL)); }
+  void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }
+
   void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
   void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
     append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
@@ -2034,15 +2041,17 @@
 
   void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
 
+  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
+  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
+
   void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
                   CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
                   ciMethod* profiled_method, int profiled_bci);
-  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
-  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
-
   // methodDataOop profiling
-  void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); }
+  void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
+    append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass));
+  }
 };
 
 void print_LIR(BlockList* blocks);
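
LIR_OpTypeCheck no longer takes profiling arguments in its constructors; the LIR_List factories attach them through the new setters only when a profiled method is actually present. A self-contained sketch of that optional-setter pattern; the types are illustrative, not HotSpot's:

    // Sketch: optional profiling data attached via setters, not ctor params.
    #include <cstdio>

    struct TypeCheckOp {
      const char* profiled_method = nullptr;
      int  profiled_bci   = -1;
      bool should_profile = false;
      void set_profiled_method(const char* m) { profiled_method = m; }
      void set_profiled_bci(int bci)          { profiled_bci = bci; }
      void set_should_profile(bool b)         { should_profile = b; }
    };

    TypeCheckOp* make_checkcast(const char* profiled_method, int profiled_bci) {
      TypeCheckOp* op = new TypeCheckOp();
      if (profiled_method != nullptr) {   // mirrors LIR_List::checkcast
        op->set_profiled_method(profiled_method);
        op->set_profiled_bci(profiled_bci);
        op->set_should_profile(true);
      }
      return op;
    }

    int main() {
      TypeCheckOp* op = make_checkcast("Foo::bar", 17);
      std::printf("profile=%d bci=%d\n", op->should_profile, op->profiled_bci);
      delete op;
    }
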
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -548,6 +548,16 @@
       monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
       break;
 
+#ifdef SPARC
+    case lir_pack64:
+      pack64(op->in_opr(), op->result_opr());
+      break;
+
+    case lir_unpack64:
+      unpack64(op->in_opr(), op->result_opr());
+      break;
+#endif
+
     case lir_unwind:
       unwind_op(op->in_opr());
       break;
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -187,6 +187,7 @@
   void emit_alloc_obj(LIR_OpAllocObj* op);
   void emit_alloc_array(LIR_OpAllocArray* op);
   void emit_opTypeCheck(LIR_OpTypeCheck* op);
+  void emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null);
   void emit_compare_and_swap(LIR_OpCompareAndSwap* op);
   void emit_lock(LIR_OpLock* op);
   void emit_call(LIR_OpJavaCall* op);
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -480,16 +480,6 @@
 }
 
 
-// increment a counter returning the incremented value
-LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
-  LIR_Address* counter = new LIR_Address(base, offset, T_INT);
-  LIR_Opr result = new_register(T_INT);
-  __ load(counter, result);
-  __ add(result, LIR_OprFact::intConst(increment), result);
-  __ store(result, counter);
-  return result;
-}
-
 
 void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
   LIR_Opr result_op = result;
@@ -821,7 +811,6 @@
   return tmp;
 }
 
-
 void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
   if (if_instr->should_profile()) {
     ciMethod* method = if_instr->profiled_method();
@@ -836,24 +825,32 @@
     assert(data->is_BranchData(), "need BranchData for two-way branches");
     int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
     int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
+    if (if_instr->is_swapped()) {
+      int t = taken_count_offset;
+      taken_count_offset = not_taken_count_offset;
+      not_taken_count_offset = t;
+    }
+
     LIR_Opr md_reg = new_register(T_OBJECT);
-    __ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
-    LIR_Opr data_offset_reg = new_register(T_INT);
+    __ oop2reg(md->constant_encoding(), md_reg);
+
+    LIR_Opr data_offset_reg = new_pointer_register();
     __ cmove(lir_cond(cond),
-             LIR_OprFact::intConst(taken_count_offset),
-             LIR_OprFact::intConst(not_taken_count_offset),
+             LIR_OprFact::intptrConst(taken_count_offset),
+             LIR_OprFact::intptrConst(not_taken_count_offset),
              data_offset_reg);
-    LIR_Opr data_reg = new_register(T_INT);
-    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
+
+    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
+    LIR_Opr data_reg = new_pointer_register();
+    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
     __ move(LIR_OprFact::address(data_addr), data_reg);
+    // Use leal instead of add to avoid destroying condition codes on x86
     LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
-    // Use leal instead of add to avoid destroying condition codes on x86
     __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
     __ move(data_reg, LIR_OprFact::address(data_addr));
   }
 }
 
-
 // Phi technique:
 // This is about passing live values from one basic block to the other.
 // In code generated with Java it is rather rare that more than one
@@ -1305,8 +1302,6 @@
   LIR_Opr flag_val = new_register(T_INT);
   __ load(mark_active_flag_addr, flag_val);
 
-  LabelObj* start_store = new LabelObj();
-
   LIR_PatchCode pre_val_patch_code =
     patch ? lir_patch_normal : lir_patch_none;
 
@@ -1757,7 +1752,7 @@
 
 #ifndef PRODUCT
   if (PrintC1Statistics) {
-    increment_counter(Runtime1::throw_count_address());
+    increment_counter(Runtime1::throw_count_address(), T_INT);
   }
 #endif
 
@@ -2191,12 +2186,41 @@
     ValueStack* state = x->state_before() ? x->state_before() : x->state();
 
     // increment backedge counter if needed
-    increment_backedge_counter(state_for(x, state));
-
+    CodeEmitInfo* info = state_for(x, state);
+    increment_backedge_counter(info, info->bci());
     CodeEmitInfo* safepoint_info = state_for(x, state);
     __ safepoint(safepoint_poll_register(), safepoint_info);
   }
 
+  // A Goto can be a folded If; handle that case here.
+  if (x->should_profile()) {
+    ciMethod* method = x->profiled_method();
+    assert(method != NULL, "method should be set if branch is profiled");
+    ciMethodData* md = method->method_data();
+    if (md == NULL) {
+      bailout("out of memory building methodDataOop");
+      return;
+    }
+    ciProfileData* data = md->bci_to_data(x->profiled_bci());
+    assert(data != NULL, "must have profiling data");
+    int offset;
+    if (x->direction() == Goto::taken) {
+      assert(data->is_BranchData(), "need BranchData for two-way branches");
+      offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
+    } else if (x->direction() == Goto::not_taken) {
+      assert(data->is_BranchData(), "need BranchData for two-way branches");
+      offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
+    } else {
+      assert(data->is_JumpData(), "need JumpData for branches");
+      offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
+    }
+    LIR_Opr md_reg = new_register(T_OBJECT);
+    __ oop2reg(md->constant_encoding(), md_reg);
+
+    increment_counter(new LIR_Address(md_reg, offset,
+                                      NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
+  }
+
   // emit phi-instruction move after safepoint since this simplifies
   // describing the state as the safepoint.
   move_to_phi(x->state());
@@ -2279,7 +2303,10 @@
   }
 
   // increment invocation counters if needed
-  increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));
+  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
+    CodeEmitInfo* info = new CodeEmitInfo(InvocationEntryBci, scope()->start()->state(), NULL);
+    increment_invocation_counter(info);
+  }
 
   // all blocks with a successor must end with an unconditional jump
   // to the successor even if they are consecutive
@@ -2613,12 +2640,12 @@
   }
 }
 
-
 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
   // Need recv in a temporary register so it interferes with the other temporaries
   LIR_Opr recv = LIR_OprFact::illegalOpr;
   LIR_Opr mdo = new_register(T_OBJECT);
-  LIR_Opr tmp = new_register(T_INT);
+  // tmp is used to hold the counters on SPARC
+  LIR_Opr tmp = new_pointer_register();
   if (x->recv() != NULL) {
     LIRItem value(x->recv(), this);
     value.load_item();
@@ -2628,14 +2655,69 @@
   __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
 }
 
-
-void LIRGenerator::do_ProfileCounter(ProfileCounter* x) {
-  LIRItem mdo(x->mdo(), this);
-  mdo.load_item();
-
-  increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment());
+void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
+  // We can safely ignore accessors here, since C2 will inline them anyway;
+  // accessors are also always mature.
+  if (!x->inlinee()->is_accessor()) {
+    CodeEmitInfo* info = state_for(x, x->state(), true);
+    // Increment the invocation counter; don't notify the runtime, because we don't inline loops.
+    increment_event_counter_impl(info, x->inlinee(), 0, InvocationEntryBci, false, false);
+  }
+}
+
+void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
+  int freq_log;
+  int level = compilation()->env()->comp_level();
+  if (level == CompLevel_limited_profile) {
+    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
+  } else if (level == CompLevel_full_profile) {
+    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
+  } else {
+    ShouldNotReachHere();
+  }
+  // Increment the appropriate invocation/backedge counter and notify the runtime.
+  increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
 }
 
+void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
+                                                ciMethod *method, int frequency,
+                                                int bci, bool backedge, bool notify) {
+  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
+  int level = _compilation->env()->comp_level();
+  assert(level > CompLevel_simple, "Shouldn't be here");
+
+  int offset = -1;
+  LIR_Opr counter_holder = new_register(T_OBJECT);
+  LIR_Opr meth;
+  if (level == CompLevel_limited_profile) {
+    offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() :
+                                 methodOopDesc::invocation_counter_offset());
+    __ oop2reg(method->constant_encoding(), counter_holder);
+    meth = counter_holder;
+  } else if (level == CompLevel_full_profile) {
+    offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
+                                 methodDataOopDesc::invocation_counter_offset());
+    __ oop2reg(method->method_data()->constant_encoding(), counter_holder);
+    meth = new_register(T_OBJECT);
+    __ oop2reg(method->constant_encoding(), meth);
+  } else {
+    ShouldNotReachHere();
+  }
+  LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
+  LIR_Opr result = new_register(T_INT);
+  __ load(counter, result);
+  __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
+  __ store(result, counter);
+  if (notify) {
+    LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
+    __ logical_and(result, mask, result);
+    __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
+    // The bci in info can point at the cmp for ifs; we want the bci of the if itself
+    CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
+    __ branch(lir_cond_equal, T_INT, overflow);
+    __ branch_destination(overflow->continuation());
+  }
+}
 
 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
   LIRItemList args(1);
@@ -2748,28 +2830,3 @@
   return result;
 }
 
-
-
-void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
-#ifdef TIERED
-  if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
-      (method()->code_size() >= Tier1BytecodeLimit || backedge)) {
-    int limit = InvocationCounter::Tier1InvocationLimit;
-    int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
-                          InvocationCounter::counter_offset());
-    if (backedge) {
-      limit = InvocationCounter::Tier1BackEdgeLimit;
-      offset = in_bytes(methodOopDesc::backedge_counter_offset() +
-                        InvocationCounter::counter_offset());
-    }
-
-    LIR_Opr meth = new_register(T_OBJECT);
-    __ oop2reg(method()->constant_encoding(), meth);
-    LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
-    __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
-    CodeStub* overflow = new CounterOverflowStub(info, info->bci());
-    __ branch(lir_cond_aboveEqual, T_INT, overflow);
-    __ branch_destination(overflow->continuation());
-  }
-#endif
-}
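
increment_event_counter passes frequency = 2^freq_log - 1, and increment_event_counter_impl notifies the runtime only when the masked, freshly bumped counter is zero, i.e. once every 2^freq_log events. A runnable model of that mask test, assuming count_shift = 0 for simplicity (the real InvocationCounter shifts the count past its status bits):

    // Sketch of the overflow-notify test in increment_event_counter_impl.
    #include <cstdio>

    int main() {
      const int freq_log    = 3;                   // e.g. a *NotifyFreqLog flag
      const int frequency   = (1 << freq_log) - 1; // 7: must be 2^n - 1
      const int count_shift = 0;                   // simplified; real counters shift
      const int increment   = 1 << count_shift;

      int counter = 0;
      for (int i = 1; i <= 20; i++) {
        counter += increment;                              // __ add(...)
        if ((counter & (frequency << count_shift)) == 0)   // __ logical_and + cmp
          std::printf("event %2d: notify runtime (CounterOverflowStub)\n", i);
      }
      // Prints at events 8 and 16: one notification per 2^freq_log events.
    }
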
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -196,6 +196,9 @@
   LIR_Opr load_constant(Constant* x);
   LIR_Opr load_constant(LIR_Const* constant);
 
+  // Given an immediate value, return an operand usable in logical ops.
+  LIR_Opr load_immediate(int x, BasicType type);
+
   void  set_result(Value x, LIR_Opr opr)           {
     assert(opr->is_valid(), "must set to valid value");
     assert(x->operand()->is_illegal(), "operand should never change");
@@ -213,8 +216,6 @@
   LIR_Opr round_item(LIR_Opr opr);
   LIR_Opr force_to_spill(LIR_Opr value, BasicType t);
 
-  void  profile_branch(If* if_instr, If::Condition cond);
-
   PhiResolverState& resolver_state() { return _resolver_state; }
 
   void  move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val);
@@ -285,12 +286,9 @@
 
   void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);
 
-  void increment_counter(address counter, int step = 1);
+  void increment_counter(address counter, BasicType type, int step = 1);
   void increment_counter(LIR_Address* addr, int step = 1);
 
-  // increment a counter returning the incremented value
-  LIR_Opr increment_and_return_counter(LIR_Opr base, int offset, int increment);
-
   // is_strictfp is only needed for mul and div (and only generates different code on i486)
   void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL);
   // machine dependent.  returns true if it emitted code for the multiply
@@ -347,9 +345,21 @@
   bool can_store_as_constant(Value i, BasicType type) const;
 
   LIR_Opr safepoint_poll_register();
-  void increment_invocation_counter(CodeEmitInfo* info, bool backedge = false);
-  void increment_backedge_counter(CodeEmitInfo* info) {
-    increment_invocation_counter(info, true);
+
+  void profile_branch(If* if_instr, If::Condition cond);
+  void increment_event_counter_impl(CodeEmitInfo* info,
+                                    ciMethod *method, int frequency,
+                                    int bci, bool backedge, bool notify);
+  void increment_event_counter(CodeEmitInfo* info, int bci, bool backedge);
+  void increment_invocation_counter(CodeEmitInfo *info) {
+    if (compilation()->count_invocations()) {
+      increment_event_counter(info, InvocationEntryBci, false);
+    }
+  }
+  void increment_backedge_counter(CodeEmitInfo* info, int bci) {
+    if (compilation()->count_backedges()) {
+      increment_event_counter(info, bci, true);
+    }
   }
 
   CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false);
@@ -503,7 +513,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   virtual void do_ProfileCall    (ProfileCall*     x);
-  virtual void do_ProfileCounter (ProfileCounter*  x);
+  virtual void do_ProfileInvoke  (ProfileInvoke*   x);
 };
 
 
--- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -430,7 +430,7 @@
   void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   void do_ProfileCall    (ProfileCall*     x);
-  void do_ProfileCounter (ProfileCounter*  x);
+  void do_ProfileInvoke  (ProfileInvoke*   x);
 };
 
 
@@ -598,7 +598,7 @@
 void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
 void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
 void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check(); }
-void NullCheckVisitor::do_ProfileCounter (ProfileCounter*  x) {}
+void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
 
 
 void NullCheckEliminator::visit(Value* p) {
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -118,8 +118,7 @@
   assert(0 <= id && id < number_of_ids, "illegal stub id");
   ResourceMark rm;
   // create code buffer for code storage
-  CodeBuffer code(buffer_blob->instructions_begin(),
-                  buffer_blob->instructions_size());
+  CodeBuffer code(buffer_blob);
 
   Compilation::setup_code_buffer(&code, 0);
 
@@ -141,9 +140,7 @@
     case slow_subtype_check_id:
     case fpu2long_stub_id:
     case unwind_exception_id:
-#ifndef TIERED
-    case counter_overflow_id: // Not generated outside the tiered world
-#endif
+    case counter_overflow_id:
 #if defined(SPARC) || defined(PPC)
     case handle_exception_nofpu_id:  // Unused on sparc
 #endif
@@ -323,31 +320,60 @@
   }
 JRT_END
 
-#ifdef TIERED
-JRT_ENTRY(void, Runtime1::counter_overflow(JavaThread* thread, int bci))
-  RegisterMap map(thread, false);
-  frame fr =  thread->last_frame().sender(&map);
+// This helper lets us safepoint here while keeping the outer entry
+// safepoint-free in case we need to do an OSR
+static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) {
+  nmethod* osr_nm = NULL;
+  methodHandle method(THREAD, m);
+
+  RegisterMap map(THREAD, false);
+  frame fr =  THREAD->last_frame().sender(&map);
   nmethod* nm = (nmethod*) fr.cb();
-  assert(nm!= NULL && nm->is_nmethod(), "what?");
-  methodHandle method(thread, nm->method());
-  if (bci == 0) {
-    // invocation counter overflow
-    if (!Tier1CountOnly) {
-      CompilationPolicy::policy()->method_invocation_event(method, CHECK);
-    } else {
-      method()->invocation_counter()->reset();
+  assert(nm != NULL && nm->is_nmethod(), "Sanity check");
+  methodHandle enclosing_method(THREAD, nm->method());
+
+  CompLevel level = (CompLevel)nm->comp_level();
+  int bci = InvocationEntryBci;
+  if (branch_bci != InvocationEntryBci) {
+    // Compute destination bci
+    address pc = method()->code_base() + branch_bci;
+    Bytecodes::Code branch = Bytecodes::code_at(pc, method());
+    int offset = 0;
+    switch (branch) {
+      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
+      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
+      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
+      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
+      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
+      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
+      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
+        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
+        break;
+      case Bytecodes::_goto_w:
+        offset = Bytes::get_Java_u4(pc + 1);
+        break;
+      default: ;
     }
-  } else {
-    if (!Tier1CountOnly) {
-      // Twe have a bci but not the destination bci and besides a backedge
-      // event is more for OSR which we don't want here.
-      CompilationPolicy::policy()->method_invocation_event(method, CHECK);
-    } else {
-      method()->backedge_counter()->reset();
+    bci = branch_bci + offset;
+  }
+
+  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD);
+  return osr_nm;
+}
+
+JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method))
+  nmethod* osr_nm;
+  JRT_BLOCK
+    osr_nm = counter_overflow_helper(thread, bci, method);
+    if (osr_nm != NULL) {
+      RegisterMap map(thread, false);
+      frame fr =  thread->last_frame().sender(&map);
+      VM_DeoptimizeFrame deopt(thread, fr.id());
+      VMThread::execute(&deopt);
     }
-  }
+  JRT_BLOCK_END
+  return NULL;
 JRT_END
-#endif // TIERED
 
 extern void vm_exit(int code);
 
@@ -899,7 +925,7 @@
             NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
 
             assert(n_copy->data() == 0 ||
-                   n_copy->data() == (int)Universe::non_oop_word(),
+                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                    "illegal init value");
             assert(load_klass() != NULL, "klass not set");
             n_copy->set_data((intx) (load_klass()));
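
counter_overflow_helper recomputes the branch destination straight from the bytecode: two-byte branch offsets are read big-endian and sign-extended, goto_w reads four bytes, and the target is branch_bci + offset. A standalone model of the two-byte case with a hand-built 'goto'; the buffer and values are illustrative:

    // Sketch: decoding a backward branch target the way counter_overflow_helper
    // does, from raw bytecode bytes (big-endian Java u2 offset).
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Pretend method code: a 'goto' at bci 20 with offset -8 (0xFFF8).
      uint8_t code[64] = {0};
      const int branch_bci = 20;
      code[branch_bci]     = 0xA7;   // Bytecodes::_goto
      code[branch_bci + 1] = 0xFF;   // offset high byte
      code[branch_bci + 2] = 0xF8;   // offset low byte

      // Bytes::get_Java_u2 equivalent, then sign-extend via int16_t:
      uint16_t u2 = (uint16_t)((code[branch_bci + 1] << 8) | code[branch_bci + 2]);
      int offset = (int16_t)u2;      // -8
      int bci = branch_bci + offset; // destination bci = 12

      std::printf("branch at bci %d jumps to bci %d\n", branch_bci, bci);
    }
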
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -123,9 +123,7 @@
   static void new_object_array(JavaThread* thread, klassOopDesc* klass, jint length);
   static void new_multi_array (JavaThread* thread, klassOopDesc* klass, int rank, jint* dims);
 
-#ifdef TIERED
-  static void counter_overflow(JavaThread* thread, int bci);
-#endif // TIERED
+  static address counter_overflow(JavaThread* thread, int bci, methodOopDesc* method);
 
   static void unimplemented_entry   (JavaThread* thread, StubID id);
 
@@ -155,7 +153,7 @@
 
   // stubs
   static CodeBlob* blob_for (StubID id);
-  static address   entry_for(StubID id)          { return blob_for(id)->instructions_begin(); }
+  static address   entry_for(StubID id)          { return blob_for(id)->code_begin(); }
   static const char* name_for (StubID id);
   static const char* name_for_address(address entry);
 
--- a/hotspot/src/share/vm/c1/c1_ValueMap.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_ValueMap.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -185,11 +185,11 @@
   void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ }
   void do_RoundFP        (RoundFP*         x) { /* nothing to do */ }
   void do_UnsafeGetRaw   (UnsafeGetRaw*    x) { /* nothing to do */ }
+  void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ }
   void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ }
   void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ }
   void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
   void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ }
-  void do_ProfileCounter (ProfileCounter*  x) { /* nothing to do */ }
 };
 
 
--- a/hotspot/src/share/vm/c1/c1_globals.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_globals.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -25,12 +25,6 @@
 //
 // Defines all global flags used by the client compiler.
 //
-#ifndef TIERED
-  #define NOT_TIERED(x) x
-#else
-  #define NOT_TIERED(x)
-#endif
-
 #define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
                                                                             \
   /* Printing */                                                            \
@@ -55,7 +49,7 @@
   notproduct(bool, PrintIRDuringConstruction, false,                        \
           "Print IR as it's being constructed (helpful for debugging frontend)")\
                                                                             \
-  notproduct(bool, PrintPhiFunctions, false,                                   \
+  notproduct(bool, PrintPhiFunctions, false,                                \
           "Print phi functions when they are created and simplified")       \
                                                                             \
   notproduct(bool, PrintIR, false,                                          \
@@ -279,41 +273,29 @@
   product_pd(intx, SafepointPollOffset,                                     \
           "Offset added to polling address (Intel only)")                   \
                                                                             \
-  product(bool, UseNewFeature1, false,                                      \
-          "Enable new feature for testing.  This is a dummy flag.")         \
-                                                                            \
-  product(bool, UseNewFeature2, false,                                      \
-          "Enable new feature for testing.  This is a dummy flag.")         \
-                                                                            \
-  product(bool, UseNewFeature3, false,                                      \
-          "Enable new feature for testing.  This is a dummy flag.")         \
-                                                                            \
-  product(bool, UseNewFeature4, false,                                      \
-          "Enable new feature for testing.  This is a dummy flag.")         \
-                                                                            \
   develop(bool, ComputeExactFPURegisterUsage, true,                         \
           "Compute additional live set for fpu registers to simplify fpu stack merge (Intel only)") \
                                                                             \
-  product(bool, Tier1ProfileCalls, true,                                    \
+  product(bool, C1ProfileCalls, true,                                       \
           "Profile calls when generating code for updating MDOs")           \
                                                                             \
-  product(bool, Tier1ProfileVirtualCalls, true,                             \
+  product(bool, C1ProfileVirtualCalls, true,                                \
           "Profile virtual calls when generating code for updating MDOs")   \
                                                                             \
-  product(bool, Tier1ProfileInlinedCalls, true,                             \
+  product(bool, C1ProfileInlinedCalls, true,                                \
           "Profile inlined calls when generating code for updating MDOs")   \
                                                                             \
-  product(bool, Tier1ProfileBranches, true,                                 \
+  product(bool, C1ProfileBranches, true,                                    \
           "Profile branches when generating code for updating MDOs")        \
                                                                             \
-  product(bool, Tier1ProfileCheckcasts, true,                               \
+  product(bool, C1ProfileCheckcasts, true,                                  \
           "Profile checkcasts when generating code for updating MDOs")      \
                                                                             \
-  product(bool, Tier1OptimizeVirtualCallProfiling, true,                    \
-          "Use CHA and exact type results at call sites when updating MDOs") \
+  product(bool, C1OptimizeVirtualCallProfiling, true,                       \
+          "Use CHA and exact type results at call sites when updating MDOs")\
                                                                             \
-  develop(bool, Tier1CountOnly, false,                                      \
-          "Don't schedule tier 2 compiles. Enter VM only")                  \
+  product(bool, C1UpdateMethodData, trueInTiered,                           \
+          "Update methodDataOops in Tier1-generated code")                  \
                                                                             \
   develop(bool, PrintCFGToFile, false,                                      \
           "print control flow graph to a separate file during compilation") \
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -92,11 +92,11 @@
     empty_map.clear();
   }
 
-  ArgumentMap raw_pop()  { assert(_stack_height > 0, "stack underflow"); return _stack[--_stack_height]; }
+  ArgumentMap raw_pop()  { guarantee(_stack_height > 0, "stack underflow"); return _stack[--_stack_height]; }
   ArgumentMap  apop()    { return raw_pop(); }
   void spop()            { raw_pop(); }
   void lpop()            { spop(); spop(); }
-  void raw_push(ArgumentMap i)   { assert(_stack_height < _max_stack, "stack overflow"); _stack[_stack_height++] = i; }
+  void raw_push(ArgumentMap i)   { guarantee(_stack_height < _max_stack, "stack overflow"); _stack[_stack_height++] = i; }
   void apush(ArgumentMap i)      { raw_push(i); }
   void spush()           { raw_push(empty_map); }
   void lpush()           { spush(); spush(); }
@@ -365,12 +365,19 @@
       case Bytecodes::_ldc:
       case Bytecodes::_ldc_w:
       case Bytecodes::_ldc2_w:
-        if (type2size[s.get_constant().basic_type()] == 1) {
+      {
+        // Avoid calling get_constant(), which would try to allocate an
+        // unloaded constant. We only need the constant's type here.
+        int index = s.get_constant_pool_index();
+        constantTag tag = s.get_constant_pool_tag(index);
+        if (tag.is_long() || tag.is_double()) {
+          // Only longs and doubles use 2 stack slots.
+          state.lpush();
+        } else {
           state.spush();
-        } else {
-          state.lpush();
         }
         break;
+      }
       case Bytecodes::_aload:
         state.apush(state._vars[s.get_index()]);
         break;
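
The _ldc rewrite above deliberately avoids s.get_constant(), which could allocate an unloaded constant; only the constant-pool tag is needed to pick the slot count. A self-contained sketch of the rule it relies on (CpTag and stack_slots_for are illustrative stand-ins, not HotSpot types):

    #include <cassert>

    enum class CpTag { Int, Float, String, Class, Long, Double };

    // Only longs and doubles occupy two operand-stack slots in the JVM.
    static int stack_slots_for(CpTag tag) {
      return (tag == CpTag::Long || tag == CpTag::Double) ? 2 : 1;
    }

    int main() {
      assert(stack_slots_for(CpTag::Long)   == 2);
      assert(stack_slots_for(CpTag::Double) == 2);
      assert(stack_slots_for(CpTag::String) == 1);
      return 0;
    }
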
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -956,18 +956,18 @@
       if (task() != NULL)  task()->set_code(nm);
 
       if (entry_bci == InvocationEntryBci) {
-#ifdef TIERED
-        // If there is an old version we're done with it
-        nmethod* old = method->code();
-        if (TraceMethodReplacement && old != NULL) {
-          ResourceMark rm;
-          char *method_name = method->name_and_sig_as_C_string();
-          tty->print_cr("Replacing method %s", method_name);
+        if (TieredCompilation) {
+          // If there is an old version we're done with it
+          nmethod* old = method->code();
+          if (TraceMethodReplacement && old != NULL) {
+            ResourceMark rm;
+            char *method_name = method->name_and_sig_as_C_string();
+            tty->print_cr("Replacing method %s", method_name);
+          }
+          if (old != NULL ) {
+            old->make_not_entrant();
+          }
         }
-        if (old != NULL ) {
-          old->make_not_entrant();
-        }
-#endif // TIERED
         if (TraceNMethodInstalls ) {
           ResourceMark rm;
           char *method_name = method->name_and_sig_as_C_string();
@@ -1011,7 +1011,7 @@
 // ------------------------------------------------------------------
 // ciEnv::comp_level
 int ciEnv::comp_level() {
-  if (task() == NULL)  return CompLevel_full_optimization;
+  if (task() == NULL)  return CompLevel_highest_tier;
   return task()->comp_level();
 }
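
The hunk above replaces a compile-time #ifdef TIERED with a runtime TieredCompilation check, keeping the rule that an old nmethod is made not-entrant, not freed, when a replacement is installed at the normal entry. A minimal sketch of that flow, with Nmethod as a stand-in struct rather than the real class:

    // Sketch: existing activations of the old code keep running, but new
    // calls miss the entry point and re-resolve to the fresh version.
    struct Nmethod { bool entrant = true; void make_not_entrant() { entrant = false; } };

    void install_replacement(Nmethod* old_code, bool tiered_compilation) {
      if (tiered_compilation && old_code != nullptr) {
        old_code->make_not_entrant();
      }
    }
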
 
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -49,7 +49,8 @@
   _handler_count      = h_m()->exception_table()->length() / 4;
   _uses_monitors      = h_m()->access_flags().has_monitor_bytecodes();
   _balanced_monitors  = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
-  _is_compilable      = !h_m()->is_not_compilable();
+  _is_c1_compilable   = !h_m()->is_not_c1_compilable();
+  _is_c2_compilable   = !h_m()->is_not_c2_compilable();
   // Lazy fields, filled in on demand.  Require allocation.
   _code               = NULL;
   _exception_handlers = NULL;
@@ -61,11 +62,12 @@
 #endif // COMPILER2 || SHARK
 
   ciEnv *env = CURRENT_ENV;
-  if (env->jvmti_can_hotswap_or_post_breakpoint() && _is_compilable) {
+  if (env->jvmti_can_hotswap_or_post_breakpoint() && can_be_compiled()) {
     // 6328518 check hotswap conditions under the right lock.
     MutexLocker locker(Compile_lock);
     if (Dependencies::check_evol_method(h_m()) != NULL) {
-      _is_compilable = false;
+      _is_c1_compilable = false;
+      _is_c2_compilable = false;
     }
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
@@ -93,7 +95,7 @@
   _signature = new (env->arena()) ciSignature(_holder, sig_symbol);
   _method_data = NULL;
   // Take a snapshot of these values, so they will be commensurate with the MDO.
-  if (ProfileInterpreter) {
+  if (ProfileInterpreter || TieredCompilation) {
     int invcnt = h_m()->interpreter_invocation_count();
     // if the value overflowed report it as max int
     _interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt ;
@@ -437,11 +439,26 @@
         // In addition, virtual call sites have receiver type information
         int receivers_count_total = 0;
         int morphism = 0;
+        // Precompute morphism for the possible fixup
         for (uint i = 0; i < call->row_limit(); i++) {
           ciKlass* receiver = call->receiver(i);
           if (receiver == NULL)  continue;
-          morphism += 1;
-          int rcount = call->receiver_count(i);
+          morphism++;
+        }
+        int epsilon = 0;
+        if (TieredCompilation && ProfileInterpreter) {
+          // Interpreter and C1 treat final and special invokes differently.
+          // C1 will record a type, whereas the interpreter will just
+          // increment the count. Detect this case.
+          if (morphism == 1 && count > 0) {
+            epsilon = count;
+            count = 0;
+          }
+        }
+        for (uint i = 0; i < call->row_limit(); i++) {
+          ciKlass* receiver = call->receiver(i);
+          if (receiver == NULL)  continue;
+          int rcount = call->receiver_count(i) + epsilon;
           if (rcount == 0) rcount = 1; // Should be valid value
           receivers_count_total += rcount;
           // Add the receiver to result data.
@@ -687,10 +704,17 @@
 // invocation counts in methods.
 int ciMethod::scale_count(int count, float prof_factor) {
   if (count > 0 && method_data() != NULL) {
-    int current_mileage = method_data()->current_mileage();
-    int creation_mileage = method_data()->creation_mileage();
-    int counter_life = current_mileage - creation_mileage;
+    int counter_life;
     int method_life = interpreter_invocation_count();
+    if (TieredCompilation) {
+      // In tiered mode the MDO's life is measured directly, so just use the snapshotted counters.
+      counter_life = MAX2(method_data()->invocation_count(), method_data()->backedge_count());
+    } else {
+      int current_mileage = method_data()->current_mileage();
+      int creation_mileage = method_data()->creation_mileage();
+      counter_life = current_mileage - creation_mileage;
+    }
+
     // counter_life due to backedge_counter could be > method_life
     if (counter_life > method_life)
       counter_life = method_life;
@@ -778,7 +802,8 @@
   Thread* my_thread = JavaThread::current();
   methodHandle h_m(my_thread, get_methodOop());
 
-  if (Tier1UpdateMethodData && is_tier1_compile(env->comp_level())) {
+  // Create an MDO for the inlinee
+  if (TieredCompilation && is_c1_compile(env->comp_level())) {
     build_method_data(h_m);
   }
 
@@ -885,7 +910,11 @@
 // Have previous compilations of this method succeeded?
 bool ciMethod::can_be_compiled() {
   check_is_loaded();
-  return _is_compilable;
+  ciEnv* env = CURRENT_ENV;
+  if (is_c1_compile(env->comp_level())) {
+    return _is_c1_compilable;
+  }
+  return _is_c2_compilable;
 }
 
 // ------------------------------------------------------------------
@@ -895,8 +924,13 @@
 void ciMethod::set_not_compilable() {
   check_is_loaded();
   VM_ENTRY_MARK;
-  _is_compilable = false;
-  get_methodOop()->set_not_compilable();
+  ciEnv* env = CURRENT_ENV;
+  if (is_c1_compile(env->comp_level())) {
+    _is_c1_compilable = false;
+  } else {
+    _is_c2_compilable = false;
+  }
+  get_methodOop()->set_not_compilable(env->comp_level());
 }
 
 // ------------------------------------------------------------------
@@ -910,7 +944,8 @@
 bool ciMethod::can_be_osr_compiled(int entry_bci) {
   check_is_loaded();
   VM_ENTRY_MARK;
-  return !get_methodOop()->access_flags().is_not_osr_compilable();
+  ciEnv* env = CURRENT_ENV;
+  return !get_methodOop()->is_not_osr_compilable(env->comp_level());
 }
 
 // ------------------------------------------------------------------
@@ -920,26 +955,29 @@
   return get_methodOop()->code() != NULL;
 }
 
+int ciMethod::comp_level() {
+  check_is_loaded();
+  VM_ENTRY_MARK;
+  nmethod* nm = get_methodOop()->code();
+  if (nm != NULL) return nm->comp_level();
+  return 0;
+}
+
 // ------------------------------------------------------------------
 // ciMethod::instructions_size
-// This is a rough metric for "fat" methods, compared
-// before inlining with InlineSmallCode.
-// The CodeBlob::instructions_size accessor includes
-// junk like exception handler, stubs, and constant table,
-// which are not highly relevant to an inlined method.
-// So we use the more specific accessor nmethod::code_size.
-int ciMethod::instructions_size() {
+//
+// This is a rough metric for "fat" methods, compared before inlining
+// with InlineSmallCode.  The CodeBlob::code_size accessor includes
+// junk like exception handler, stubs, and constant table, which are
+// not highly relevant to an inlined method.  So we use the more
+// specific accessor nmethod::insts_size.
+int ciMethod::instructions_size(int comp_level) {
   GUARDED_VM_ENTRY(
     nmethod* code = get_methodOop()->code();
-    // if there's no compiled code or the code was produced by the
-    // tier1 profiler return 0 for the code size.  This should
-    // probably be based on the compilation level of the nmethod but
-    // that currently isn't properly recorded.
-    if (code == NULL ||
-        (TieredCompilation && code->compiler() != NULL && code->compiler()->is_c1())) {
-      return 0;
+    if (code != NULL && (comp_level == CompLevel_any || comp_level == code->comp_level())) {
+      return code->code_end() - code->verified_entry_point();
     }
-    return code->code_end() - code->verified_entry_point();
+    return 0;
   )
 }
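
To make the scale_count() change above concrete: under tiered compilation the MDO's age is taken as the larger of its snapshotted invocation and backedge counters, while the classic path still uses the mileage delta. A sketch with plain ints (MAX2 is HotSpot's macro; std::max stands in here):

    #include <algorithm>

    // Sketch of the counter_life selection in ciMethod::scale_count().
    int counter_life(bool tiered, int mdo_invocations, int mdo_backedges,
                     int current_mileage, int creation_mileage) {
      return tiered ? std::max(mdo_invocations, mdo_backedges)
                    : current_mileage - creation_mileage;
    }
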
 
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -61,7 +61,8 @@
 
   bool _uses_monitors;
   bool _balanced_monitors;
-  bool _is_compilable;
+  bool _is_c1_compilable;
+  bool _is_c2_compilable;
   bool _can_be_statically_bound;
 
   // Lazy fields, filled in on demand
@@ -127,6 +128,8 @@
   int interpreter_invocation_count() const       { check_is_loaded(); return _interpreter_invocation_count; }
   int interpreter_throwout_count() const         { check_is_loaded(); return _interpreter_throwout_count; }
 
+  int comp_level();
+
   Bytecodes::Code java_code_at_bci(int bci) {
     address bcp = code() + bci;
     return Bytecodes::java_code_at(bcp);
@@ -209,7 +212,7 @@
   bool can_be_osr_compiled(int entry_bci);
   void set_not_compilable();
   bool has_compiled_code();
-  int  instructions_size();
+  int  instructions_size(int comp_level = CompLevel_any);
   void log_nmethod_identity(xmlStream* log);
   bool is_not_reached(int bci);
   bool was_executed_more_than(int times);
--- a/hotspot/src/share/vm/ci/ciMethodData.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,8 @@
   _data_size = 0;
   _extra_data_size = 0;
   _current_mileage = 0;
+  _invocation_counter = 0;
+  _backedge_counter = 0;
   _state = empty_state;
   _saw_free_extra_data = false;
   // Set an initial hint. Don't use set_hint_di() because
@@ -56,6 +58,8 @@
   _data_size = 0;
   _extra_data_size = 0;
   _current_mileage = 0;
+  _invocation_counter = 0;
+  _backedge_counter = 0;
   _state = empty_state;
   _saw_free_extra_data = false;
   // Set an initial hint. Don't use set_hint_di() because
@@ -99,6 +103,8 @@
   }
   // Note:  Extra data are all BitData, and do not need translation.
   _current_mileage = methodDataOopDesc::mileage_of(mdo->method());
+  _invocation_counter = mdo->invocation_count();
+  _backedge_counter = mdo->backedge_count();
   _state = mdo->is_mature()? mature_state: immature_state;
 
   _eflags = mdo->eflags();
@@ -253,6 +259,23 @@
   }
 }
 
+void ciMethodData::set_compilation_stats(short loops, short blocks) {
+  VM_ENTRY_MARK;
+  methodDataOop mdo = get_methodDataOop();
+  if (mdo != NULL) {
+    mdo->set_num_loops(loops);
+    mdo->set_num_blocks(blocks);
+  }
+}
+
+void ciMethodData::set_would_profile(bool p) {
+  VM_ENTRY_MARK;
+  methodDataOop mdo = get_methodDataOop();
+  if (mdo != NULL) {
+    mdo->set_would_profile(p);
+  }
+}
+
 bool ciMethodData::has_escape_info() {
   return eflag_set(methodDataOopDesc::estimated);
 }
--- a/hotspot/src/share/vm/ci/ciMethodData.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -162,6 +162,12 @@
   // Maturity of the oop when the snapshot is taken.
   int _current_mileage;
 
+  // These counters hold the age of the MDO in tiered compilation. In tiered
+  // mode the same method can be running at different compilation levels
+  // concurrently, so precisely measuring its maturity requires separate counters.
+  int _invocation_counter;
+  int _backedge_counter;
+
   // Coherent snapshot of original header.
   methodDataOopDesc _orig;
 
@@ -223,6 +229,16 @@
   int creation_mileage() { return _orig.creation_mileage(); }
   int current_mileage()  { return _current_mileage; }
 
+  int invocation_count() { return _invocation_counter; }
+  int backedge_count()   { return _backedge_counter;   }
+  // Transfer information about the method to the methodDataOop.
+  // would_profile means we would like to profile this method,
+  // i.e. it is not trivial.
+  void set_would_profile(bool p);
+  // Also set the number of loops and blocks in the method.
+  // Again, this is used to determine if a method is trivial.
+  void set_compilation_stats(short loops, short blocks);
+
   void load_data();
 
   // Convert a dp (data pointer) to a di (data index).
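
The set_would_profile()/set_compilation_stats() accessors above let the compiler record whether a method is trivial. A sketch of the kind of test this enables; the threshold below is invented for illustration, and HotSpot's real policy lives elsewhere:

    // Hypothetical triviality test over the values recorded through the
    // accessors above; the block-count cutoff is made up.
    bool looks_trivial(bool would_profile, short num_loops, short num_blocks) {
      return !would_profile && num_loops == 0 && num_blocks <= 4;
    }
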
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1292,7 +1292,7 @@
           // Iterate over all methods in class
           for (int n = 0; n < k->methods()->length(); n++) {
             methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
-            if (CompilationPolicy::canBeCompiled(m)) {
+            if (CompilationPolicy::can_be_compiled(m)) {
 
               if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
                 // Give sweeper a chance to keep up with CTW
@@ -1301,7 +1301,7 @@
                 _codecache_sweep_counter = 0;
               }
               // Force compilation
-              CompileBroker::compile_method(m, InvocationEntryBci,
+              CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_initial_compile,
                                             methodHandle(), 0, "CTW", THREAD);
               if (HAS_PENDING_EXCEPTION) {
                 CLEAR_PENDING_EXCEPTION;
@@ -1315,7 +1315,7 @@
                   nm->make_not_entrant();
                   m->clear_code();
                 }
-                CompileBroker::compile_method(m, InvocationEntryBci,
+                CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization,
                                               methodHandle(), 0, "CTW", THREAD);
                 if (HAS_PENDING_EXCEPTION) {
                   CLEAR_PENDING_EXCEPTION;
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -2424,12 +2424,15 @@
 
 int java_dyn_MethodTypeForm::_vmslots_offset;
 int java_dyn_MethodTypeForm::_erasedType_offset;
+int java_dyn_MethodTypeForm::_genericInvoker_offset;
 
 void java_dyn_MethodTypeForm::compute_offsets() {
   klassOop k = SystemDictionary::MethodTypeForm_klass();
   if (k != NULL) {
     compute_optional_offset(_vmslots_offset,    k, vmSymbols::vmslots_name(),    vmSymbols::int_signature(), true);
     compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_dyn_MethodType_signature(), true);
+    compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
+    if (_genericInvoker_offset == 0)  _genericInvoker_offset = -1;  // set to explicit "empty" value
   }
 }
 
@@ -2443,6 +2446,11 @@
   return mtform->obj_field(_erasedType_offset);
 }
 
+oop java_dyn_MethodTypeForm::genericInvoker(oop mtform) {
+  assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
+  return mtform->obj_field(_genericInvoker_offset);
+}
+
 
 // Support for java_dyn_CallSite
 
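The compute_offsets() hunk above treats the new genericInvoker field as optional: a lookup result of 0 means "not found" and is normalized to -1 as an explicit empty marker, so later reads can distinguish an absent field from a real offset. The convention in isolation (plain ints, not the real offset machinery):

    // 0 = compute_optional_offset() found nothing; -1 = explicit "empty".
    int normalize_optional_offset(int offset) {
      return (offset == 0) ? -1 : offset;
    }
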
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1048,6 +1048,7 @@
  private:
   static int _vmslots_offset;           // number of argument slots needed
   static int _erasedType_offset;        // erasedType = canonical MethodType
+  static int _genericInvoker_offset;    // genericInvoker = adapter for invokeGeneric
 
   static void compute_offsets();
 
@@ -1055,10 +1056,12 @@
   // Accessors
   static int            vmslots(oop mtform);
   static oop            erasedType(oop mtform);
+  static oop            genericInvoker(oop mtform);
 
   // Accessors for code generation:
   static int vmslots_offset_in_bytes()          { return _vmslots_offset; }
   static int erasedType_offset_in_bytes()       { return _erasedType_offset; }
+  static int genericInvoker_offset_in_bytes()   { return _genericInvoker_offset; }
 };
 
 
--- a/hotspot/src/share/vm/classfile/stackMapTable.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/stackMapTable.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -152,6 +152,7 @@
 
 int32_t StackMapReader::chop(
     VerificationType* locals, int32_t length, int32_t chops) {
+  if (locals == NULL) return -1;
   int32_t pos = length - 1;
   for (int32_t i=0; i<chops; i++) {
     if (locals[pos].is_category2_2nd()) {
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -2361,8 +2361,11 @@
     // Must create lots of stuff here, but outside of the SystemDictionary lock.
     if (THREAD->is_Compiler_thread())
       return NULL;              // do not attempt from within compiler
+    bool for_invokeGeneric = (name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name));
     bool found_on_bcp = false;
-    Handle mt = find_method_handle_type(signature(), accessing_klass, found_on_bcp, CHECK_NULL);
+    Handle mt = find_method_handle_type(signature(), accessing_klass,
+                                        for_invokeGeneric,
+                                        found_on_bcp, CHECK_NULL);
     KlassHandle  mh_klass = SystemDictionaryHandles::MethodHandle_klass();
     methodHandle m = methodOopDesc::make_invoke_method(mh_klass, name, signature,
                                                        mt, CHECK_NULL);
@@ -2393,6 +2396,7 @@
 // consistent with this loader.
 Handle SystemDictionary::find_method_handle_type(symbolHandle signature,
                                                  KlassHandle accessing_klass,
+                                                 bool for_invokeGeneric,
                                                  bool& return_bcp_flag,
                                                  TRAPS) {
   Handle class_loader, protection_domain;
@@ -2448,10 +2452,26 @@
                          vmSymbols::findMethodHandleType_name(),
                          vmSymbols::findMethodHandleType_signature(),
                          &args, CHECK_(empty));
+  Handle method_type(THREAD, (oop) result.get_jobject());
+
+  if (for_invokeGeneric) {
+    // call sun.dyn.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void
+    JavaCallArguments args(Handle(THREAD, method_type()));
+    JavaValue no_result(T_VOID);
+    JavaCalls::call_static(&no_result,
+                           SystemDictionary::MethodHandleNatives_klass(),
+                           vmSymbols::notifyGenericMethodType_name(),
+                           vmSymbols::notifyGenericMethodType_signature(),
+                           &args, THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      // If the notification fails, just clear the pending exception and carry on.
+      CLEAR_PENDING_EXCEPTION;
+    }
+  }
 
   // report back to the caller with the MethodType and the "on_bcp" flag
   return_bcp_flag = is_on_bcp;
-  return Handle(THREAD, (oop) result.get_jobject());
+  return method_type;
 }
 
 // Ask Java code to find or construct a method handle constant.
@@ -2466,7 +2486,7 @@
   Handle type;
   if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
     bool ignore_is_on_bcp = false;
-    type = find_method_handle_type(signature, caller, ignore_is_on_bcp, CHECK_(empty));
+    type = find_method_handle_type(signature, caller, false, ignore_is_on_bcp, CHECK_(empty));
   } else {
     SignatureStream ss(signature(), false);
     if (!ss.is_done()) {
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -471,6 +471,7 @@
   // ask Java to compute a java.dyn.MethodType object for a given signature
   static Handle    find_method_handle_type(symbolHandle signature,
                                            KlassHandle accessing_klass,
+                                           bool for_invokeGeneric,
                                            bool& return_bcp_flag,
                                            TRAPS);
   // ask Java to compute a java.dyn.MethodHandle object for a given CP entry
--- a/hotspot/src/share/vm/classfile/verificationType.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/verificationType.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -54,10 +54,12 @@
       // any object or array is assignable to java.lang.Object
       return true;
     }
-    klassOop this_class = SystemDictionary::resolve_or_fail(
+    klassOop obj = SystemDictionary::resolve_or_fail(
         name_handle(), Handle(THREAD, context->class_loader()),
         Handle(THREAD, context->protection_domain()), true, CHECK_false);
-    if (this_class->klass_part()->is_interface()) {
+    KlassHandle this_class(THREAD, obj);
+
+    if (this_class->is_interface()) {
       // We treat interfaces as java.lang.Object, including
       // java.lang.Cloneable and java.io.Serializable
       return true;
@@ -65,7 +67,7 @@
       klassOop from_class = SystemDictionary::resolve_or_fail(
           from.name_handle(), Handle(THREAD, context->class_loader()),
           Handle(THREAD, context->protection_domain()), true, CHECK_false);
-      return instanceKlass::cast(from_class)->is_subclass_of(this_class);
+      return instanceKlass::cast(from_class)->is_subclass_of(this_class());
     }
   } else if (is_array() && from.is_array()) {
     VerificationType comp_this = get_component(CHECK_false);
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -246,6 +246,8 @@
   /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */            \
   template(findMethodHandleType_name,                 "findMethodHandleType")                     \
   template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
+  template(notifyGenericMethodType_name,              "notifyGenericMethodType")                  \
+  template(notifyGenericMethodType_signature,         "(Ljava/dyn/MethodType;)V")                 \
   template(linkMethodHandleConstant_name,             "linkMethodHandleConstant")                 \
   template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \
   template(makeDynamicCallSite_name,                  "makeDynamicCallSite")                      \
@@ -345,6 +347,7 @@
   template(ptypes_name,                               "ptypes")                                   \
   template(form_name,                                 "form")                                     \
   template(erasedType_name,                           "erasedType")                               \
+  template(genericInvoker_name,                       "genericInvoker")                           \
   template(append_name,                               "append")                                   \
                                                                                                   \
   /* non-intrinsic name/signature pairs: */                                                       \
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -39,7 +39,7 @@
   size += round_to(cb->total_relocation_size(), oopSize);
   // align the size to CodeEntryAlignment
   size = align_code_offset(size);
-  size += round_to(cb->total_code_size(), oopSize);
+  size += round_to(cb->total_content_size(), oopSize);
   size += round_to(cb->total_oop_size(), oopSize);
   return size;
 }
@@ -47,8 +47,8 @@
 
 // Creates a simple CodeBlob. Sets up the size of the different regions.
 CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
-  assert(size == round_to(size, oopSize), "unaligned size");
-  assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
+  assert(size        == round_to(size,        oopSize), "unaligned size");
+  assert(locs_size   == round_to(locs_size,   oopSize), "unaligned size");
   assert(header_size == round_to(header_size, oopSize), "unaligned size");
   assert(!UseRelocIndex, "no space allocated for reloc index yet");
 
@@ -64,7 +64,8 @@
   _frame_complete_offset = frame_complete;
   _header_size           = header_size;
   _relocation_size       = locs_size;
-  _instructions_offset   = align_code_offset(header_size + locs_size);
+  _content_offset        = align_code_offset(header_size + _relocation_size);
+  _code_offset           = _content_offset;
   _data_offset           = size;
   _frame_size            =  0;
   set_oop_maps(NULL);
@@ -82,7 +83,7 @@
   int         frame_size,
   OopMapSet*  oop_maps
 ) {
-  assert(size == round_to(size, oopSize), "unaligned size");
+  assert(size        == round_to(size,        oopSize), "unaligned size");
   assert(header_size == round_to(header_size, oopSize), "unaligned size");
 
   _name                  = name;
@@ -90,8 +91,9 @@
   _frame_complete_offset = frame_complete;
   _header_size           = header_size;
   _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
-  _instructions_offset   = align_code_offset(header_size + _relocation_size);
-  _data_offset           = _instructions_offset + round_to(cb->total_code_size(), oopSize);
+  _content_offset        = align_code_offset(header_size + _relocation_size);
+  _code_offset           = _content_offset + cb->total_offset_of(cb->insts());
+  _data_offset           = _content_offset + round_to(cb->total_content_size(), oopSize);
   assert(_data_offset <= size, "codeBlob is too small");
 
   cb->copy_code_and_locs_to(this);
@@ -127,9 +129,8 @@
 
 
 OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
-  address pc = return_address ;
-  assert (oop_maps() != NULL, "nope");
-  return oop_maps()->find_map_at_offset ((intptr_t) pc - (intptr_t) instructions_begin());
+  assert(oop_maps() != NULL, "nope");
+  return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
 }
 
 
@@ -284,12 +285,12 @@
     jio_snprintf(stub_id, sizeof(stub_id), "RuntimeStub - %s", stub_name);
     if (PrintStubCode) {
       tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub);
-      Disassembler::decode(stub->instructions_begin(), stub->instructions_end());
+      Disassembler::decode(stub->code_begin(), stub->code_end());
     }
-    Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
+    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated(stub_name, stub->instructions_begin(), stub->instructions_end());
+      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
     }
   }
 
@@ -355,17 +356,15 @@
   // Do not hold the CodeCache lock during name formatting.
   if (blob != NULL) {
     char blob_id[256];
-    jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->instructions_begin());
+    jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->code_begin());
     if (PrintStubCode) {
       tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
-      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
+      Disassembler::decode(blob->code_begin(), blob->code_end());
     }
-    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
+    Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob",
-                                               blob->instructions_begin(),
-                                               blob->instructions_end());
+      JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob", blob->code_begin(), blob->code_end());
     }
   }
 
@@ -412,17 +411,15 @@
   // Do not hold the CodeCache lock during name formatting.
   if (blob != NULL) {
     char blob_id[256];
-    jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->instructions_begin());
+    jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->code_begin());
     if (PrintStubCode) {
       tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
-      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
+      Disassembler::decode(blob->code_begin(), blob->code_end());
     }
-    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
+    Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob",
-                                               blob->instructions_begin(),
-                                               blob->instructions_end());
+      JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob", blob->code_begin(), blob->code_end());
     }
   }
 
@@ -471,17 +468,15 @@
   // We do not need to hold the CodeCache lock during name formatting
   if (blob != NULL) {
     char blob_id[256];
-    jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->instructions_begin());
+    jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->code_begin());
     if (PrintStubCode) {
       tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
-      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
+      Disassembler::decode(blob->code_begin(), blob->code_end());
     }
-    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
+    Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated("ExceptionBlob",
-                                               blob->instructions_begin(),
-                                               blob->instructions_end());
+      JvmtiExport::post_dynamic_code_generated("ExceptionBlob", blob->code_begin(), blob->code_end());
     }
   }
 
@@ -529,17 +524,15 @@
   // We do not need to hold the CodeCache lock during name formatting.
   if (blob != NULL) {
     char blob_id[256];
-    jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->instructions_begin());
+    jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->code_begin());
     if (PrintStubCode) {
       tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
-      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
+      Disassembler::decode(blob->code_begin(), blob->code_end());
     }
-    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
+    Forte::register_stub(blob_id, blob->code_begin(), blob->code_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated("SafepointBlob",
-                                               blob->instructions_begin(),
-                                               blob->instructions_end());
+      JvmtiExport::post_dynamic_code_generated("SafepointBlob", blob->code_begin(), blob->code_end());
     }
   }
 
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -35,7 +35,8 @@
 // Layout:
 //   - header
 //   - relocation
-//   - instruction space
+//   - content space
+//     - instruction space
 //   - data space
 class DeoptimizationBlob;
 
@@ -48,7 +49,8 @@
   int        _size;                              // total size of CodeBlob in bytes
   int        _header_size;                       // size of header (depends on subclass)
   int        _relocation_size;                   // size of relocation
-  int        _instructions_offset;               // offset to where instructions region begins
+  int        _content_offset;                    // offset to where content region begins (this includes consts, insts, stubs)
+  int        _code_offset;                       // offset to where instructions region begins (this includes insts, stubs)
   int        _frame_complete_offset;             // instruction offsets in [0.._frame_complete_offset) have
                                                  // not finished setting up their frame. Beware of pc's in
                                                  // that range. There is a similar range(s) on returns
@@ -106,31 +108,36 @@
   address    header_end() const                  { return ((address)   this) + _header_size; };
   relocInfo* relocation_begin() const            { return (relocInfo*) header_end(); };
   relocInfo* relocation_end() const              { return (relocInfo*)(header_end()   + _relocation_size); }
-  address    instructions_begin() const          { return (address)    header_begin() + _instructions_offset;  }
-  address    instructions_end() const            { return (address)    header_begin() + _data_offset; }
+  address    content_begin() const               { return (address)    header_begin() + _content_offset; }
+  address    content_end() const                 { return (address)    header_begin() + _data_offset; }
+  address    code_begin() const                  { return (address)    header_begin() + _code_offset; }
+  address    code_end() const                    { return (address)    header_begin() + _data_offset; }
   address    data_begin() const                  { return (address)    header_begin() + _data_offset; }
   address    data_end() const                    { return (address)    header_begin() + _size; }
 
   // Offsets
   int relocation_offset() const                  { return _header_size; }
-  int instructions_offset() const                { return _instructions_offset; }
+  int content_offset() const                     { return _content_offset; }
+  int code_offset() const                        { return _code_offset; }
   int data_offset() const                        { return _data_offset; }
 
   // Sizes
   int size() const                               { return _size; }
   int header_size() const                        { return _header_size; }
   int relocation_size() const                    { return (address) relocation_end() - (address) relocation_begin(); }
-  int instructions_size() const                  { return instructions_end() - instructions_begin();  }
-  int data_size() const                          { return data_end() - data_begin(); }
+  int content_size() const                       { return           content_end()    -           content_begin();    }
+  int code_size() const                          { return           code_end()       -           code_begin();       }
+  int data_size() const                          { return           data_end()       -           data_begin();       }
 
   // Containment
-  bool blob_contains(address addr) const         { return header_begin()       <= addr && addr < data_end(); }
+  bool blob_contains(address addr) const         { return header_begin()       <= addr && addr < data_end();       }
   bool relocation_contains(relocInfo* addr) const{ return relocation_begin()   <= addr && addr < relocation_end(); }
-  bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); }
-  bool data_contains(address addr) const         { return data_begin()         <= addr && addr < data_end(); }
-  bool contains(address addr) const              { return instructions_contains(addr); }
-  bool is_frame_complete_at(address addr) const  { return instructions_contains(addr) &&
-                                                          addr >= instructions_begin() + _frame_complete_offset; }
+  bool content_contains(address addr) const      { return content_begin()      <= addr && addr < content_end();    }
+  bool code_contains(address addr) const         { return code_begin()         <= addr && addr < code_end();       }
+  bool data_contains(address addr) const         { return data_begin()         <= addr && addr < data_end();       }
+  bool contains(address addr) const              { return content_contains(addr); }
+  bool is_frame_complete_at(address addr) const  { return code_contains(addr) &&
+                                                          addr >= code_begin() + _frame_complete_offset; }
 
   // CodeCache support: really only used by the nmethods, but in order to get
   // asserts and certain bookkeeping to work in the CodeCache they are defined
@@ -169,7 +176,7 @@
 
   // Print the comment associated with offset on stream, if there is one
   virtual void print_block_comment(outputStream* stream, address block_begin) {
-    intptr_t offset = (intptr_t)(block_begin - instructions_begin());
+    intptr_t offset = (intptr_t)(block_begin - code_begin());
     _comments.print_block_comment(stream, offset);
   }
 
@@ -286,7 +293,7 @@
   // GC support
   bool caller_must_gc_arguments(JavaThread* thread) const { return _caller_must_gc_arguments; }
 
-  address entry_point()                          { return instructions_begin(); }
+  address entry_point()                          { return code_begin(); }
 
   // GC/Verification support
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f)  { /* nothing to do */ }
@@ -313,13 +320,15 @@
      OopMapSet*  oop_maps
    )
    : CodeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps)
-   {};
+  {};
 
-   bool is_alive() const                         { return true; }
+  address entry_point()                          { return code_begin(); }
 
-   void verify(); // does nothing
-   void print_on(outputStream* st) const;
-   void print_value_on(outputStream* st) const;
+  bool is_alive() const                          { return true; }
+
+  void verify(); // does nothing
+  void print_on(outputStream* st) const;
+  void print_value_on(outputStream* st) const;
 };
 
 
@@ -376,9 +385,9 @@
   // Printing
   void print_value_on(outputStream* st) const;
 
-  address unpack() const                         { return instructions_begin() + _unpack_offset;           }
-  address unpack_with_exception() const          { return instructions_begin() + _unpack_with_exception;   }
-  address unpack_with_reexecution() const        { return instructions_begin() + _unpack_with_reexecution; }
+  address unpack() const                         { return code_begin() + _unpack_offset;           }
+  address unpack_with_exception() const          { return code_begin() + _unpack_with_exception;   }
+  address unpack_with_reexecution() const        { return code_begin() + _unpack_with_reexecution; }
 
   // Alternate entry point for C1 where the exception and issuing pc
   // are in JavaThread::_exception_oop and JavaThread::_exception_pc
@@ -387,9 +396,9 @@
   // there may be live values in those registers during deopt.
   void set_unpack_with_exception_in_tls_offset(int offset) {
     _unpack_with_exception_in_tls = offset;
-    assert(contains(instructions_begin() + _unpack_with_exception_in_tls), "must be PC inside codeblob");
+    assert(code_contains(code_begin() + _unpack_with_exception_in_tls), "must be PC inside codeblob");
   }
-  address unpack_with_exception_in_tls() const   { return instructions_begin() + _unpack_with_exception_in_tls;   }
+  address unpack_with_exception_in_tls() const   { return code_begin() + _unpack_with_exception_in_tls; }
 };
 
 
--- a/hotspot/src/share/vm/code/codeCache.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -76,14 +76,14 @@
     relocation_size  += cb->relocation_size();
     if (cb->is_nmethod()) {
       nmethod* nm = cb->as_nmethod_or_null();
-      code_size        += nm->code_size();
+      code_size        += nm->insts_size();
       stub_size        += nm->stub_size();
 
       scopes_oop_size  += nm->oops_size();
       scopes_data_size += nm->scopes_data_size();
       scopes_pcs_size  += nm->scopes_pcs_size();
     } else {
-      code_size        += cb->instructions_size();
+      code_size        += cb->code_size();
     }
   }
 };
@@ -210,7 +210,7 @@
   }
 
   // flush the hardware I-cache
-  ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
+  ICache::invalidate_range(cb->content_begin(), cb->content_size());
 }
 
 
@@ -804,8 +804,8 @@
 
       if(nm->method() != NULL && nm->is_java_method()) {
         nmethodJava++;
-        if(nm->code_size() > maxCodeSize) {
-          maxCodeSize = nm->code_size();
+        if (nm->insts_size() > maxCodeSize) {
+          maxCodeSize = nm->insts_size();
         }
       }
     } else if (cb->is_runtime_stub()) {
@@ -830,7 +830,7 @@
     if (cb->is_nmethod()) {
       nmethod* nm = (nmethod*)cb;
       if(nm->is_java_method()) {
-        buckets[nm->code_size() / bucketSize]++;
+        buckets[nm->insts_size() / bucketSize]++;
       }
     }
   }
@@ -896,11 +896,11 @@
     FOR_ALL_BLOBS(p) {
       if (p->is_alive()) {
         number_of_blobs++;
-        code_size += p->instructions_size();
+        code_size += p->code_size();
         OopMapSet* set = p->oop_maps();
         if (set != NULL) {
           number_of_oop_maps += set->size();
-          map_size   += set->heap_size();
+          map_size           += set->heap_size();
         }
       }
     }
--- a/hotspot/src/share/vm/code/exceptionHandlerTable.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/exceptionHandlerTable.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,8 +219,8 @@
 
 void ImplicitExceptionTable::verify(nmethod *nm) const {
   for (uint i = 0; i < len(); i++) {
-     if ((*adr(i) > (unsigned int)nm->code_size()) ||
-         (*(adr(i)+1) > (unsigned int)nm->code_size()))
+     if ((*adr(i) > (unsigned int)nm->insts_size()) ||
+         (*(adr(i)+1) > (unsigned int)nm->insts_size()))
        fatal(err_msg("Invalid offset in ImplicitExceptionTable at " PTR_FORMAT, _data));
   }
 }
--- a/hotspot/src/share/vm/code/nmethod.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -87,9 +87,9 @@
   int nmethod_count;
   int total_size;
   int relocation_size;
-  int code_size;
+  int consts_size;
+  int insts_size;
   int stub_size;
-  int consts_size;
   int scopes_data_size;
   int scopes_pcs_size;
   int dependencies_size;
@@ -101,9 +101,9 @@
     nmethod_count += 1;
     total_size          += nm->size();
     relocation_size     += nm->relocation_size();
-    code_size           += nm->code_size();
+    consts_size         += nm->consts_size();
+    insts_size          += nm->insts_size();
     stub_size           += nm->stub_size();
-    consts_size         += nm->consts_size();
     oops_size           += nm->oops_size();
     scopes_data_size    += nm->scopes_data_size();
     scopes_pcs_size     += nm->scopes_pcs_size();
@@ -116,9 +116,9 @@
     tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
     if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
     if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
-    if (code_size != 0)           tty->print_cr(" main code      = %d", code_size);
+    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
+    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
     if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
-    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
     if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
     if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
     if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
@@ -130,13 +130,13 @@
   int native_nmethod_count;
   int native_total_size;
   int native_relocation_size;
-  int native_code_size;
+  int native_insts_size;
   int native_oops_size;
   void note_native_nmethod(nmethod* nm) {
     native_nmethod_count += 1;
     native_total_size       += nm->size();
     native_relocation_size  += nm->relocation_size();
-    native_code_size        += nm->code_size();
+    native_insts_size       += nm->insts_size();
     native_oops_size        += nm->oops_size();
   }
   void print_native_nmethod_stats() {
@@ -144,7 +144,7 @@
     tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
     if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
     if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
-    if (native_code_size != 0)        tty->print_cr(" N. main code   = %d", native_code_size);
+    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
     if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
   }
 
@@ -404,9 +404,9 @@
 
 int nmethod::total_size() const {
   return
-    code_size()          +
+    consts_size()        +
+    insts_size()         +
     stub_size()          +
-    consts_size()        +
     scopes_data_size()   +
     scopes_pcs_size()    +
     handler_table_size() +
@@ -618,8 +618,8 @@
     _deoptimize_mh_offset    = 0;
     _orig_pc_offset          = 0;
 
+    _consts_offset           = data_offset();
     _stub_offset             = data_offset();
-    _consts_offset           = data_offset();
     _oops_offset             = data_offset();
     _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
     _scopes_pcs_offset       = _scopes_data_offset;
@@ -629,8 +629,8 @@
     _nmethod_end_offset      = _nul_chk_table_offset;
     _compile_id              = 0;  // default
     _comp_level              = CompLevel_none;
-    _entry_point             = instructions_begin();
-    _verified_entry_point    = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry);
+    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
     _osr_entry_point         = NULL;
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
@@ -696,8 +696,8 @@
     _unwind_handler_offset   = -1;
     _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
     _orig_pc_offset          = 0;
+    _consts_offset           = data_offset();
     _stub_offset             = data_offset();
-    _consts_offset           = data_offset();
     _oops_offset             = data_offset();
     _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
     _scopes_pcs_offset       = _scopes_data_offset;
@@ -707,8 +707,8 @@
     _nmethod_end_offset      = _nul_chk_table_offset;
     _compile_id              = 0;  // default
     _comp_level              = CompLevel_none;
-    _entry_point             = instructions_begin();
-    _verified_entry_point    = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry);
+    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
     _osr_entry_point         = NULL;
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
@@ -787,18 +787,25 @@
     _comp_level              = comp_level;
     _compiler                = compiler;
     _orig_pc_offset          = orig_pc_offset;
-    _stub_offset             = instructions_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start());
+
+    // Section offsets
+    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
+    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
 
     // Exception handler and deopt handler are in the stub section
-    _exception_offset        = _stub_offset + offsets->value(CodeOffsets::Exceptions);
-    _deoptimize_offset       = _stub_offset + offsets->value(CodeOffsets::Deopt);
-    _deoptimize_mh_offset    = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
+    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
+    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
+    if (has_method_handle_invokes()) {
+      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
+    } else {
+      _deoptimize_mh_offset  = -1;
+    }
     if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
-      _unwind_handler_offset   = instructions_offset() + offsets->value(CodeOffsets::UnwindHandler);
+      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
     } else {
-      _unwind_handler_offset   = -1;
+      _unwind_handler_offset = -1;
     }
-    _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
+
     _oops_offset             = data_offset();
     _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size (), oopSize);
     _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
@@ -807,9 +814,9 @@
     _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
     _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
 
-    _entry_point             = instructions_begin();
-    _verified_entry_point    = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point         = instructions_begin() + offsets->value(CodeOffsets::OSR_Entry);
+    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
+    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(scopes_pcs_begin());
 
@@ -860,9 +867,9 @@
   if (compiler() != NULL) {
     log->print(" compiler='%s'", compiler()->name());
   }
-#ifdef TIERED
-  log->print(" level='%d'", comp_level());
-#endif // TIERED
+  if (TieredCompilation) {
+    log->print(" level='%d'", comp_level());
+  }
 }
 
 
@@ -878,14 +885,13 @@
     HandleMark hm;
     xtty->begin_elem("nmethod");
     log_identity(xtty);
-    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'",
-                instructions_begin(), size());
+    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
     xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 
     LOG_OFFSET(xtty, relocation);
-    LOG_OFFSET(xtty, code);
+    LOG_OFFSET(xtty, consts);
+    LOG_OFFSET(xtty, insts);
     LOG_OFFSET(xtty, stub);
-    LOG_OFFSET(xtty, consts);
     LOG_OFFSET(xtty, scopes_data);
     LOG_OFFSET(xtty, scopes_pcs);
     LOG_OFFSET(xtty, dependencies);
@@ -902,35 +908,72 @@
 #undef LOG_OFFSET
 
 
+void nmethod::print_compilation(outputStream *st, const char *method_name, const char *title,
+                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level) {
+  bool is_synchronized = false, has_xhandler = false, is_native = false;
+  int code_size = -1;
+  if (method != NULL) {
+    is_synchronized = method->is_synchronized();
+    has_xhandler    = method->has_exception_handler();
+    is_native       = method->is_native();
+    code_size       = method->code_size();
+  }
+  // print compilation number
+  st->print("%7d %3d", (int)tty->time_stamp().milliseconds(), compile_id);
+
+  // print method attributes
+  const bool is_osr = bci != InvocationEntryBci;
+  const char blocking_char  = is_blocking     ? 'b' : ' ';
+  const char compile_type   = is_osr          ? '%' : ' ';
+  const char sync_char      = is_synchronized ? 's' : ' ';
+  const char exception_char = has_xhandler    ? '!' : ' ';
+  const char native_char    = is_native       ? 'n' : ' ';
+  st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
+  if (TieredCompilation) {
+    st->print("%d ", comp_level);
+  }
+
+  // print optional title
+  bool do_nl = false;
+  if (title != NULL) {
+    int tlen = (int) strlen(title);
+    if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
+    st->print("%.*s", tlen, title);
+  } else {
+    do_nl = true;
+  }
+
+  // print method name string if given
+  if (method_name != NULL) {
+    st->print("%s", method_name);
+  } else {
+    // otherwise ask the method to print itself
+    if (method != NULL && !Universe::heap()->is_gc_active()) {
+      method->print_short_name(st);
+    } else {
+      st->print("(method)");
+    }
+  }
+
+  if (method != NULL) {
+    // print osr_bci if any
+    if (is_osr) st->print(" @ %d", bci);
+    // print method size
+    st->print(" (%d bytes)", code_size);
+  }
+  if (do_nl) st->cr();
+}
+
 // Print out more verbose output usually for a newly created nmethod.
 void nmethod::print_on(outputStream* st, const char* title) const {
   if (st != NULL) {
     ttyLocker ttyl;
-    // Print a little tag line that looks like +PrintCompilation output:
-    int tlen = (int) strlen(title);
-    bool do_nl = false;
-    if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
-    st->print("%3d%c  %.*s",
-              compile_id(),
-              is_osr_method() ? '%' :
-              method() != NULL &&
-              is_native_method() ? 'n' : ' ',
-              tlen, title);
-#ifdef TIERED
-    st->print(" (%d) ", comp_level());
-#endif // TIERED
+    print_compilation(st, /*method_name*/NULL, title,
+                      method(), /*is_blocking*/false,
+                      compile_id(),
+                      is_osr_method() ? osr_entry_bci() : InvocationEntryBci,
+                      comp_level());
     if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
-    if (Universe::heap()->is_gc_active() && method() != NULL) {
-      st->print("(method)");
-    } else if (method() != NULL) {
-        method()->print_short_name(st);
-      if (is_osr_method())
-        st->print(" @ %d", osr_entry_bci());
-      if (method()->code_size() > 0)
-        st->print(" (%d bytes)", method()->code_size());
-    }
-
-    if (do_nl)  st->cr();
   }
 }
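
For orientation, the five attribute columns printed by nmethod::print_compilation correspond to the classic +PrintCompilation markers: '%' for an OSR compile, 's' for synchronized, '!' for a method with an exception handler, 'b' for a blocking compile, and 'n' for a native method. A minimal stand-alone sketch of that column logic (print_attrs and the use of plain printf are illustrative, not HotSpot code):

    #include <cstdio>

    // Illustrative only: mirrors the attribute columns emitted above.
    static void print_attrs(bool is_osr, bool is_sync, bool has_xhandler,
                            bool is_blocking, bool is_native) {
      printf("%c%c%c%c%c ",
             is_osr       ? '%' : ' ',   // OSR compile
             is_sync      ? 's' : ' ',   // synchronized method
             has_xhandler ? '!' : ' ',   // has exception handler
             is_blocking  ? 'b' : ' ',   // blocking compile
             is_native    ? 'n' : ' ');  // native method
    }
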
 
@@ -1131,6 +1175,7 @@
 }
 
 void nmethod::inc_decompile_count() {
+  if (!is_compiled_by_c2()) return;
   // Could be gated by ProfileTraps, but do not bother...
   methodOop m = method();
   if (m == NULL)  return;
@@ -1460,7 +1505,7 @@
       moop->name()->utf8_length(),
       moop->signature()->bytes(),
       moop->signature()->utf8_length(),
-      code_begin(), code_size());
+      insts_begin(), insts_size());
 
   if (JvmtiExport::should_post_compiled_method_load() ||
       JvmtiExport::should_post_compiled_method_unload()) {
@@ -1502,7 +1547,7 @@
   if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     HandleMark hm;
-    JvmtiExport::post_compiled_method_unload(_jmethod_id, code_begin());
+    JvmtiExport::post_compiled_method_unload(_jmethod_id, insts_begin());
   }
 
   // The JVMTI CompiledMethodUnload event can be enabled or disabled at
@@ -1854,7 +1899,7 @@
   // Adjust the final sentinel downward.
   PcDesc* last_pc = &scopes_pcs_begin()[count-1];
   assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
-  last_pc->set_pc_offset(instructions_size() + 1);
+  last_pc->set_pc_offset(content_size() + 1);
   for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
     // Fill any rounding gaps with copies of the last record.
     last_pc[1] = last_pc[0];
@@ -1894,7 +1939,7 @@
 
 // Finds a PcDesc with real-pc equal to "pc"
 PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
-  address base_address = instructions_begin();
+  address base_address = code_begin();
   if ((pc < base_address) ||
       (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
     return NULL;  // PC is wildly out of range
@@ -2042,7 +2087,7 @@
 
 
 bool nmethod::is_patchable_at(address instr_addr) {
-  assert (code_contains(instr_addr), "wrong nmethod used");
+  assert(insts_contains(instr_addr), "wrong nmethod used");
   if (is_zombie()) {
     // a zombie may never be patched
     return false;
@@ -2054,7 +2099,7 @@
 address nmethod::continuation_for_implicit_exception(address pc) {
   // Exception happened outside inline-cache check code => we are inside
   // an active nmethod => use cpc to determine a return address
-  int exception_offset = pc - instructions_begin();
+  int exception_offset = pc - code_begin();
   int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
 #ifdef ASSERT
   if (cont_offset == 0) {
@@ -2075,7 +2120,7 @@
     // Let the normal error handling report the exception
     return NULL;
   }
-  return instructions_begin() + cont_offset;
+  return code_begin() + cont_offset;
 }
 
 
@@ -2334,18 +2379,18 @@
                                               relocation_begin(),
                                               relocation_end(),
                                               relocation_size());
-  if (code_size         () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              code_begin(),
-                                              code_end(),
-                                              code_size());
+  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              consts_begin(),
+                                              consts_end(),
+                                              consts_size());
+  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              insts_begin(),
+                                              insts_end(),
+                                              insts_size());
   if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                               stub_begin(),
                                               stub_end(),
                                               stub_size());
-  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              consts_begin(),
-                                              consts_end(),
-                                              consts_size());
   if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                               oops_begin(),
                                               oops_end(),
@@ -2370,10 +2415,6 @@
                                               nul_chk_table_begin(),
                                               nul_chk_table_end(),
                                               nul_chk_table_size());
-  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              oops_begin(),
-                                              oops_end(),
-                                              oops_size());
 }
 
 void nmethod::print_code() {
@@ -2607,7 +2648,7 @@
   // First, find an oopmap in (begin, end].
   // We use the odd half-closed interval so that oop maps and scope descs
   // which are tied to the byte after a call are printed with the call itself.
-  address base = instructions_begin();
+  address base = code_begin();
   OopMapSet* oms = oop_maps();
   if (oms != NULL) {
     for (int i = 0, imax = oms->size(); i < imax; i++) {
@@ -2695,10 +2736,10 @@
     st->move_to(column);
     st->print(";   {%s}", str);
   }
-  int cont_offset = ImplicitExceptionTable(this).at(begin - instructions_begin());
+  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
   if (cont_offset != 0) {
     st->move_to(column);
-    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, instructions_begin() + cont_offset);
+    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
   }
 
 }
@@ -2732,7 +2773,7 @@
 }
 
 void nmethod::print_nul_chk_table() {
-  ImplicitExceptionTable(this).print(instructions_begin());
+  ImplicitExceptionTable(this).print(code_begin());
 }
 
 void nmethod::print_statistics() {
--- a/hotspot/src/share/vm/code/nmethod.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -143,8 +143,8 @@
 #ifdef HAVE_DTRACE_H
   int _trap_offset;
 #endif // def HAVE_DTRACE_H
+  int _consts_offset;
   int _stub_offset;
-  int _consts_offset;
   int _oops_offset;                       // offset to where embedded oop table begins (inside data)
   int _scopes_data_offset;
   int _scopes_pcs_offset;
@@ -312,7 +312,7 @@
                                      int frame_size);
 
   int trap_offset() const      { return _trap_offset; }
-  address trap_address() const { return code_begin() + _trap_offset; }
+  address trap_address() const { return insts_begin() + _trap_offset; }
 
 #endif // def HAVE_DTRACE_H
 
@@ -336,16 +336,16 @@
   bool is_compiled_by_shark() const;
 
   // boundaries for different parts
-  address code_begin            () const          { return _entry_point; }
-  address code_end              () const          { return           header_begin() + _stub_offset          ; }
+  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
+  address consts_end            () const          { return           header_begin() +  code_offset()        ; }
+  address insts_begin           () const          { return           header_begin() +  code_offset()        ; }
+  address insts_end             () const          { return           header_begin() + _stub_offset          ; }
+  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
+  address stub_end              () const          { return           header_begin() + _oops_offset          ; }
   address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
   address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
   address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
   address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
-  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
-  address stub_end              () const          { return           header_begin() + _consts_offset        ; }
-  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
-  address consts_end            () const          { return           header_begin() + _oops_offset          ; }
   oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
   oop*    oops_end              () const          { return (oop*)   (header_begin() + _scopes_data_offset)  ; }
 
@@ -361,9 +361,9 @@
   address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }
 
   // Sizes
-  int code_size         () const                  { return            code_end         () -            code_begin         (); }
+  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
+  int insts_size        () const                  { return            insts_end        () -            insts_begin        (); }
   int stub_size         () const                  { return            stub_end         () -            stub_begin         (); }
-  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
   int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
   int scopes_data_size  () const                  { return            scopes_data_end  () -            scopes_data_begin  (); }
   int scopes_pcs_size   () const                  { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
@@ -374,9 +374,9 @@
   int total_size        () const;
 
   // Containment
-  bool code_contains         (address addr) const { return code_begin         () <= addr && addr < code_end         (); }
+  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
+  bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
   bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
-  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
   bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
   bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
   bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
@@ -506,7 +506,7 @@
   void clear_inline_caches();
   void cleanup_inline_caches();
   bool inlinecache_check_contains(address addr) const {
-    return (addr >= instructions_begin() && addr < verified_entry_point());
+    return (addr >= code_begin() && addr < verified_entry_point());
   }
 
   // unlink and deallocate this nmethod
@@ -559,7 +559,7 @@
 
   PcDesc* find_pc_desc(address pc, bool approximate) {
     PcDesc* desc = _pc_desc_cache.last_pc_desc();
-    if (desc != NULL && desc->pc_offset() == pc - instructions_begin()) {
+    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
       return desc;
     }
     return find_pc_desc_internal(pc, approximate);
@@ -599,6 +599,10 @@
   void verify_scopes();
   void verify_interrupt_point(address interrupt_point);
 
+  // print compilation helper
+  static void print_compilation(outputStream *st, const char *method_name, const char *title,
+                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level);
+
   // printing support
   void print()                          const;
   void print_code();
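
Taken together, the reordered offsets and accessors above fix the new in-blob section order: constants first, then instructions, then stubs, with each section ending exactly where the next begins. A hypothetical consistency check that makes the invariant explicit (verify_section_order is not part of this patch):

    // Hypothetical member sketch; follows directly from the accessors above.
    void nmethod::verify_section_order() {
      assert(consts_end() == insts_begin(),          "consts abut insts");
      assert(insts_end()  == stub_begin(),           "insts abut stubs");
      assert(stub_end()   == (address) oops_begin(), "stubs abut oops");
    }
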
--- a/hotspot/src/share/vm/code/pcDesc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/pcDesc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -34,7 +34,7 @@
 }
 
 address PcDesc::real_pc(const nmethod* code) const {
-  return code->instructions_begin() + pc_offset();
+  return code->code_begin() + pc_offset();
 }
 
 void PcDesc::print(nmethod* code) {
--- a/hotspot/src/share/vm/code/relocInfo.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/relocInfo.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -128,13 +128,20 @@
   _code    = nm;
   _current = nm->relocation_begin() - 1;
   _end     = nm->relocation_end();
-  _addr    = (address) nm->instructions_begin();
+  _addr    = nm->content_begin();
+
+  // Initialize code sections.
+  _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin();
+  _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ;
+  _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin()  ;
+
+  _section_end  [CodeBuffer::SECT_CONSTS] = nm->consts_end()  ;
+  _section_end  [CodeBuffer::SECT_INSTS ] = nm->insts_end()   ;
+  _section_end  [CodeBuffer::SECT_STUBS ] = nm->stub_end()    ;
 
   assert(!has_current(), "just checking");
-  address code_end = nm->instructions_end();
-
-  assert(begin == NULL || begin >= nm->instructions_begin(), "in bounds");
- // FIX THIS  assert(limit == NULL || limit <= code_end,     "in bounds");
+  assert(begin == NULL || begin >= nm->code_begin(), "in bounds");
+  assert(limit == NULL || limit <= nm->code_end(),   "in bounds");
   set_limits(begin, limit);
 }
 
@@ -148,9 +155,11 @@
   _code    = NULL; // Not cb->blob();
 
   CodeBuffer* cb = cs->outer();
-  assert((int)SECT_LIMIT == CodeBuffer::SECT_LIMIT, "my copy must be equal");
-  for (int n = 0; n < (int)SECT_LIMIT; n++) {
-    _section_start[n] = cb->code_section(n)->start();
+  assert((int) SECT_LIMIT == CodeBuffer::SECT_LIMIT, "my copy must be equal");
+  for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
+    CodeSection* cs = cb->code_section(n);
+    _section_start[n] = cs->start();
+    _section_end  [n] = cs->end();
   }
 
   assert(!has_current(), "just checking");
@@ -168,6 +177,12 @@
 };
 
 
+bool RelocIterator::addr_in_const() const {
+  const int n = CodeBuffer::SECT_CONSTS;
+  return section_start(n) <= addr() && addr() < section_end(n);
+}
+
+
 static inline int num_cards(int code_size) {
   return (code_size-1) / indexCardSize;
 }
@@ -267,7 +282,7 @@
       // skip ahead
       RelocIndexEntry* index       = (RelocIndexEntry*)_end;
       RelocIndexEntry* index_limit = (RelocIndexEntry*)((address)index + index_size);
-      assert(_addr == _code->instructions_begin(), "_addr must be unadjusted");
+      assert(_addr == _code->code_begin(), "_addr must be unadjusted");
       int card = (begin - _addr) / indexCardSize;
       if (card > 0) {
         if (index+card-1 < index_limit)  index += card-1;
@@ -362,31 +377,12 @@
 }
 
 
-address RelocIterator::compute_section_start(int n) const {
-// This routine not only computes a section start, but also
-// memoizes it for later.
-#define CACHE ((RelocIterator*)this)->_section_start[n]
-  CodeBlob* cb = code();
-  guarantee(cb != NULL, "must have a code blob");
-  if (n == CodeBuffer::SECT_INSTS)
-    return CACHE = cb->instructions_begin();
-  assert(cb->is_nmethod(), "only nmethods have these sections");
-  nmethod* nm = (nmethod*) cb;
-  address res = NULL;
-  switch (n) {
-  case CodeBuffer::SECT_STUBS:
-    res = nm->stub_begin();
-    break;
-  case CodeBuffer::SECT_CONSTS:
-    res = nm->consts_begin();
-    break;
-  default:
-    ShouldNotReachHere();
+void RelocIterator::initialize_misc() {
+  set_has_current(false);
+  for (int i = (int) CodeBuffer::SECT_FIRST; i < (int) CodeBuffer::SECT_LIMIT; i++) {
+    _section_start[i] = NULL;  // these will be lazily computed, if needed
+    _section_end  [i] = NULL;
   }
-  assert(nm->contains(res) || res == nm->instructions_end(), "tame pointer");
-  CACHE = res;
-  return res;
-#undef CACHE
 }
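
With both section bounds now captured eagerly, a membership test reduces to a plain range check; addr_in_const() above is exactly that for SECT_CONSTS. An illustrative generalization to any section (addr_in_section is hypothetical, not part of the patch):

    // Hypothetical: same shape as addr_in_const(), for an arbitrary section.
    bool RelocIterator::addr_in_section(int n) const {
      return section_start(n) <= addr() && addr() < section_end(n);
    }
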
 
 
--- a/hotspot/src/share/vm/code/relocInfo.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/relocInfo.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -502,8 +502,7 @@
 //   }
 
 class RelocIterator : public StackObj {
-  enum { SECT_CONSTS = 2,
-         SECT_LIMIT = 3 };  // must be equal to CodeBuffer::SECT_LIMIT
+  enum { SECT_LIMIT = 3 };  // must be equal to CodeBuffer::SECT_LIMIT, checked in ctor
   friend class Relocation;
   friend class relocInfo;       // for change_reloc_info_for_address only
   typedef relocInfo::relocType relocType;
@@ -521,6 +520,7 @@
 
   // Base addresses needed to compute targets of section_word_type relocs.
   address    _section_start[SECT_LIMIT];
+  address    _section_end  [SECT_LIMIT];
 
   void set_has_current(bool b) {
     _datalen = !b ? -1 : 0;
@@ -540,14 +540,7 @@
 
   void advance_over_prefix();    // helper method
 
-  void initialize_misc() {
-    set_has_current(false);
-    for (int i = 0; i < SECT_LIMIT; i++) {
-      _section_start[i] = NULL;  // these will be lazily computed, if needed
-    }
-  }
-
-  address compute_section_start(int n) const;  // out-of-line helper
+  void initialize_misc();
 
   void initialize(nmethod* nm, address begin, address limit);
 
@@ -598,11 +591,15 @@
   bool     has_current()      const { return _datalen >= 0; }
 
   void       set_addr(address addr) { _addr = addr; }
-  bool   addr_in_const()      const { return addr() >= section_start(SECT_CONSTS); }
+  bool   addr_in_const()      const;
 
   address section_start(int n) const {
-    address res = _section_start[n];
-    return (res != NULL) ? res : compute_section_start(n);
+    assert(_section_start[n], "must be initialized");
+    return _section_start[n];
+  }
+  address section_end(int n) const {
+    assert(_section_end[n], "must be initialized");
+    return _section_end[n];
   }
 
   // The address points to the affected displacement part of the instruction.
--- a/hotspot/src/share/vm/code/scopeDesc.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/scopeDesc.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -174,7 +174,7 @@
   print_value_on(st);
   // decode offsets
   if (WizardMode) {
-    st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->instructions_begin());
+    st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->content_begin());
     st->print_cr(" offset:     %d",    _decode_offset);
     st->print_cr(" bci:        %d",    bci());
     st->print_cr(" reexecute:  %s",    should_reexecute() ? "true" : "false");
--- a/hotspot/src/share/vm/code/stubs.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/stubs.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,9 +66,9 @@
     vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name));
   }
   _stub_interface  = stub_interface;
-  _buffer_size     = blob->instructions_size();
-  _buffer_limit    = blob->instructions_size();
-  _stub_buffer     = blob->instructions_begin();
+  _buffer_size     = blob->content_size();
+  _buffer_limit    = blob->content_size();
+  _stub_buffer     = blob->content_begin();
   _queue_begin     = 0;
   _queue_end       = 0;
   _number_of_stubs = 0;
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -48,7 +48,7 @@
     if (blob == NULL) {
       vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks");
     }
-    _chunk = blob->instructions_begin();
+    _chunk = blob->content_begin();
     _chunk_end = _chunk + bytes;
     Forte::register_stub("vtable stub", _chunk, _chunk_end);
     // Notify JVMTI about this stub. The event will be recorded by the enclosing
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -123,20 +123,12 @@
 int CompileBroker::_sum_nmethod_size             = 0;
 int CompileBroker::_sum_nmethod_code_size        = 0;
 
-CompileQueue* CompileBroker::_method_queue   = NULL;
+CompileQueue* CompileBroker::_c2_method_queue   = NULL;
+CompileQueue* CompileBroker::_c1_method_queue   = NULL;
 CompileTask*  CompileBroker::_task_free_list = NULL;
 
 GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
 
-// CompileTaskWrapper
-//
-// Assign this task to the current thread.  Deallocate the task
-// when the compilation is complete.
-class CompileTaskWrapper : StackObj {
-public:
-  CompileTaskWrapper(CompileTask* task);
-  ~CompileTaskWrapper();
-};
 
 CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
   CompilerThread* thread = CompilerThread::current();
@@ -246,6 +238,12 @@
              bool_to_str(_is_complete), bool_to_str(_is_success));
 }
 
+
+void CompileTask::print_compilation(outputStream *st, methodOop method, char* method_name) {
+  nmethod::print_compilation(st, method_name,/*title*/ NULL, method,
+                             is_blocking(), compile_id(), osr_bci(), comp_level());
+}
+
 // ------------------------------------------------------------------
 // CompileTask::print_line_on_error
 //
@@ -258,32 +256,13 @@
 //
 void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
   methodOop method = (methodOop)JNIHandles::resolve(_method);
-
   // print compiler name
   st->print("%s:", CompileBroker::compiler(comp_level())->name());
-
-  // print compilation number
-  st->print("%3d", compile_id());
-
-  // print method attributes
-  const bool is_osr = osr_bci() != CompileBroker::standard_entry_bci;
-  { const char blocking_char  = is_blocking()                      ? 'b' : ' ';
-    const char compile_type   = is_osr                             ? '%' : ' ';
-    const char sync_char      = method->is_synchronized()          ? 's' : ' ';
-    const char exception_char = method->has_exception_handler()    ? '!' : ' ';
-    const char tier_char      =
-      is_highest_tier_compile(comp_level())                        ? ' ' : ('0' + comp_level());
-    st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
+  char* method_name = NULL;
+  if (method != NULL) {
+    method_name = method->name_and_sig_as_C_string(buf, buflen);
   }
-
-  // Use buf to get method name and signature
-  if (method != NULL) st->print("%s", method->name_and_sig_as_C_string(buf, buflen));
-
-  // print osr_bci if any
-  if (is_osr) st->print(" @ %d", osr_bci());
-
-  // print method size
-  st->print_cr(" (%d bytes)", method->code_size());
+  print_compilation(st, method, method_name);
 }
 
 // ------------------------------------------------------------------
@@ -298,29 +277,7 @@
 
   // print compiler name if requested
   if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler(comp_level())->name());
-
-  // print compilation number
-  tty->print("%3d", compile_id());
-
-  // print method attributes
-  const bool is_osr = osr_bci() != CompileBroker::standard_entry_bci;
-  { const char blocking_char  = is_blocking()                      ? 'b' : ' ';
-    const char compile_type   = is_osr                             ? '%' : ' ';
-    const char sync_char      = method->is_synchronized()          ? 's' : ' ';
-    const char exception_char = method->has_exception_handler()    ? '!' : ' ';
-    const char tier_char      =
-      is_highest_tier_compile(comp_level())                        ? ' ' : ('0' + comp_level());
-    tty->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
-  }
-
-  // print method name
-  method->print_short_name(tty);
-
-  // print osr_bci if any
-  if (is_osr) tty->print(" @ %d", osr_bci());
-
-  // print method size
-  tty->print_cr(" (%d bytes)", method->code_size());
+  print_compilation(tty, method(), NULL);
 }
 
 
@@ -399,7 +356,7 @@
   // <task_done ... stamp='1.234'>  </task>
   nmethod* nm = code();
   log->begin_elem("task_done success='%d' nmsize='%d' count='%d'",
-                  _is_success, nm == NULL ? 0 : nm->instructions_size(),
+                  _is_success, nm == NULL ? 0 : nm->content_size(),
                   method->invocation_count());
   int bec = method->backedge_count();
   if (bec != 0)  log->print(" backedge_count='%d'", bec);
@@ -427,6 +384,7 @@
   assert(lock()->owned_by_self(), "must own lock");
 
   task->set_next(NULL);
+  task->set_prev(NULL);
 
   if (_last == NULL) {
     // The compile queue is empty.
@@ -437,8 +395,10 @@
     // Append the task to the queue.
     assert(_last->next() == NULL, "not last");
     _last->set_next(task);
+    task->set_prev(_last);
     _last = task;
   }
+  ++_size;
 
   // Mark the method as being in the compile queue.
   ((methodOop)JNIHandles::resolve(task->method_handle()))->set_queued_for_compilation();
@@ -452,10 +412,9 @@
   }
 
   // Notify CompilerThreads that a task is available.
-  lock()->notify();
+  lock()->notify_all();
 }
 
-
 // ------------------------------------------------------------------
 // CompileQueue::get
 //
@@ -464,7 +423,6 @@
   NMethodSweeper::possibly_sweep();
 
   MutexLocker locker(lock());
-
   // Wait for an available CompileTask.
   while (_first == NULL) {
     // There is no work to be done right now.  Wait.
@@ -481,20 +439,32 @@
       lock()->wait();
     }
   }
-
-  CompileTask* task = _first;
+  CompileTask* task = CompilationPolicy::policy()->select_task(this);
+  remove(task);
+  return task;
+}
 
-  // Update queue first and last
-  _first =_first->next();
-  if (_first == NULL) {
-    _last = NULL;
+void CompileQueue::remove(CompileTask* task)
+{
+  assert(lock()->owned_by_self(), "must own lock");
+  if (task->prev() != NULL) {
+    task->prev()->set_next(task->next());
+  } else {
+    // task is the first element
+    assert(task == _first, "Sanity");
+    _first = task->next();
   }
 
-  return task;
-
+  if (task->next() != NULL) {
+    task->next()->set_prev(task->prev());
+  } else {
+    // task is the last element
+    assert(task == _last, "Sanity");
+    _last = task->prev();
+  }
+  --_size;
 }
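
The unlink above is a standard doubly-linked removal, enabled by the new _prev link. A stand-alone model of the same four cases, assuming nothing about HotSpot (Node, first and last are hypothetical stand-ins for CompileTask, _first and _last):

    #include <cstddef>

    struct Node { Node* prev; Node* next; };

    // Interior, head, tail, and sole-element cases, as in CompileQueue::remove().
    static void unlink(Node** first, Node** last, Node* t) {
      if (t->prev != NULL) t->prev->next = t->next; else *first = t->next;
      if (t->next != NULL) t->next->prev = t->prev; else *last  = t->prev;
    }
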
 
-
 // ------------------------------------------------------------------
 // CompileQueue::print
 void CompileQueue::print() {
@@ -545,7 +515,6 @@
   }
 }
 
-
 // ------------------------------------------------------------------
 // CompileBroker::compilation_init
 //
@@ -554,18 +523,18 @@
   _last_method_compiled[0] = '\0';
 
   // Set the interface to the current compiler(s).
+  int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
+  int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
 #ifdef COMPILER1
-  _compilers[0] = new Compiler();
-#ifndef COMPILER2
-  _compilers[1] = _compilers[0];
-#endif
+  if (c1_count > 0) {
+    _compilers[0] = new Compiler();
+  }
 #endif // COMPILER1
 
 #ifdef COMPILER2
-  _compilers[1] = new C2Compiler();
-#ifndef COMPILER1
-  _compilers[0] = _compilers[1];
-#endif
+  if (c2_count > 0) {
+    _compilers[1] = new C2Compiler();
+  }
 #endif // COMPILER2
 
 #ifdef SHARK
@@ -580,9 +549,7 @@
   _task_free_list = NULL;
 
   // Start the CompilerThreads
-  init_compiler_threads(compiler_count());
-
-
+  init_compiler_threads(c1_count, c2_count);
   // totalTime performance counter is always created as it is required
   // by the implementation of java.lang.management.CompilationMBean.
   {
@@ -770,23 +737,38 @@
 // CompileBroker::init_compiler_threads
 //
 // Initialize the compilation queue
-void CompileBroker::init_compiler_threads(int compiler_count) {
+void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
   EXCEPTION_MARK;
+  assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
+  if (c2_compiler_count > 0) {
+    _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
+  }
+  if (c1_compiler_count > 0) {
+    _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
+  }
 
-  _method_queue  = new CompileQueue("MethodQueue",  MethodCompileQueue_lock);
+  int compiler_count = c1_compiler_count + c2_compiler_count;
+
   _method_threads =
     new (ResourceObj::C_HEAP) GrowableArray<CompilerThread*>(compiler_count, true);
 
   char name_buffer[256];
-  int i;
-  for (i = 0; i < compiler_count; i++) {
+  for (int i = 0; i < c2_compiler_count; i++) {
     // Create a name for our thread.
-    sprintf(name_buffer, "CompilerThread%d", i);
+    sprintf(name_buffer, "C2 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
-
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _method_queue, counters, CHECK);
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
     _method_threads->append(new_thread);
   }
+
+  for (int i = c2_compiler_count; i < compiler_count; i++) {
+    // Create a name for our thread.
+    sprintf(name_buffer, "C1 CompilerThread%d", i);
+    CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
+    _method_threads->append(new_thread);
+  }
+
   if (UsePerfData) {
     PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
                                      compiler_count, CHECK);
@@ -796,7 +778,9 @@
 // ------------------------------------------------------------------
 // CompileBroker::is_idle
 bool CompileBroker::is_idle() {
-  if (!_method_queue->is_empty()) {
+  if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
+    return false;
+  } else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
     return false;
   } else {
     int num_threads = _method_threads->length();
@@ -859,6 +843,7 @@
     return;
   }
 
+
   // If this method is already in the compile queue, then
   // we do not block the current thread.
   if (compilation_is_in_queue(method, osr_bci)) {
@@ -876,10 +861,11 @@
   // Outputs from the following MutexLocker block:
   CompileTask* task     = NULL;
   bool         blocking = false;
+  CompileQueue* queue  = compile_queue(comp_level);
 
   // Acquire our lock.
   {
-    MutexLocker locker(_method_queue->lock(), THREAD);
+    MutexLocker locker(queue->lock(), THREAD);
 
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
@@ -945,7 +931,7 @@
     // and in that case it's best to protect both the testing (here) of
     // these bits, and their updating (here and elsewhere) under a
     // common lock.
-    task = create_compile_task(_method_queue,
+    task = create_compile_task(queue,
                                compile_id, method,
                                osr_bci, comp_level,
                                hot_method, hot_count, comment,
@@ -959,6 +945,7 @@
 
 
 nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
+                                       int comp_level,
                                        methodHandle hot_method, int hot_count,
                                        const char* comment, TRAPS) {
   // make sure arguments make sense
@@ -967,26 +954,9 @@
   assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
   assert(!instanceKlass::cast(method->method_holder())->is_not_initialized(), "method holder must be initialized");
 
-  int comp_level = CompilationPolicy::policy()->compilation_level(method, osr_bci);
-
-#ifdef TIERED
-  if (TieredCompilation && StressTieredRuntime) {
-    static int flipper = 0;
-    if (is_even(flipper++)) {
-      comp_level = CompLevel_fast_compile;
-    } else {
-      comp_level = CompLevel_full_optimization;
-    }
+  if (!TieredCompilation) {
+    comp_level = CompLevel_highest_tier;
   }
-#ifdef SPARC
-  // QQQ FIX ME
-  // C2 only returns long results in G1 and c1 doesn't understand so disallow c2
-  // compiles of long results
-  if (TieredCompilation && method()->result_type() == T_LONG) {
-    comp_level = CompLevel_fast_compile;
-  }
-#endif // SPARC
-#endif // TIERED
 
   // return quickly if possible
 
@@ -1000,12 +970,10 @@
   if (osr_bci == InvocationEntryBci) {
     // standard compilation
     nmethod* method_code = method->code();
-    if (method_code != NULL
-#ifdef TIERED
-       && ( method_code->is_compiled_by_c2() || comp_level == CompLevel_fast_compile )
-#endif // TIERED
-      ) {
-      return method_code;
+    if (method_code != NULL) {
+      if (compilation_is_complete(method, osr_bci, comp_level)) {
+        return method_code;
+      }
     }
     if (method->is_not_compilable(comp_level)) return NULL;
 
@@ -1021,10 +989,11 @@
     // osr compilation
 #ifndef TIERED
     // seems like an assert of dubious value
-    assert(comp_level == CompLevel_full_optimization,
+    assert(comp_level == CompLevel_highest_tier,
            "all OSR compiles are assumed to be at a single compilation lavel");
 #endif // TIERED
-    nmethod* nm = method->lookup_osr_nmethod_for(osr_bci);
+    // We accept a higher level osr method
+    nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
     if (nm != NULL) return nm;
     if (method->is_not_osr_compilable()) return NULL;
   }
@@ -1071,8 +1040,7 @@
   // If the compiler is shut off due to code cache flushing or otherwise,
   // fail out now so blocking compiles don't hang the Java thread
   if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
-    method->invocation_counter()->decay();
-    method->backedge_counter()->decay();
+    CompilationPolicy::policy()->delay_compilation(method());
     return NULL;
   }
 
@@ -1088,7 +1056,8 @@
   }
 
   // return requested nmethod
-  return osr_bci  == InvocationEntryBci ? method->code() : method->lookup_osr_nmethod_for(osr_bci);
+  // We accept a higher level osr method
+  return osr_bci  == InvocationEntryBci ? method->code() : method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
 }
 
 
@@ -1104,7 +1073,7 @@
     if (method->is_not_osr_compilable()) {
       return true;
     } else {
-      nmethod* result = method->lookup_osr_nmethod_for(osr_bci);
+      nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
       return (result != NULL);
     }
   } else {
@@ -1113,15 +1082,7 @@
     } else {
       nmethod* result = method->code();
       if (result == NULL) return false;
-#ifdef TIERED
-      if (comp_level == CompLevel_fast_compile) {
-        // At worst the code is from c1
-        return true;
-      }
-      // comp level must be full opt
-      return result->is_compiled_by_c2();
-#endif // TIERED
-      return true;
+      return comp_level == result->comp_level();
     }
   }
 }
@@ -1139,11 +1100,10 @@
 // versa).  This can be remedied by a full queue search to disambiguate
 // cases.  If it is deemed profitable, this may be done.
 bool CompileBroker::compilation_is_in_queue(methodHandle method,
-                                          int          osr_bci) {
+                                            int          osr_bci) {
   return method->queued_for_compilation();
 }
 
-
 // ------------------------------------------------------------------
 // CompileBroker::compilation_is_prohibited
 //
@@ -1151,11 +1111,9 @@
 bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level) {
   bool is_native = method->is_native();
   // Some compilers may not support the compilation of natives.
-  // QQQ this needs some work ought to only record not compilable at
-  // the specified level
   if (is_native &&
       (!CICompileNatives || !compiler(comp_level)->supports_native())) {
-    method->set_not_compilable_quietly();
+    method->set_not_compilable_quietly(comp_level);
     return true;
   }
 
@@ -1194,7 +1152,7 @@
 // compilations may be numbered separately from regular compilations
 // if certain debugging flags are used.
 uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
-  assert(_method_queue->lock()->owner() == JavaThread::current(),
+  assert(MethodCompileQueue_lock->owner() == Thread::current(),
          "must hold the compilation queue lock");
   bool is_osr = (osr_bci != standard_entry_bci);
   assert(!method->is_native(), "no longer compile natives");
@@ -1643,7 +1601,6 @@
 #endif
 }
 
-
 // ------------------------------------------------------------------
 // CompileBroker::handle_full_code_cache
 //
@@ -1847,13 +1804,13 @@
     }
 
     // Collect counts of successful compilations
-    _sum_nmethod_size += code->total_size();
-    _sum_nmethod_code_size += code->code_size();
+    _sum_nmethod_size      += code->total_size();
+    _sum_nmethod_code_size += code->insts_size();
     _total_compile_count++;
 
     if (UsePerfData) {
-      _perf_sum_nmethod_size->inc(code->total_size());
-      _perf_sum_nmethod_code_size->inc(code->code_size());
+      _perf_sum_nmethod_size->inc(     code->total_size());
+      _perf_sum_nmethod_code_size->inc(code->insts_size());
       _perf_total_compile_count->inc();
     }
 
@@ -1883,12 +1840,12 @@
                 CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count);
   tty->print_cr("    On stack replacement   : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count);
 
-  if (compiler(CompLevel_fast_compile)) {
-    compiler(CompLevel_fast_compile)->print_timers();
-    if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier))
-      compiler(CompLevel_highest_tier)->print_timers();
+  if (compiler(CompLevel_simple) != NULL) {
+    compiler(CompLevel_simple)->print_timers();
   }
-
+  if (compiler(CompLevel_full_optimization) != NULL) {
+    compiler(CompLevel_full_optimization)->print_timers();
+  }
   tty->cr();
   int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled;
   tty->print_cr("  Total compiled bytecodes : %6d bytes", tcb);
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
   int          _comp_level;
   int          _num_inlined_bytecodes;
   nmethodLocker* _code_handle;  // holder of eventual result
-  CompileTask* _next;
+  CompileTask* _next, *_prev;
 
   // Fields used for logging why the compilation was initiated:
   jlong        _time_queued;  // in units of os::elapsed_counter()
@@ -49,6 +49,7 @@
   int          _hot_count;    // information about its invocation counter
   const char*  _comment;      // more info about the task
 
+  void print_compilation(outputStream *st, methodOop method, char* method_name);
  public:
   CompileTask() {
     _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
@@ -85,15 +86,17 @@
 
   CompileTask* next() const                      { return _next; }
   void         set_next(CompileTask* next)       { _next = next; }
+  CompileTask* prev() const                      { return _prev; }
+  void         set_prev(CompileTask* prev)       { _prev = prev; }
 
   void         print();
   void         print_line();
+
   void         print_line_on_error(outputStream* st, char* buf, int buflen);
   void         log_task(xmlStream* log);
   void         log_task_queued();
   void         log_task_start(CompileLog* log);
   void         log_task_done(CompileLog* log);
-
 };
 
 // CompilerCounters
@@ -141,7 +144,6 @@
     PerfCounter* compile_counter()           { return _perf_compiles; }
 };
 
-
 // CompileQueue
 //
 // A list of CompileTasks.
@@ -153,26 +155,42 @@
   CompileTask* _first;
   CompileTask* _last;
 
+  int _size;
  public:
   CompileQueue(const char* name, Monitor* lock) {
     _name = name;
     _lock = lock;
     _first = NULL;
     _last = NULL;
+    _size = 0;
   }
 
   const char*  name() const                      { return _name; }
   Monitor*     lock() const                      { return _lock; }
 
   void         add(CompileTask* task);
+  void         remove(CompileTask* task);
+  CompileTask* first()                           { return _first; }
+  CompileTask* last()                            { return _last;  }
 
   CompileTask* get();
 
   bool         is_empty() const                  { return _first == NULL; }
+  int          size()     const                  { return _size;          }
 
   void         print();
 };
 
+// CompileTaskWrapper
+//
+// Assign this task to the current thread.  Deallocate the task
+// when the compilation is complete.
+class CompileTaskWrapper : StackObj {
+public:
+  CompileTaskWrapper(CompileTask* task);
+  ~CompileTaskWrapper();
+};
+
 
 // Compilation
 //
@@ -208,7 +226,8 @@
   static int  _last_compile_level;
   static char _last_method_compiled[name_buffer_length];
 
-  static CompileQueue* _method_queue;
+  static CompileQueue* _c2_method_queue;
+  static CompileQueue* _c1_method_queue;
   static CompileTask* _task_free_list;
 
   static GrowableArray<CompilerThread*>* _method_threads;
@@ -256,19 +275,9 @@
   static int _sum_nmethod_size;
   static int _sum_nmethod_code_size;
 
-  static int compiler_count() {
-    return CICompilerCountPerCPU
-      // Example: if CICompilerCountPerCPU is true, then we get
-      // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
-      // May help big-app startup time.
-      ? (MAX2(log2_intptr(os::active_processor_count())-1,1))
-      : CICompilerCount;
-  }
-
   static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
-  static void init_compiler_threads(int compiler_count);
+  static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
-  static bool compilation_is_in_queue  (methodHandle method, int osr_bci);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
   static uint assign_compile_id        (methodHandle method, int osr_bci);
   static bool is_compile_blocking      (methodHandle method, int osr_bci);
@@ -301,23 +310,35 @@
                                   int hot_count,
                                   const char* comment,
                                   TRAPS);
-
+  static CompileQueue* compile_queue(int comp_level) {
+    if (is_c2_compile(comp_level)) return _c2_method_queue;
+    if (is_c1_compile(comp_level)) return _c1_method_queue;
+    return NULL;
+  }
  public:
   enum {
     // The entry bci used for non-OSR compilations.
     standard_entry_bci = InvocationEntryBci
   };
 
-  static AbstractCompiler* compiler(int level ) {
-    if (level == CompLevel_fast_compile) return _compilers[0];
-    assert(level == CompLevel_highest_tier, "what level?");
-    return _compilers[1];
+  static AbstractCompiler* compiler(int comp_level) {
+    if (is_c2_compile(comp_level)) return _compilers[1]; // C2
+    if (is_c1_compile(comp_level)) return _compilers[0]; // C1
+    return NULL;
   }
 
+  static bool compilation_is_in_queue(methodHandle method, int osr_bci);
+  static int queue_size(int comp_level) {
+    CompileQueue *q = compile_queue(comp_level);
+    return q != NULL ? q->size() : 0;
+  }
   static void compilation_init();
   static void init_compiler_thread_log();
-  static nmethod* compile_method(methodHandle method, int osr_bci,
-                                 methodHandle hot_method, int hot_count,
+  static nmethod* compile_method(methodHandle method,
+                                 int osr_bci,
+                                 int comp_level,
+                                 methodHandle hot_method,
+                                 int hot_count,
                                  const char* comment, TRAPS);
 
   static void compiler_thread_loop();
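
With separate C1 and C2 queues, everything is routed from the compilation level alone: compile_queue() selects the queue, compiler() selects the backend, and queue_size() reports the per-tier backlog. A hedged usage sketch (report_backlog is a hypothetical caller, not part of the patch):

    // Hypothetical caller using the new per-tier accessors.
    static void report_backlog(outputStream* st) {
      st->print_cr("C1 backlog: %d", CompileBroker::queue_size(CompLevel_simple));
      st->print_cr("C2 backlog: %d", CompileBroker::queue_size(CompLevel_full_optimization));
    }
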
--- a/hotspot/src/share/vm/compiler/disassembler.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -407,7 +407,7 @@
   if (!load_library())  return;
   decode_env env(cb, st);
   env.output()->print_cr("Decoding CodeBlob " INTPTR_FORMAT, cb);
-  env.decode_instructions(cb->instructions_begin(), cb->instructions_end());
+  env.decode_instructions(cb->code_begin(), cb->code_end());
 }
 
 
@@ -424,12 +424,12 @@
   env.output()->print_cr("Code:");
 
 #ifdef SHARK
-  SharkEntry* entry = (SharkEntry *) nm->instructions_begin();
-  unsigned char* p = entry->code_start();
+  SharkEntry* entry = (SharkEntry *) nm->code_begin();
+  unsigned char* p   = entry->code_start();
   unsigned char* end = entry->code_limit();
 #else
-  unsigned char* p = nm->instructions_begin();
-  unsigned char* end = nm->instructions_end();
+  unsigned char* p   = nm->code_begin();
+  unsigned char* end = nm->code_end();
 #endif // SHARK
 
   // If there has been profiling, print the buckets.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -256,7 +256,7 @@
 }
 
 TreeChunk* TreeList::first_available() {
-  guarantee(head() != NULL, "The head of the list cannot be NULL");
+  assert(head() != NULL, "The head of the list cannot be NULL");
   FreeChunk* fc = head()->next();
   TreeChunk* retTC;
   if (fc == NULL) {
@@ -272,7 +272,7 @@
 // those in the list for this size; potentially slow and expensive,
 // use with caution!
 TreeChunk* TreeList::largest_address() {
-  guarantee(head() != NULL, "The head of the list cannot be NULL");
+  assert(head() != NULL, "The head of the list cannot be NULL");
   FreeChunk* fc = head()->next();
   TreeChunk* retTC;
   if (fc == NULL) {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1946,8 +1946,8 @@
 
 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
   assert(_promoInfo.tracking(), "No preceding save_marks?");
-  guarantee(SharedHeap::heap()->n_par_threads() == 0,
-            "Shouldn't be called (yet) during parallel part of gc.");
+  assert(SharedHeap::heap()->n_par_threads() == 0,
+         "Shouldn't be called if using parallel gc.");
   return _promoInfo.noPromotions();
 }
 
@@ -2569,7 +2569,7 @@
 
 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
   FreeChunk* res;
-  guarantee(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
+  assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
     // This locking manages sync with other large object allocations.
     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1332,7 +1332,7 @@
 // -----------------------------------------------------
 // FREE:      klass_word & 1 == 1; mark_word holds block size
 //
-// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 0 == 0;
+// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
 //            obj->size() computes correct size
 //            [Perm Gen objects needs to be "parsable" before they can be navigated]
 //
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -252,12 +252,13 @@
 class ChunkArray: public CHeapObj {
   size_t _index;
   size_t _capacity;
+  size_t _overflows;
   HeapWord** _array;   // storage for array
 
  public:
-  ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
+  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
   ChunkArray(HeapWord** a, size_t c):
-    _index(0), _capacity(c), _array(a) {}
+    _index(0), _capacity(c), _overflows(0), _array(a) {}
 
   HeapWord** array() { return _array; }
   void set_array(HeapWord** a) { _array = a; }
@@ -266,7 +267,9 @@
   void set_capacity(size_t c) { _capacity = c; }
 
   size_t end() {
-    assert(_index < capacity(), "_index out of bounds");
+    assert(_index <= capacity(),
+           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
+                   _index, _capacity));
     return _index;
   }  // exclusive
 
@@ -277,12 +280,23 @@
 
   void reset() {
     _index = 0;
+    if (_overflows > 0 && PrintCMSStatistics > 1) {
+      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
+              _capacity, _overflows);
+    }
+    _overflows = 0;
   }
 
   void record_sample(HeapWord* p, size_t sz) {
     // For now we do not do anything with the size
     if (_index < _capacity) {
       _array[_index++] = p;
+    } else {
+      ++_overflows;
+      assert(_index == _capacity,
+             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
+                     "): out of bounds at overflow#" SIZE_FORMAT,
+                     _index, _capacity, _overflows));
     }
   }
 };
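
record_sample() now degrades gracefully: samples past capacity are dropped but tallied, and reset() surfaces the tally before clearing it. A stand-alone analog under minimal assumptions (SampleArray is hypothetical, not HotSpot code):

    #include <cstddef>
    #include <cstdio>

    // Stand-alone analog of the overflow accounting above.
    struct SampleArray {
      size_t index, capacity, overflows;
      void** array;
      void record_sample(void* p) {
        if (index < capacity) array[index++] = p;  // normal path
        else                  ++overflows;         // drop, but count the drop
      }
      void reset() {
        if (overflows > 0) printf("overflowed %zu times\n", overflows);
        index = 0;
        overflows = 0;
      }
    };
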
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -165,13 +165,8 @@
        "Next of tail should be NULL");
    }
    decrement_count();
-#define TRAP_CODE 1
-#if TRAP_CODE
-   if (head() == NULL) {
-     guarantee(tail() == NULL, "INVARIANT");
-     guarantee(count() == 0, "INVARIANT");
-   }
-#endif
+   assert(((head() == NULL) + (tail() == NULL) + (count() == 0)) % 3 == 0,
+          "H/T/C Inconsistency");
    // clear next and prev fields of fc, debug only
    NOT_PRODUCT(
      fc->linkPrev(NULL);
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -253,8 +253,8 @@
        cur_spool = cur_spool->nextSpoolBlock) {
     // the first entry is just a self-pointer; indices 1 through
     // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
-    guarantee((void*)cur_spool->displacedHdr == (void*)&cur_spool->displacedHdr,
-              "first entry of displacedHdr should be self-referential");
+    assert((void*)cur_spool->displacedHdr == (void*)&cur_spool->displacedHdr,
+           "first entry of displacedHdr should be self-referential");
     slots += cur_spool->bufferSize - 1;
     blocks++;
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -2753,7 +2753,7 @@
   print_taskqueue_stats_hdr(st);
 
   TaskQueueStats totals;
-  const int n = MAX2(workers()->total_workers(), 1);
+  const int n = workers() != NULL ? workers()->total_workers() : 1;
   for (int i = 0; i < n; ++i) {
     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
     totals += task_queue(i)->stats;
@@ -2764,7 +2764,7 @@
 }
 
 void G1CollectedHeap::reset_taskqueue_stats() {
-  const int n = MAX2(workers()->total_workers(), 1);
+  const int n = workers() != NULL ? workers()->total_workers() : 1;
   for (int i = 0; i < n; ++i) {
     task_queue(i)->stats.reset();
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -2148,7 +2148,7 @@
             body_summary->get_termination_seq()
           };
           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
-                                        7, other_parts);
+                                        6, other_parts);
           check_other_times(2, body_summary->get_parallel_other_seq(),
                             &calc_other_times_ms);
         }
@@ -2166,30 +2166,32 @@
     }
     print_summary(1, "Other", summary->get_other_seq());
     {
-      NumberSeq calc_other_times_ms;
-      if (parallel) {
-        // parallel
-        NumberSeq* other_parts[] = {
-          body_summary->get_satb_drain_seq(),
-          body_summary->get_parallel_seq(),
-          body_summary->get_clear_ct_seq()
-        };
-        calc_other_times_ms = NumberSeq(summary->get_total_seq(),
-                                        3, other_parts);
-      } else {
-        // serial
-        NumberSeq* other_parts[] = {
-          body_summary->get_satb_drain_seq(),
-          body_summary->get_update_rs_seq(),
-          body_summary->get_ext_root_scan_seq(),
-          body_summary->get_mark_stack_scan_seq(),
-          body_summary->get_scan_rs_seq(),
-          body_summary->get_obj_copy_seq()
-        };
-        calc_other_times_ms = NumberSeq(summary->get_total_seq(),
-                                        7, other_parts);
+      if (body_summary != NULL) {
+        NumberSeq calc_other_times_ms;
+        if (parallel) {
+          // parallel
+          NumberSeq* other_parts[] = {
+            body_summary->get_satb_drain_seq(),
+            body_summary->get_parallel_seq(),
+            body_summary->get_clear_ct_seq()
+          };
+          calc_other_times_ms = NumberSeq(summary->get_total_seq(),
+                                                3, other_parts);
+        } else {
+          // serial
+          NumberSeq* other_parts[] = {
+            body_summary->get_satb_drain_seq(),
+            body_summary->get_update_rs_seq(),
+            body_summary->get_ext_root_scan_seq(),
+            body_summary->get_mark_stack_scan_seq(),
+            body_summary->get_scan_rs_seq(),
+            body_summary->get_obj_copy_seq()
+          };
+          calc_other_times_ms = NumberSeq(summary->get_total_seq(),
+                                                6, other_parts);
+        }
+        check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
       }
-      check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
     }
   } else {
     print_indent(0);
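
Two fixes land here: the element count passed with other_parts now matches the arrays' real lengths (6, not 7, in the serial case), and the whole block is guarded by body_summary != NULL. A small standalone analogue of the count bug, where the length argument must equal the array size or the loop reads past the end:

    #include <cstdio>

    // Subtract the sum of n component times from a total; n must match
    // the real length of parts, or the loop reads out of bounds.
    static double other_time_ms(double total, const double* parts, int n) {
      double sum = 0.0;
      for (int i = 0; i < n; i++) sum += parts[i];
      return total - sum;
    }

    int main() {
      double parts[6] = { 1.0, 2.0, 0.5, 0.25, 0.75, 1.5 };  // six parts
      // Passing 7 here, as the old code effectively did, would read one
      // element past the array.
      printf("other = %.2f ms\n", other_time_ms(10.0, parts, 6));
      return 0;
    }
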
--- a/hotspot/src/share/vm/includeDB_compiler1	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/includeDB_compiler1	Wed Jul 05 17:22:53 2017 +0200
@@ -19,7 +19,6 @@
 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 // or visit www.oracle.com if you need additional information or have any
 // questions.
-//  
 //
 
 // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
--- a/hotspot/src/share/vm/includeDB_compiler2	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/includeDB_compiler2	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -504,6 +504,7 @@
 graphKit.hpp                            callnode.hpp
 graphKit.hpp                            cfgnode.hpp
 graphKit.hpp                            ciEnv.hpp
+graphKit.hpp                            ciMethodData.hpp
 graphKit.hpp                            divnode.hpp
 graphKit.hpp                            compile.hpp
 graphKit.hpp                            deoptimization.hpp
@@ -624,6 +625,7 @@
 loopTransform.cpp                       loopnode.hpp
 loopTransform.cpp                       mulnode.hpp
 loopTransform.cpp                       rootnode.hpp
+loopTransform.cpp                       runtime.hpp
 loopTransform.cpp                       subnode.hpp
 
 loopUnswitch.cpp                        allocation.inline.hpp
--- a/hotspot/src/share/vm/includeDB_core	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/includeDB_core	Wed Jul 05 17:22:53 2017 +0200
@@ -1081,6 +1081,8 @@
 compilationPolicy.cpp                   nmethod.hpp
 compilationPolicy.cpp                   oop.inline.hpp
 compilationPolicy.cpp                   rframe.hpp
+compilationPolicy.cpp                   scopeDesc.hpp
+compilationPolicy.cpp                   simpleThresholdPolicy.hpp
 compilationPolicy.cpp                   stubRoutines.hpp
 compilationPolicy.cpp                   thread.hpp
 compilationPolicy.cpp                   timer.hpp
@@ -1451,6 +1453,7 @@
 deoptimization.cpp                      allocation.inline.hpp
 deoptimization.cpp                      biasedLocking.hpp
 deoptimization.cpp                      bytecode.hpp
+deoptimization.cpp                      compilationPolicy.hpp
 deoptimization.cpp                      debugInfoRec.hpp
 deoptimization.cpp                      deoptimization.hpp
 deoptimization.cpp                      events.hpp
@@ -2172,6 +2175,7 @@
 
 interpreterRuntime.cpp                  biasedLocking.hpp
 interpreterRuntime.cpp                  collectedHeap.hpp
+interpreterRuntime.cpp                  compileBroker.hpp
 interpreterRuntime.cpp                  compilationPolicy.hpp
 interpreterRuntime.cpp                  constantPoolOop.hpp
 interpreterRuntime.cpp                  cpCacheOop.hpp
@@ -2829,6 +2833,7 @@
 
 methodDataOop.cpp                       bytecode.hpp
 methodDataOop.cpp                       bytecodeStream.hpp
+methodDataOop.cpp                       compilationPolicy.hpp
 methodDataOop.cpp                       deoptimization.hpp
 methodDataOop.cpp                       handles.inline.hpp
 methodDataOop.cpp                       linkResolver.hpp
@@ -2841,6 +2846,7 @@
 methodDataOop.hpp                       oop.hpp
 methodDataOop.hpp                       orderAccess.hpp
 methodDataOop.hpp                       universe.hpp
+methodDataOop.hpp                       methodOop.hpp
 
 methodHandleWalk.hpp                    methodHandles.hpp
 
@@ -2906,6 +2912,7 @@
 methodOop.cpp                           bytecodeTracer.hpp
 methodOop.cpp                           bytecodes.hpp
 methodOop.cpp                           collectedHeap.inline.hpp
+methodOop.cpp                           compilationPolicy.hpp
 methodOop.cpp                           debugInfoRec.hpp
 methodOop.cpp                           frame.inline.hpp
 methodOop.cpp                           gcLocker.hpp
@@ -3655,6 +3662,7 @@
 
 safepoint.cpp                           codeCache.hpp
 safepoint.cpp                           collectedHeap.hpp
+safepoint.cpp                           compilationPolicy.hpp
 safepoint.cpp                           deoptimization.hpp
 safepoint.cpp                           events.hpp
 safepoint.cpp                           frame.inline.hpp
@@ -3799,6 +3807,17 @@
 signature.hpp                           methodOop.hpp
 signature.hpp                           top.hpp
 
+simpleThresholdPolicy.cpp               arguments.hpp
+simpleThresholdPolicy.cpp               compileBroker.hpp
+simpleThresholdPolicy.cpp               resourceArea.hpp
+simpleThresholdPolicy.cpp               simpleThresholdPolicy.hpp
+simpleThresholdPolicy.cpp               simpleThresholdPolicy.inline.hpp
+
+simpleThresholdPolicy.hpp               compilationPolicy.hpp
+simpleThresholdPolicy.hpp               globalDefinitions.hpp
+simpleThresholdPolicy.hpp               methodDataOop.hpp
+simpleThresholdPolicy.hpp               nmethod.hpp
+
 sizes.cpp                               sizes.hpp
 
 sizes.hpp                               allocation.hpp
@@ -3977,6 +3996,7 @@
 
 sweeper.cpp                             atomic.hpp
 sweeper.cpp                             codeCache.hpp
+sweeper.cpp                             compilationPolicy.hpp
 sweeper.cpp                             compileBroker.hpp
 sweeper.cpp                             events.hpp
 sweeper.cpp                             methodOop.hpp
--- a/hotspot/src/share/vm/interpreter/interpreter.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreter.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,7 +117,7 @@
 
 
     // commit Codelet
-    AbstractInterpreter::code()->commit((*_masm)->code()->pure_code_size());
+    AbstractInterpreter::code()->commit((*_masm)->code()->pure_insts_size());
     // make sure nobody can use _masm outside a CodeletMark lifespan
     *_masm = NULL;
   }
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -200,6 +200,7 @@
 void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
   assert(ProfileTraps, "call me only if profiling");
   methodHandle trap_method(thread, method(thread));
+
   if (trap_method.not_null()) {
     methodDataHandle trap_mdo(thread, trap_method->method_data());
     if (trap_mdo.is_null()) {
@@ -777,43 +778,6 @@
 // Miscellaneous
 
 
-#ifndef PRODUCT
-static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci, address branch_bcp) {
-  if (TraceInvocationCounterOverflow) {
-    InvocationCounter* ic = m->invocation_counter();
-    InvocationCounter* bc = m->backedge_counter();
-    ResourceMark rm;
-    const char* msg =
-      branch_bcp == NULL
-      ? "comp-policy cntr ovfl @ %d in entry of "
-      : "comp-policy cntr ovfl @ %d in loop of ";
-    tty->print(msg, bci);
-    m->print_value();
-    tty->cr();
-    ic->print();
-    bc->print();
-    if (ProfileInterpreter) {
-      if (branch_bcp != NULL) {
-        methodDataOop mdo = m->method_data();
-        if (mdo != NULL) {
-          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
-          tty->print_cr("back branch count = %d", count);
-        }
-      }
-    }
-  }
-}
-
-static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
-  if (TraceOnStackReplacement) {
-    ResourceMark rm;
-    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
-    method->print_short_name(tty);
-    tty->print_cr(" at bci %d", bci);
-  }
-}
-#endif // !PRODUCT
-
 nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
   nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
   assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
@@ -826,7 +790,7 @@
     frame fr = thread->last_frame();
     methodOop method =  fr.interpreter_frame_method();
     int bci = method->bci_from(fr.interpreter_frame_bcp());
-    nm = method->lookup_osr_nmethod_for(bci);
+    nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
   }
   return nm;
 }
@@ -840,74 +804,32 @@
   frame fr = thread->last_frame();
   assert(fr.is_interpreted_frame(), "must come from interpreter");
   methodHandle method(thread, fr.interpreter_frame_method());
-  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : 0;
-  const int bci = method->bci_from(fr.interpreter_frame_bcp());
-  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci, branch_bcp);)
+  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
+  const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
+
+  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread);
 
-  if (JvmtiExport::can_post_interpreter_events()) {
-    if (thread->is_interp_only_mode()) {
-      // If certain JVMTI events (e.g. frame pop event) are requested then the
-      // thread is forced to remain in interpreted code. This is
-      // implemented partly by a check in the run_compiled_code
-      // section of the interpreter whether we should skip running
-      // compiled code, and partly by skipping OSR compiles for
-      // interpreted-only threads.
-      if (branch_bcp != NULL) {
-        CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
-        return NULL;
+  if (osr_nm != NULL) {
+    // We may need to do on-stack replacement which requires that no
+    // monitors in the activation are biased because their
+    // BasicObjectLocks will need to migrate during OSR. Force
+    // unbiasing of all monitors in the activation now (even though
+    // the OSR nmethod might be invalidated) because we don't have a
+    // safepoint opportunity later once the migration begins.
+    if (UseBiasedLocking) {
+      ResourceMark rm;
+      GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
+      for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
+           kptr < fr.interpreter_frame_monitor_begin();
+           kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
+        if( kptr->obj() != NULL ) {
+          objects_to_revoke->append(Handle(THREAD, kptr->obj()));
+        }
       }
+      BiasedLocking::revoke(objects_to_revoke);
     }
   }
-
-  if (branch_bcp == NULL) {
-    // when code cache is full, compilation gets switched off, UseCompiler
-    // is set to false
-    if (!method->has_compiled_code() && UseCompiler) {
-      CompilationPolicy::policy()->method_invocation_event(method, CHECK_NULL);
-    } else {
-      // Force counter overflow on method entry, even if no compilation
-      // happened.  (The method_invocation_event call does this also.)
-      CompilationPolicy::policy()->reset_counter_for_invocation_event(method);
-    }
-    // compilation at an invocation overflow no longer goes and retries test for
-    // compiled method. We always run the loser of the race as interpreted.
-    // so return NULL
-    return NULL;
-  } else {
-    // counter overflow in a loop => try to do on-stack-replacement
-    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci);
-    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci);)
-    // when code cache is full, we should not compile any more...
-    if (osr_nm == NULL && UseCompiler) {
-      const int branch_bci = method->bci_from(branch_bcp);
-      CompilationPolicy::policy()->method_back_branch_event(method, branch_bci, bci, CHECK_NULL);
-      osr_nm = method->lookup_osr_nmethod_for(bci);
-    }
-    if (osr_nm == NULL) {
-      CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
-      return NULL;
-    } else {
-      // We may need to do on-stack replacement which requires that no
-      // monitors in the activation are biased because their
-      // BasicObjectLocks will need to migrate during OSR. Force
-      // unbiasing of all monitors in the activation now (even though
-      // the OSR nmethod might be invalidated) because we don't have a
-      // safepoint opportunity later once the migration begins.
-      if (UseBiasedLocking) {
-        ResourceMark rm;
-        GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
-        for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
-             kptr < fr.interpreter_frame_monitor_begin();
-             kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
-          if( kptr->obj() != NULL ) {
-            objects_to_revoke->append(Handle(THREAD, kptr->obj()));
-          }
-        }
-        BiasedLocking::revoke(objects_to_revoke);
-      }
-      return osr_nm;
-    }
-  }
+  return osr_nm;
 IRT_END
 
 IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp))
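
The rewrite collapses the hand-rolled split between invocation overflow and back-branch overflow: both paths now go through a single CompilationPolicy::policy()->event(...) call, with InvocationEntryBci marking the method-entry case, and only the biased-locking revocation required before OSR migration stays inline. A hedged sketch of the dispatch shape; the names are stand-ins, not the VM's API:

    #include <cstddef>

    const int InvocationEntryBci = -1;   // sentinel for method entry

    struct OsrCodeSketch { int entry_bci; };

    // One policy callback handles both overflow kinds; only the loop
    // case (bci != InvocationEntryBci) may hand back OSR code.
    static OsrCodeSketch* policy_event(int branch_bci, int bci) {
      (void)branch_bci;  // kept to mirror the real signature
      if (bci == InvocationEntryBci) {
        // invocation-counter overflow: maybe enqueue a compile, no OSR
        return NULL;
      }
      // back-branch overflow: look up or request an OSR method for bci
      static OsrCodeSketch osr = { 0 };
      osr.entry_bci = bci;
      return &osr;
    }

    int main() {
      OsrCodeSketch* entry = policy_event(InvocationEntryBci, InvocationEntryBci);
      OsrCodeSketch* loop  = policy_event(17, 17);
      return (entry == NULL && loop != NULL) ? 0 : 1;
    }
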
@@ -1124,7 +1046,7 @@
   if (handler_blob == NULL) {
     return NULL;
   }
-  address handler = handler_blob->instructions_begin();
+  address handler = handler_blob->code_begin();
   _handler_blob = handler_blob;
   _handler = handler;
   return handler;
@@ -1140,7 +1062,7 @@
 
   BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
                                       SignatureHandlerLibrary::buffer_size);
-  _buffer = bb->instructions_begin();
+  _buffer = bb->code_begin();
 
   _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true);
   _handlers     = new(ResourceObj::C_HEAP)GrowableArray<address>(32, true);
@@ -1148,16 +1070,16 @@
 
 address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
   address handler   = _handler;
-  int     code_size = buffer->pure_code_size();
-  if (handler + code_size > _handler_blob->instructions_end()) {
+  int     insts_size = buffer->pure_insts_size();
+  if (handler + insts_size > _handler_blob->code_end()) {
     // get a new handler blob
     handler = set_handler_blob();
   }
   if (handler != NULL) {
-    memcpy(handler, buffer->code_begin(), code_size);
+    memcpy(handler, buffer->insts_begin(), insts_size);
     pd_set_handler(handler);
-    ICache::invalidate_range(handler, code_size);
-    _handler = handler + code_size;
+    ICache::invalidate_range(handler, insts_size);
+    _handler = handler + insts_size;
   }
   return handler;
 }
@@ -1196,8 +1118,8 @@
                           (method->is_static() ? "static" : "receiver"),
                           method->name_and_sig_as_C_string(),
                           fingerprint,
-                          buffer.code_size());
-            Disassembler::decode(handler, handler + buffer.code_size());
+                          buffer.insts_size());
+            Disassembler::decode(handler, handler + buffer.insts_size());
 #ifndef PRODUCT
             tty->print_cr(" --- associated result handler ---");
             address rh_begin = Interpreter::result_handler(method()->result_type());
--- a/hotspot/src/share/vm/interpreter/invocationCounter.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/invocationCounter.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,8 +40,7 @@
 }
 
 void InvocationCounter::set_carry() {
-  _counter |= carry_mask;
-
+  set_carry_flag();
   // The carry bit now indicates that this counter had achieved a very
   // large value.  Now reduce the value, so that the method can be
   // executed many more times before re-entering the VM.
@@ -52,7 +51,6 @@
   if (old_count != new_count)  set(state(), new_count);
 }
 
-
 void InvocationCounter::set_state(State state) {
   assert(0 <= state && state < number_of_states, "illegal state");
   int init = _init[state];
@@ -82,11 +80,6 @@
 int                       InvocationCounter::InterpreterBackwardBranchLimit;
 int                       InvocationCounter::InterpreterProfileLimit;
 
-// Tier1 limits
-int                       InvocationCounter::Tier1InvocationLimit;
-int                       InvocationCounter::Tier1BackEdgeLimit;
-
-
 
 const char* InvocationCounter::state_as_string(State state) {
   switch (state) {
@@ -146,8 +139,6 @@
 
   InterpreterInvocationLimit = CompileThreshold << number_of_noncount_bits;
   InterpreterProfileLimit = ((CompileThreshold * InterpreterProfilePercentage) / 100)<< number_of_noncount_bits;
-  Tier1InvocationLimit = Tier2CompileThreshold << number_of_noncount_bits;
-  Tier1BackEdgeLimit   = Tier2BackEdgeThreshold << number_of_noncount_bits;
 
   // When methodData is collected, the backward branch limit is compared against a
   // methodData counter, rather than an InvocationCounter.  In the former case, we
--- a/hotspot/src/share/vm/interpreter/invocationCounter.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/invocationCounter.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,6 @@
     number_of_count_bits = BitsPerInt - number_of_noncount_bits,
     state_limit          = nth_bit(number_of_state_bits),
     count_grain          = nth_bit(number_of_state_bits + number_of_carry_bits),
-    count_limit          = nth_bit(number_of_count_bits - 1),
     carry_mask           = right_n_bits(number_of_carry_bits) << number_of_state_bits,
     state_mask           = right_n_bits(number_of_state_bits),
     status_mask          = right_n_bits(number_of_state_bits + number_of_carry_bits),
@@ -52,18 +51,16 @@
 
  public:
   static int InterpreterInvocationLimit;        // CompileThreshold scaled for interpreter use
-  static int Tier1InvocationLimit;              // CompileThreshold scaled for tier1 use
-  static int Tier1BackEdgeLimit;                // BackEdgeThreshold scaled for tier1 use
-
   static int InterpreterBackwardBranchLimit;    // A separate threshold for on stack replacement
-
   static int InterpreterProfileLimit;           // Profiling threshold scaled for interpreter use
 
   typedef address (*Action)(methodHandle method, TRAPS);
 
   enum PublicConstants {
     count_increment      = count_grain,          // use this value to increment the 32bit _counter word
-    count_mask_value     = count_mask            // use this value to mask the backedge counter
+    count_mask_value     = count_mask,           // use this value to mask the backedge counter
+    count_shift          = number_of_noncount_bits,
+    count_limit          = nth_bit(number_of_count_bits - 1)
   };
 
   enum State {
@@ -79,6 +76,7 @@
   inline void set(State state, int count);       // sets state and counter
   inline void decay();                           // decay counter (divide by two)
   void set_carry();                              // set the sticky carry bit
+  void set_carry_flag()                          {  _counter |= carry_mask; }
 
   // Accessors
   State  state() const                           { return (State)(_counter & state_mask); }
@@ -135,3 +133,4 @@
   if (c > 0 && new_count == 0) new_count = 1;
   set(state(), new_count);
 }
+
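
The hunk moves count_limit into PublicConstants, adds count_shift, and factors the raw bit-set into set_carry_flag(). The constants describe a packed word: state bits at the bottom, a carry bit above them, and the count in the remaining high bits, which is why the counter is bumped by count_grain rather than 1. A self-contained sketch of that layout; the bit widths are local assumptions for illustration:

    #include <cstdio>

    // Local assumptions mirroring the header's scheme: 2 state bits in
    // the low end, 1 carry bit above them, count in the remaining bits.
    enum {
      number_of_state_bits    = 2,
      number_of_carry_bits    = 1,
      number_of_noncount_bits = number_of_state_bits + number_of_carry_bits,
      state_mask  = (1 << number_of_state_bits) - 1,
      carry_mask  = ((1 << number_of_carry_bits) - 1) << number_of_state_bits,
      count_grain = 1 << number_of_noncount_bits,   // == count_increment
      count_shift = number_of_noncount_bits
    };

    int main() {
      unsigned counter = 0;
      for (int i = 0; i < 5; i++) {
        counter += count_grain;   // bump count without touching state/carry
      }
      printf("count=%u state=%u carry=%d\n",
             counter >> count_shift,
             counter & state_mask,
             (counter & carry_mask) != 0);
      return 0;
    }
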
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,12 +83,12 @@
   _resolved_method = resolved_method;
   _selected_method = selected_method;
   _vtable_index    = vtable_index;
-  if (CompilationPolicy::mustBeCompiled(selected_method)) {
+  if (CompilationPolicy::must_be_compiled(selected_method)) {
     // This path is unusual, mostly used by the '-Xcomp' stress test mode.
 
-    // Note: with several active threads, the mustBeCompiled may be true
-    //       while canBeCompiled is false; remove assert
-    // assert(CompilationPolicy::canBeCompiled(selected_method), "cannot compile");
+    // Note: with several active threads, the must_be_compiled may be true
+    //       while can_be_compiled is false; remove assert
+    // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
     if (THREAD->is_Compiler_thread()) {
       // don't force compilation, resolve was on behalf of compiler
       return;
@@ -104,7 +104,8 @@
       return;
     }
     CompileBroker::compile_method(selected_method, InvocationEntryBci,
-                                  methodHandle(), 0, "mustBeCompiled", CHECK);
+                                  CompLevel_initial_compile,
+                                  methodHandle(), 0, "must_be_compiled", CHECK);
   }
 }
 
--- a/hotspot/src/share/vm/memory/allocation.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -58,7 +58,7 @@
 void ResourceObj::operator delete(void* p) {
   assert(((ResourceObj *)p)->allocated_on_C_heap(),
          "delete only allowed for C_HEAP objects");
-  DEBUG_ONLY(((ResourceObj *)p)->_allocation = (uintptr_t) badHeapOopVal;)
+  DEBUG_ONLY(((ResourceObj *)p)->_allocation = (uintptr_t)badHeapOopVal;)
   FreeHeap(p);
 }
 
@@ -104,7 +104,7 @@
 ResourceObj::~ResourceObj() {
     // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
     if (!allocated_on_C_heap()) {  // ResourceObj::delete() zaps _allocation for C_heap.
-      _allocation = (uintptr_t) badHeapOopVal; // zap type
+      _allocation = (uintptr_t)badHeapOopVal; // zap type
     }
 }
 #endif // ASSERT
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -32,7 +32,11 @@
     MaxPermSize = PermSize;
   }
   PermSize = MAX2(min_alignment(), align_size_down_(PermSize, min_alignment()));
-  MaxPermSize = align_size_up(MaxPermSize, max_alignment());
+  // Don't increase Perm size limit above specified.
+  MaxPermSize = align_size_down(MaxPermSize, max_alignment());
+  if (PermSize > MaxPermSize) {
+    PermSize = MaxPermSize;
+  }
 
   MinPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MinPermHeapExpansion, min_alignment()));
   MaxPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MaxPermHeapExpansion, min_alignment()));
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -346,7 +346,8 @@
   bool _was_discovering_refs;
  public:
   NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
-    if (_was_discovering_refs = _rp->discovery_enabled()) {
+    _was_discovering_refs = _rp->discovery_enabled();
+    if (_was_discovering_refs) {
       _rp->disable_discovery();
     }
   }
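
The constructor change is purely syntactic: the assignment stays, but it no longer doubles as the condition, which avoids compiler warnings and stops readers from misparsing = as ==. A minimal sketch with a stand-in type:

    struct RefProcSketch {
      bool _enabled;
      bool discovery_enabled() const { return _enabled; }
      void disable_discovery()       { _enabled = false; }
    };

    static bool save_and_disable(RefProcSketch* rp) {
      // Before: if (was = rp->discovery_enabled()) { ... }  // easy to misread
      bool was_discovering_refs = rp->discovery_enabled();
      if (was_discovering_refs) {
        rp->disable_discovery();
      }
      return was_discovering_refs;
    }

    int main() {
      RefProcSketch rp = { true };
      bool was = save_and_disable(&rp);
      return (was && !rp.discovery_enabled()) ? 0 : 1;
    }
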
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -466,6 +466,7 @@
       bool ignore_is_on_bcp = false;
       Handle value = SystemDictionary::find_method_handle_type(signature,
                                                                klass,
+                                                               false,
                                                                ignore_is_on_bcp,
                                                                CHECK_NULL);
       result_oop = value();
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -382,7 +382,7 @@
       const char* desc = "Could not initialize class ";
       const char* className = this_oop->external_name();
       size_t msglen = strlen(desc) + strlen(className) + 1;
-      char* message = NEW_C_HEAP_ARRAY(char, msglen);
+      char* message = NEW_RESOURCE_ARRAY(char, msglen);
       if (NULL == message) {
         // Out of memory: can't create detailed error message
         THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
@@ -2200,8 +2200,23 @@
   assert(n->is_osr_method(), "wrong kind of nmethod");
   n->set_osr_link(osr_nmethods_head());
   set_osr_nmethods_head(n);
+  // Raise the highest osr level if necessary
+  if (TieredCompilation) {
+    methodOop m = n->method();
+    m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
+  }
   // Remember to unlock again
   OsrList_lock->unlock();
+
+  // Get rid of the osr methods for the same bci that have lower levels.
+  if (TieredCompilation) {
+    for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
+      nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
+      if (inv != NULL && inv->is_in_use()) {
+        inv->make_not_entrant();
+      }
+    }
+  }
 }
 
 
@@ -2211,39 +2226,79 @@
   assert(n->is_osr_method(), "wrong kind of nmethod");
   nmethod* last = NULL;
   nmethod* cur  = osr_nmethods_head();
+  int max_level = CompLevel_none;  // Find the max comp level excluding n
+  methodOop m = n->method();
   // Search for match
   while(cur != NULL && cur != n) {
+    if (TieredCompilation) {
+      // Find max level before n
+      max_level = MAX2(max_level, cur->comp_level());
+    }
     last = cur;
     cur = cur->osr_link();
   }
+  nmethod* next = NULL;
   if (cur == n) {
+    next = cur->osr_link();
     if (last == NULL) {
       // Remove first element
-      set_osr_nmethods_head(osr_nmethods_head()->osr_link());
+      set_osr_nmethods_head(next);
     } else {
-      last->set_osr_link(cur->osr_link());
+      last->set_osr_link(next);
     }
   }
   n->set_osr_link(NULL);
+  if (TieredCompilation) {
+    cur = next;
+    while (cur != NULL) {
+      // Find max level after n
+      max_level = MAX2(max_level, cur->comp_level());
+      cur = cur->osr_link();
+    }
+    m->set_highest_osr_comp_level(max_level);
+  }
   // Remember to unlock again
   OsrList_lock->unlock();
 }
 
-nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci) const {
+nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
   // This is a short non-blocking critical region, so the no safepoint check is ok.
   OsrList_lock->lock_without_safepoint_check();
   nmethod* osr = osr_nmethods_head();
+  nmethod* best = NULL;
   while (osr != NULL) {
     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
+    // There can be a time when a c1 osr method exists but we are waiting
+    // for a c2 version. When c2 completes its osr nmethod we will trash
+    // the c1 version and only be able to find the c2 version. However,
+    // while we overflow in the c1 code at back branches we don't want to
+    // try to switch to the same code we are already running.
+
     if (osr->method() == m &&
         (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
-      // Found a match - return it.
-      OsrList_lock->unlock();
-      return osr;
+      if (match_level) {
+        if (osr->comp_level() == comp_level) {
+          // Found a match - return it.
+          OsrList_lock->unlock();
+          return osr;
+        }
+      } else {
+        if (best == NULL || (osr->comp_level() > best->comp_level())) {
+          if (osr->comp_level() == CompLevel_highest_tier) {
+            // Found the best possible - return it.
+            OsrList_lock->unlock();
+            return osr;
+          }
+          best = osr;
+        }
+      }
     }
     osr = osr->osr_link();
   }
   OsrList_lock->unlock();
+  if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
+    return best;
+  }
   return NULL;
 }
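
The lookup now takes a level and a match mode: an exact-level search (match_level) versus a best-effort search that tracks the highest-level nmethod found and accepts it only if it reaches at least comp_level, so C1 code overflowing at a back branch is not handed its own nmethod again while a C2 version is pending. A standalone sketch of the same scan:

    #include <cstddef>
    #include <vector>

    struct OsrEntrySketch { int bci; int comp_level; };

    static const OsrEntrySketch* lookup_osr(const std::vector<OsrEntrySketch>& chain,
                                            int bci, int comp_level, bool match_level) {
      const OsrEntrySketch* best = NULL;
      for (size_t i = 0; i < chain.size(); i++) {
        const OsrEntrySketch& e = chain[i];
        if (e.bci != bci) continue;
        if (match_level) {
          if (e.comp_level == comp_level) return &e;  // exact level only
        } else if (best == NULL || e.comp_level > best->comp_level) {
          best = &e;                                  // track highest level
        }
      }
      if (!match_level && best != NULL && best->comp_level >= comp_level) {
        return best;
      }
      return NULL;  // nothing at (or above) the requested level
    }

    int main() {
      std::vector<OsrEntrySketch> chain;
      OsrEntrySketch c1 = { 17, 1 };
      chain.push_back(c1);
      // Running C1 code asks for something better than level 1: the C1
      // entry is found but rejected until a C2 version appears.
      return lookup_osr(chain, 17, 2, false) == NULL ? 0 : 1;
    }
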
 
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -588,7 +588,7 @@
   void set_osr_nmethods_head(nmethod* h)     { _osr_nmethods_head = h; };
   void add_osr_nmethod(nmethod* n);
   void remove_osr_nmethod(nmethod* n);
-  nmethod* lookup_osr_nmethod(const methodOop m, int bci) const;
+  nmethod* lookup_osr_nmethod(const methodOop m, int bci, int level, bool match_level) const;
 
   // Breakpoint support (see methods on methodOop for details)
   BreakpointInfo* breakpoints() const       { return _breakpoints; };
--- a/hotspot/src/share/vm/oops/methodDataOop.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/methodDataOop.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -283,11 +283,17 @@
     if (receiver(row) != NULL)  entries++;
   }
   st->print_cr("count(%u) entries(%u)", count(), entries);
+  int total = count();
+  for (row = 0; row < row_limit(); row++) {
+    if (receiver(row) != NULL) {
+      total += receiver_count(row);
+    }
+  }
   for (row = 0; row < row_limit(); row++) {
     if (receiver(row) != NULL) {
       tab(st);
       receiver(row)->print_value_on(st);
-      st->print_cr("(%u)", receiver_count(row));
+      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
     }
   }
 }
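
The printing change makes two passes over the rows: the first accumulates a total (the generic count plus every per-receiver count) so the second can print each receiver's share as a fraction. A compact sketch of the same two-pass computation, with hypothetical counts:

    #include <cstdio>

    int main() {
      // Hypothetical profile row: generic count plus two typed receivers.
      unsigned generic_count = 10;
      unsigned receiver_counts[2] = { 70, 20 };

      unsigned total = generic_count;                 // pass 1: total
      for (int i = 0; i < 2; i++) total += receiver_counts[i];

      for (int i = 0; i < 2; i++) {                   // pass 2: ratios
        printf("receiver %d: (%u %4.2f)\n", i, receiver_counts[i],
               (float)receiver_counts[i] / (float)total);
      }
      return 0;
    }
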
@@ -743,9 +749,18 @@
 // Initialize the methodDataOop corresponding to a given method.
 void methodDataOopDesc::initialize(methodHandle method) {
   ResourceMark rm;
-
   // Set the method back-pointer.
   _method = method();
+
+  if (TieredCompilation) {
+    _invocation_counter.init();
+    _backedge_counter.init();
+    _num_loops = 0;
+    _num_blocks = 0;
+    _highest_comp_level = 0;
+    _highest_osr_comp_level = 0;
+    _would_profile = false;
+  }
   set_creation_mileage(mileage_of(method()));
 
   // Initialize flags and trap history.
@@ -798,32 +813,25 @@
 // Get a measure of how much mileage the method has on it.
 int methodDataOopDesc::mileage_of(methodOop method) {
   int mileage = 0;
-  int iic = method->interpreter_invocation_count();
-  if (mileage < iic)  mileage = iic;
-
-  InvocationCounter* ic = method->invocation_counter();
-  InvocationCounter* bc = method->backedge_counter();
-
-  int icval = ic->count();
-  if (ic->carry()) icval += CompileThreshold;
-  if (mileage < icval)  mileage = icval;
-  int bcval = bc->count();
-  if (bc->carry()) bcval += CompileThreshold;
-  if (mileage < bcval)  mileage = bcval;
+  if (TieredCompilation) {
+    mileage = MAX2(method->invocation_count(), method->backedge_count());
+  } else {
+    int iic = method->interpreter_invocation_count();
+    if (mileage < iic)  mileage = iic;
+    InvocationCounter* ic = method->invocation_counter();
+    InvocationCounter* bc = method->backedge_counter();
+    int icval = ic->count();
+    if (ic->carry()) icval += CompileThreshold;
+    if (mileage < icval)  mileage = icval;
+    int bcval = bc->count();
+    if (bc->carry()) bcval += CompileThreshold;
+    if (mileage < bcval)  mileage = bcval;
+  }
   return mileage;
 }
 
 bool methodDataOopDesc::is_mature() const {
-  uint current = mileage_of(_method);
-  uint initial = creation_mileage();
-  if (current < initial)
-    return true;  // some sort of overflow
-  uint target;
-  if (ProfileMaturityPercentage <= 0)
-    target = (uint) -ProfileMaturityPercentage;  // absolute value
-  else
-    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
-  return (current >= initial + target);
+  return CompilationPolicy::policy()->is_mature(_method);
 }
 
 // Translate a bci to its corresponding data index (di).
--- a/hotspot/src/share/vm/oops/methodDataOop.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/methodDataOop.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1206,7 +1206,25 @@
   intx              _arg_stack;       // bit set of stack-allocatable arguments
   intx              _arg_returned;    // bit set of returned arguments
 
-  int _creation_mileage;            // method mileage at MDO creation
+  int _creation_mileage;              // method mileage at MDO creation
+
+  // How many invocations has this MDO seen?
+  // These counters are used to determine the exact age of the MDO.
+  // We need them because, with tiered compilation, a method can be
+  // concurrently executed at different levels.
+  InvocationCounter _invocation_counter;
+  // Same for backedges.
+  InvocationCounter _backedge_counter;
+  // The number of loops and blocks is computed when compiling the first
+  // time with C1. It is used to determine if the method is trivial.
+  short             _num_loops;
+  short             _num_blocks;
+  // Highest compile level this method has ever seen.
+  u1                _highest_comp_level;
+  // Same for OSR level
+  u1                _highest_osr_comp_level;
+  // Does this method contain anything worth profiling?
+  bool              _would_profile;
 
   // Size of _data array in bytes.  (Excludes header and extra_data fields.)
   int _data_size;
@@ -1292,6 +1310,36 @@
 
   int      creation_mileage() const  { return _creation_mileage; }
   void set_creation_mileage(int x)   { _creation_mileage = x; }
+
+  int invocation_count() {
+    if (invocation_counter()->carry()) {
+      return InvocationCounter::count_limit;
+    }
+    return invocation_counter()->count();
+  }
+  int backedge_count() {
+    if (backedge_counter()->carry()) {
+      return InvocationCounter::count_limit;
+    }
+    return backedge_counter()->count();
+  }
+
+  InvocationCounter* invocation_counter()     { return &_invocation_counter; }
+  InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
+
+  void set_would_profile(bool p)              { _would_profile = p;    }
+  bool would_profile() const                  { return _would_profile; }
+
+  int highest_comp_level()                    { return _highest_comp_level;      }
+  void set_highest_comp_level(int level)      { _highest_comp_level = level;     }
+  int highest_osr_comp_level()                { return _highest_osr_comp_level;  }
+  void set_highest_osr_comp_level(int level)  { _highest_osr_comp_level = level; }
+
+  int num_loops() const                       { return _num_loops;  }
+  void set_num_loops(int n)                   { _num_loops = n;     }
+  int num_blocks() const                      { return _num_blocks; }
+  void set_num_blocks(int n)                  { _num_blocks = n;    }
+
   bool is_mature() const;  // consult mileage and ProfileMaturityPercentage
   static int mileage_of(methodOop m);
 
@@ -1413,7 +1461,7 @@
   void inc_decompile_count() {
     _nof_decompiles += 1;
     if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
-      method()->set_not_compilable();
+      method()->set_not_compilable(CompLevel_full_optimization);
     }
   }
 
@@ -1422,6 +1470,13 @@
     return byte_offset_of(methodDataOopDesc, _data[0]);
   }
 
+  static ByteSize invocation_counter_offset() {
+    return byte_offset_of(methodDataOopDesc, _invocation_counter);
+  }
+  static ByteSize backedge_counter_offset() {
+    return byte_offset_of(methodDataOopDesc, _backedge_counter);
+  }
+
   // GC support
   oop* adr_method() const { return (oop*)&_method; }
   bool object_is_parsable() const { return _size != 0; }
--- a/hotspot/src/share/vm/oops/methodKlass.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/methodKlass.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@
 
   // Fix and bury in methodOop
   m->set_interpreter_entry(NULL); // sets i2i entry and from_int
-  m->set_highest_tier_compile(CompLevel_none);
   m->set_adapter_entry(NULL);
   m->clear_code(); // from_c/from_i get set to c2i/i2i
 
@@ -89,6 +88,7 @@
   m->invocation_counter()->init();
   m->backedge_counter()->init();
   m->clear_number_of_breakpoints();
+
   assert(m->is_parsable(), "must be parsable here.");
   assert(m->size() == size, "wrong size for object");
   // We should not publish an unparsable object's reference
@@ -246,8 +246,8 @@
   st->print_cr(" - method size:       %d",   m->method_size());
   if (m->intrinsic_id() != vmIntrinsics::_none)
     st->print_cr(" - intrinsic id:      %d %s", m->intrinsic_id(), vmIntrinsics::name_at(m->intrinsic_id()));
-  if (m->highest_tier_compile() != CompLevel_none)
-    st->print_cr(" - highest tier:      %d", m->highest_tier_compile());
+  if (m->highest_comp_level() != CompLevel_none)
+    st->print_cr(" - highest level:     %d", m->highest_comp_level());
   st->print_cr(" - vtable index:      %d",   m->_vtable_index);
   st->print_cr(" - i2i entry:         " INTPTR_FORMAT, m->interpreter_entry());
   st->print_cr(" - adapter:           " INTPTR_FORMAT, m->adapter());
--- a/hotspot/src/share/vm/oops/methodOop.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -233,7 +233,7 @@
 }
 
 
-bool methodOopDesc::was_executed_more_than(int n) const {
+bool methodOopDesc::was_executed_more_than(int n) {
   // Invocation counter is reset when the methodOop is compiled.
   // If the method has compiled code we therefore assume it has
   // been executed more than n times.
@@ -241,7 +241,8 @@
     // interpreter doesn't bump invocation counter of trivial methods
     // compiler does not bump invocation counter of compiled methods
     return true;
-  } else if (_invocation_counter.carry()) {
+  }
+  else if (_invocation_counter.carry() || (method_data() != NULL && method_data()->invocation_counter()->carry())) {
     // The carry bit is set when the counter overflows and causes
     // a compilation to occur.  We don't know how many times
     // the counter has been reset, so we simply assume it has
@@ -253,7 +254,7 @@
 }
 
 #ifndef PRODUCT
-void methodOopDesc::print_invocation_count() const {
+void methodOopDesc::print_invocation_count() {
   if (is_static()) tty->print("static ");
   if (is_final()) tty->print("final ");
   if (is_synchronized()) tty->print("synchronized ");
@@ -574,16 +575,19 @@
     // compilers must recognize this method specially, or not at all
     return true;
   }
-
-#ifdef COMPILER2
-  if (is_tier1_compile(comp_level)) {
-    if (is_not_tier1_compilable()) {
-      return true;
-    }
+  if (number_of_breakpoints() > 0) {
+    return true;
+  }
+  if (comp_level == CompLevel_any) {
+    return is_not_c1_compilable() || is_not_c2_compilable();
   }
-#endif // COMPILER2
-  return (_invocation_counter.state() == InvocationCounter::wait_for_nothing)
-          || (number_of_breakpoints() > 0);
+  if (is_c1_compile(comp_level)) {
+    return is_not_c1_compilable();
+  }
+  if (is_c2_compile(comp_level)) {
+    return is_not_c2_compilable();
+  }
+  return false;
 }
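
is_not_compilable is now level-aware: breakpoints block everything, CompLevel_any reports true if either compiler is disabled for the method, and otherwise the query maps to the per-compiler C1 or C2 flag. A sketch under the usual tiered numbering, assumed here: levels 1-3 are C1, level 4 is C2:

    // Assumed tiered levels: 0 none, 1-3 are C1 variants, 4 is C2.
    enum {
      CompLevel_any               = -1,
      CompLevel_none              = 0,
      CompLevel_simple            = 1,
      CompLevel_full_profile      = 3,
      CompLevel_full_optimization = 4
    };

    static bool is_c1_level(int level) {
      return level >= CompLevel_simple && level <= CompLevel_full_profile;
    }

    struct MethodSketch {
      int  breakpoints;
      bool not_c1, not_c2;

      bool is_not_compilable(int comp_level) const {
        if (breakpoints > 0) return true;             // debugger wins
        if (comp_level == CompLevel_any) return not_c1 || not_c2;
        if (is_c1_level(comp_level)) return not_c1;
        if (comp_level == CompLevel_full_optimization) return not_c2;
        return false;
      }
    };

    int main() {
      MethodSketch m = { 0, true, false };            // C1 bailed out earlier
      return (m.is_not_compilable(CompLevel_simple) &&            // C1 blocked
              !m.is_not_compilable(CompLevel_full_optimization))  // C2 still ok
             ? 0 : 1;
    }
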
 
 // call this when compiler finds that this method is not compilable
@@ -604,15 +608,18 @@
     xtty->stamp();
     xtty->end_elem();
   }
-#ifdef COMPILER2
-  if (is_tier1_compile(comp_level)) {
-    set_not_tier1_compilable();
-    return;
+  if (comp_level == CompLevel_all) {
+    set_not_c1_compilable();
+    set_not_c2_compilable();
+  } else {
+    if (is_c1_compile(comp_level)) {
+      set_not_c1_compilable();
+    } else
+      if (is_c2_compile(comp_level)) {
+        set_not_c2_compilable();
+      }
   }
-#endif /* COMPILER2 */
-  assert(comp_level == CompLevel_highest_tier, "unexpected compilation level");
-  invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
-  backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
+  CompilationPolicy::policy()->disable_compilation(this);
 }
 
 // Revert to using the interpreter and clear out the nmethod
@@ -649,7 +656,6 @@
   set_method_data(NULL);
   set_interpreter_throwout_count(0);
   set_interpreter_invocation_count(0);
-  _highest_tier_compile = CompLevel_none;
 }
 
 // Called when the method_holder is getting linked. Setup entrypoints so the method
@@ -746,8 +752,8 @@
   int comp_level = code->comp_level();
   // In theory there could be a race here. In practice it is unlikely
   // and not worth worrying about.
-  if (comp_level > mh->highest_tier_compile()) {
-    mh->set_highest_tier_compile(comp_level);
+  if (comp_level > mh->highest_comp_level()) {
+    mh->set_highest_comp_level(comp_level);
   }
 
   OrderAccess::storestore();
@@ -813,11 +819,13 @@
 
 bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) {
   switch (name_sid) {
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):  // FIXME: remove this transitional form
   case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name):
   case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
     return true;
   }
+  if (AllowTransitionalJSR292
+      && name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name))
+    return true;
   return false;
 }
 
@@ -905,12 +913,16 @@
   m->set_signature_index(_imcp_invoke_signature);
   assert(is_method_handle_invoke_name(m->name()), "");
   assert(m->signature() == signature(), "");
+  assert(m->is_method_handle_invoke(), "");
 #ifdef CC_INTERP
   ResultTypeFinder rtf(signature());
   m->set_result_index(rtf.type());
 #endif
   m->compute_size_of_parameters(THREAD);
   m->set_exception_table(Universe::the_empty_int_array());
+  m->init_intrinsic_id();
+  assert(m->intrinsic_id() == vmIntrinsics::_invokeExact ||
+         m->intrinsic_id() == vmIntrinsics::_invokeGeneric, "must be an invoker");
 
   // Finally, set up its entry points.
   assert(m->method_handle_type() == method_type(), "");
@@ -1023,6 +1035,7 @@
   assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
   const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
   assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_id_uint, "else fix size");
+  assert(intrinsic_id_size_in_bytes() == sizeof(_intrinsic_id), "");
 
   // the klass name is well-known:
   vmSymbols::SID klass_id = klass_id_for_intrinsics(method_holder());
@@ -1030,9 +1043,10 @@
 
   // ditto for method and signature:
   vmSymbols::SID  name_id = vmSymbols::find_sid(name());
-  if (name_id  == vmSymbols::NO_SID)  return;
+  if (name_id == vmSymbols::NO_SID)  return;
   vmSymbols::SID   sig_id = vmSymbols::find_sid(signature());
-  if (sig_id   == vmSymbols::NO_SID)  return;
+  if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle)
+      && sig_id == vmSymbols::NO_SID)  return;
   jshort flags = access_flags().as_short();
 
   vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
@@ -1061,10 +1075,13 @@
     if (is_static() || !is_native())  break;
     switch (name_id) {
     case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
-      id = vmIntrinsics::_invokeGeneric; break;
-    default:
-      if (is_method_handle_invoke_name(name()))
-        id = vmIntrinsics::_invokeExact;
+      id = vmIntrinsics::_invokeGeneric;
+      break;
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name):
+      id = vmIntrinsics::_invokeExact;
+      break;
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):
+      if (AllowTransitionalJSR292)  id = vmIntrinsics::_invokeExact;
       break;
     }
     break;
@@ -1442,6 +1459,64 @@
 }
 
 
+int methodOopDesc::invocation_count() {
+  if (TieredCompilation) {
+    const methodDataOop mdo = method_data();
+    if (invocation_counter()->carry() || ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) {
+      return InvocationCounter::count_limit;
+    } else {
+      return invocation_counter()->count() + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0);
+    }
+  } else {
+    return invocation_counter()->count();
+  }
+}
+
+int methodOopDesc::backedge_count() {
+  if (TieredCompilation) {
+    const methodDataOop mdo = method_data();
+    if (backedge_counter()->carry() || ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) {
+      return InvocationCounter::count_limit;
+    } else {
+      return backedge_counter()->count() + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0);
+    }
+  } else {
+    return backedge_counter()->count();
+  }
+}
+
+int methodOopDesc::highest_comp_level() const {
+  methodDataOop mdo = method_data();
+  if (mdo != NULL) {
+    return mdo->highest_comp_level();
+  } else {
+    return CompLevel_none;
+  }
+}
+
+int methodOopDesc::highest_osr_comp_level() const {
+  methodDataOop mdo = method_data();
+  if (mdo != NULL) {
+    return mdo->highest_osr_comp_level();
+  } else {
+    return CompLevel_none;
+  }
+}
+
+void methodOopDesc::set_highest_comp_level(int level) {
+  methodDataOop mdo = method_data();
+  if (mdo != NULL) {
+    mdo->set_highest_comp_level(level);
+  }
+}
+
+void methodOopDesc::set_highest_osr_comp_level(int level) {
+  methodDataOop mdo = method_data();
+  if (mdo != NULL) {
+    mdo->set_highest_osr_comp_level(level);
+  }
+}
+
 BreakpointInfo::BreakpointInfo(methodOop m, int bci) {
   _bci = bci;
   _name_index = m->name_index();
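
The new accessors combine the methodOop counter with the MDO counter introduced earlier in this change: if either carry bit is set the count saturates at count_limit, otherwise the two counts are summed. A stand-in sketch of that saturating combine:

    // Stand-in counter: a count plus the sticky carry (overflow) bit.
    struct CounterSketch { int count; bool carry; };

    static const int count_limit = 1 << 28;   // assumed saturation value

    // Mirrors invocation_count()/backedge_count() under TieredCompilation:
    // saturate if either counter ever overflowed, else sum both counts.
    static int combined_count(const CounterSketch& method_ctr,
                              const CounterSketch* mdo_ctr /* may be NULL */) {
      bool carry = method_ctr.carry || (mdo_ctr != 0 && mdo_ctr->carry);
      if (carry) return count_limit;
      return method_ctr.count + (mdo_ctr != 0 ? mdo_ctr->count : 0);
    }

    int main() {
      CounterSketch method_ctr = { 1200, false };
      CounterSketch mdo_ctr    = { 300,  false };
      int total = combined_count(method_ctr, &mdo_ctr);   // 1500
      mdo_ctr.carry = true;
      int sat   = combined_count(method_ctr, &mdo_ctr);   // count_limit
      return (total == 1500 && sat == count_limit) ? 0 : 1;
    }
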
--- a/hotspot/src/share/vm/oops/methodOop.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -62,9 +62,9 @@
 // | method_size             | max_stack                  |
 // | max_locals              | size_of_parameters         |
 // |------------------------------------------------------|
-// | intrinsic_id, highest_tier  |       (unused)         |
+// | intrinsic_id, (unused)  |  throwout_count            |
 // |------------------------------------------------------|
-// | throwout_count          | num_breakpoints            |
+// | num_breakpoints         |  (unused)                  |
 // |------------------------------------------------------|
 // | invocation_counter                                   |
 // | backedge_counter                                     |
@@ -83,7 +83,6 @@
 class CheckedExceptionElement;
 class LocalVariableTableElement;
 class AdapterHandlerEntry;
-
 class methodDataOopDesc;
 
 class methodOopDesc : public oopDesc {
@@ -93,7 +92,7 @@
   constMethodOop    _constMethod;                // Method read-only data.
   constantPoolOop   _constants;                  // Constant pool
   methodDataOop     _method_data;
-  int               _interpreter_invocation_count; // Count of times invoked
+  int               _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
   AccessFlags       _access_flags;               // Access flags
   int               _vtable_index;               // vtable index of this method (see VtableIndexFlag)
                                                  // note: can have vtables with >2**16 elements (because of inheritance)
@@ -105,11 +104,11 @@
   u2                _max_locals;                 // Number of local variables used by this method
   u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
   u1                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
-  u1                _highest_tier_compile;       // Highest compile level this method has ever seen.
   u2                _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
   u2                _number_of_breakpoints;      // fullspeed debugging support
   InvocationCounter _invocation_counter;         // Incremented before each activation of the method - used to trigger frequency-based optimizations
   InvocationCounter _backedge_counter;           // Incremented before each backedge taken - used to trigger frequency-based optimizations
+
 #ifndef PRODUCT
   int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
 #endif
@@ -221,8 +220,11 @@
   // max locals
   int  max_locals() const                        { return _max_locals; }
   void set_max_locals(int size)                  { _max_locals = size; }
-  int highest_tier_compile()                     { return _highest_tier_compile;}
-  void set_highest_tier_compile(int level)      { _highest_tier_compile = level;}
+
+  int highest_comp_level() const;
+  void set_highest_comp_level(int level);
+  int highest_osr_comp_level() const;
+  void set_highest_osr_comp_level(int level);
 
   // Count of times method was exited via exception while interpreting
   void interpreter_throwout_increment() {
@@ -276,21 +278,29 @@
   }
 
   // invocation counter
-  InvocationCounter* invocation_counter()        { return &_invocation_counter; }
-  InvocationCounter* backedge_counter()          { return &_backedge_counter; }
-  int invocation_count() const                   { return _invocation_counter.count(); }
-  int backedge_count() const                     { return _backedge_counter.count(); }
-  bool was_executed_more_than(int n) const;
-  bool was_never_executed() const                { return !was_executed_more_than(0); }
+  InvocationCounter* invocation_counter() { return &_invocation_counter; }
+  InvocationCounter* backedge_counter()   { return &_backedge_counter; }
+
+  int invocation_count();
+  int backedge_count();
+
+  bool was_executed_more_than(int n);
+  bool was_never_executed()                      { return !was_executed_more_than(0); }
 
   static void build_interpreter_method_data(methodHandle method, TRAPS);
 
-  int interpreter_invocation_count() const       { return _interpreter_invocation_count; }
+  int interpreter_invocation_count() {
+    if (TieredCompilation) return invocation_count();
+    else return _interpreter_invocation_count;
+  }
   void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; }
-  int increment_interpreter_invocation_count() { return ++_interpreter_invocation_count; }
+  int increment_interpreter_invocation_count() {
+    if (TieredCompilation) ShouldNotReachHere();
+    return ++_interpreter_invocation_count;
+  }
 
 #ifndef PRODUCT
-  int  compiled_invocation_count() const         { return _compiled_invocation_count; }
+  int  compiled_invocation_count() const         { return _compiled_invocation_count;  }
   void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
 #endif // not PRODUCT
 
@@ -361,7 +371,7 @@
 
 #ifndef PRODUCT
   // operations on invocation counter
-  void print_invocation_count() const;
+  void print_invocation_count();
 #endif
 
   // byte codes
@@ -506,6 +516,8 @@
   static int method_data_offset_in_bytes()       { return offset_of(methodOopDesc, _method_data); }
   static int interpreter_invocation_counter_offset_in_bytes()
                                                  { return offset_of(methodOopDesc, _interpreter_invocation_count); }
+  static int intrinsic_id_offset_in_bytes()      { return offset_of(methodOopDesc, _intrinsic_id); }
+  static int intrinsic_id_size_in_bytes()        { return sizeof(u1); }
 
   // Static methods that are used to implement member methods where an exposed this pointer
   // is needed due to possible GCs
@@ -587,8 +599,13 @@
   static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);
 
   // On-stack replacement support
-  bool has_osr_nmethod()                         { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }
-  nmethod* lookup_osr_nmethod_for(int bci)       { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci); }
+  bool has_osr_nmethod(int level, bool match_level) {
+   return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
+  }
+
+  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
+    return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci, level, match_level);
+  }
 
   // Inline cache support
   void cleanup_inline_caches();
@@ -600,22 +617,24 @@
   // Indicates whether compilation failed earlier for this method, or
   // whether it is not compilable for another reason like having a
   // breakpoint set in it.
-  bool is_not_compilable(int comp_level = CompLevel_highest_tier) const;
-  void set_not_compilable(int comp_level = CompLevel_highest_tier, bool report = true);
-  void set_not_compilable_quietly(int comp_level = CompLevel_highest_tier) {
+  bool is_not_compilable(int comp_level = CompLevel_any) const;
+  void set_not_compilable(int comp_level = CompLevel_all, bool report = true);
+  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_compilable(comp_level, false);
   }
-
-  bool is_not_osr_compilable() const             { return is_not_compilable() || access_flags().is_not_osr_compilable(); }
-  void set_not_osr_compilable()                  { _access_flags.set_not_osr_compilable(); }
-
-  bool is_not_tier1_compilable() const           { return access_flags().is_not_tier1_compilable(); }
-  void set_not_tier1_compilable()                { _access_flags.set_not_tier1_compilable(); }
+  bool is_not_osr_compilable(int comp_level = CompLevel_any) const {
+    return is_not_compilable(comp_level) || access_flags().is_not_osr_compilable();
+  }
+  void set_not_osr_compilable()               { _access_flags.set_not_osr_compilable();       }
+  bool is_not_c1_compilable() const           { return access_flags().is_not_c1_compilable(); }
+  void set_not_c1_compilable()                { _access_flags.set_not_c1_compilable();        }
+  bool is_not_c2_compilable() const           { return access_flags().is_not_c2_compilable(); }
+  void set_not_c2_compilable()                { _access_flags.set_not_c2_compilable();        }
 
   // Background compilation support
-  bool queued_for_compilation() const            { return access_flags().queued_for_compilation();    }
-  void set_queued_for_compilation()              { _access_flags.set_queued_for_compilation(); }
-  void clear_queued_for_compilation()            { _access_flags.clear_queued_for_compilation(); }
+  bool queued_for_compilation() const  { return access_flags().queued_for_compilation(); }
+  void set_queued_for_compilation()    { _access_flags.set_queued_for_compilation();     }
+  void clear_queued_for_compilation()  { _access_flags.clear_queued_for_compilation();   }
 
   static methodOop method_from_bcp(address bcp);
 
--- a/hotspot/src/share/vm/opto/addnode.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/addnode.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -705,6 +705,9 @@
     }
     addr = addr->in(AddPNode::Address);
   }
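+  // Walking the AddP chain did not end at the expected base, so the
+  // address does not decompose into base plus offsets; return -1 so
+  // callers (e.g. match_fill_loop) treat it as a malformed expression.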
+  if (addr != base) {
+    return -1;
+  }
   return count;
 }
 
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -140,7 +140,7 @@
   } else {
     // Not hot.  Check for medium-sized pre-existing nmethod at cold sites.
     if (callee_method->has_compiled_code() &&
-        callee_method->instructions_size() > InlineSmallCode/4)
+        callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode/4)
       return "already compiled into a medium method";
   }
   if (size > max_size) {
@@ -180,7 +180,7 @@
       }
     }
 
-    if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode) {
+    if (callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) {
       wci_result->set_profit(wci_result->profit() * 0.1);
       // %%% adjust wci_result->size()?
     }
@@ -206,7 +206,7 @@
 
   // Now perform checks which are heuristic
 
-  if( callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode )
+  if( callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode )
     return "already compiled into a big method";
 
   // don't inline exception code unless the top method belongs to an
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -157,6 +157,12 @@
   develop(bool, TraceLoopPredicate, false,                                  \
           "Trace generation of loop predicates")                            \
                                                                             \
+  product(bool, OptimizeFill, false,                                        \
+          "convert fill/copy loops into intrinsic")                         \
+                                                                            \
+  develop(bool, TraceOptimizeFill, false,                                   \
+          "print detailed information about fill conversion")               \
+                                                                            \
   develop(bool, OptoCoalesce, true,                                         \
           "Use Conservative Copy Coalescing in the Register Allocator")     \
                                                                             \
--- a/hotspot/src/share/vm/opto/compile.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -400,7 +400,7 @@
   }
 
   // Initialize the relocation buffers
-  relocInfo* locs_buf = (relocInfo*) blob->instructions_end() - MAX_locs_size;
+  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
   set_scratch_locs_memory(locs_buf);
 }
 
@@ -422,9 +422,9 @@
   assert(blob != NULL, "Initialize BufferBlob at start");
   assert(blob->size() > MAX_inst_size, "sanity");
   relocInfo* locs_buf = scratch_locs_memory();
-  address blob_begin = blob->instructions_begin();
+  address blob_begin = blob->content_begin();
   address blob_end   = (address)locs_buf;
-  assert(blob->instructions_contains(blob_end), "sanity");
+  assert(blob->content_contains(blob_end), "sanity");
   CodeBuffer buf(blob_begin, blob_end - blob_begin);
   buf.initialize_consts_size(MAX_const_size);
   buf.initialize_stubs_size(MAX_stubs_size);
@@ -433,7 +433,7 @@
   buf.insts()->initialize_shared_locs(&locs_buf[0],     lsize);
   buf.stubs()->initialize_shared_locs(&locs_buf[lsize], lsize);
   n->emit(buf, this->regalloc());
-  return buf.code_size();
+  return buf.insts_size();
 }
 
 
@@ -850,25 +850,13 @@
   set_decompile_count(0);
 
   set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
-  // Compilation level related initialization
-  if (env()->comp_level() == CompLevel_fast_compile) {
-    set_num_loop_opts(Tier1LoopOptsCount);
-    set_do_inlining(Tier1Inline != 0);
-    set_max_inline_size(Tier1MaxInlineSize);
-    set_freq_inline_size(Tier1FreqInlineSize);
-    set_do_scheduling(false);
-    set_do_count_invocations(Tier1CountInvocations);
-    set_do_method_data_update(Tier1UpdateMethodData);
-  } else {
-    assert(env()->comp_level() == CompLevel_full_optimization, "unknown comp level");
-    set_num_loop_opts(LoopOptsCount);
-    set_do_inlining(Inline);
-    set_max_inline_size(MaxInlineSize);
-    set_freq_inline_size(FreqInlineSize);
-    set_do_scheduling(OptoScheduling);
-    set_do_count_invocations(false);
-    set_do_method_data_update(false);
-  }
+  set_num_loop_opts(LoopOptsCount);
+  set_do_inlining(Inline);
+  set_max_inline_size(MaxInlineSize);
+  set_freq_inline_size(FreqInlineSize);
+  set_do_scheduling(OptoScheduling);
+  set_do_count_invocations(false);
+  set_do_method_data_update(false);
 
   if (debug_info()->recording_non_safepoints()) {
     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1739,6 +1739,7 @@
   C->gvn_replace_by(callprojs.fallthrough_catchproj, final_state->in(TypeFunc::Control));
   C->gvn_replace_by(callprojs.fallthrough_memproj,   final_state->in(TypeFunc::Memory));
   C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_state->in(TypeFunc::I_O));
+  Node* final_mem = final_state->in(TypeFunc::Memory);
 
   // Replace the result with the new result if it exists and is used
   if (callprojs.resproj != NULL && result != NULL) {
@@ -1776,6 +1777,21 @@
   // Disconnect the call from the graph
   call->disconnect_inputs(NULL);
   C->gvn_replace_by(call, C->top());
+
+  // Clean up any MergeMems that feed other MergeMems since the
+  // optimizer doesn't like that.
+  if (final_mem->is_MergeMem()) {
+    Node_List wl;
+    for (SimpleDUIterator i(final_mem); i.has_next(); i.next()) {
+      Node* m = i.get();
+      if (m->is_MergeMem() && !wl.contains(m)) {
+        wl.push(m);
+      }
+    }
+    while (wl.size() > 0) {
+      _gvn.transform(wl.pop());
+    }
+  }
 }
 
 
@@ -1891,7 +1907,7 @@
   kill_dead_locals();
 
   // Now insert the uncommon trap subroutine call
-  address call_addr = SharedRuntime::uncommon_trap_blob()->instructions_begin();
+  address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
   const TypePtr* no_memory_effects = NULL;
   // Pass the index of the class to be loaded
   Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON |
@@ -2451,11 +2467,79 @@
 }
 
 
+//------------------------------seems_never_null-------------------------------
+// Use null_seen information if it is available from the profile.
+// If we see an unexpected null at a type check we record it and force a
+// recompile; the offending check will be recompiled to handle NULLs.
+// If we see several offending BCIs, then all checks in the
+// method will be recompiled.
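+//
+// Illustrative sketch: at a checkcast whose MDO BitData records
+// null_seen == false, this returns true and the caller replaces the
+// null path with an uncommon trap instead of an explicit null test.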
+bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
+  if (UncommonNullCast               // Cutout for this technique
+      && obj != null()               // And not the -Xcomp stupid case?
+      && !too_many_traps(Deoptimization::Reason_null_check)
+      ) {
+    if (data == NULL)
+      // Edge case:  no mature data.  Be optimistic here.
+      return true;
+    // If the profile has not seen a null, assume it won't happen.
+    assert(java_bc() == Bytecodes::_checkcast ||
+           java_bc() == Bytecodes::_instanceof ||
+           java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
+    return !data->as_BitData()->null_seen();
+  }
+  return false;
+}
+
+//------------------------maybe_cast_profiled_receiver-------------------------
+// If the profile has seen exactly one type, narrow to exactly that type.
+// Subsequent type checks will always fold up.
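+//
+// Illustrative example: if the profile at a checkcast has only ever seen
+// java/util/ArrayList receivers, the object is cast to ArrayList here so
+// the following subtype check constant-folds.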
+Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
+                                             ciProfileData* data,
+                                             ciKlass* require_klass) {
+  if (!UseTypeProfile || !TypeProfileCasts) return NULL;
+  if (data == NULL)  return NULL;
+
+  // Make sure we haven't already deoptimized from this tactic.
+  if (too_many_traps(Deoptimization::Reason_class_check))
+    return NULL;
+
+  // (No, this isn't a call, but it's enough like a virtual call
+  // to use the same ciMethod accessor to get the profile info...)
+  ciCallProfile profile = method()->call_profile_at_bci(bci());
+  if (profile.count() >= 0 &&         // no cast failures here
+      profile.has_receiver(0) &&
+      profile.morphism() == 1) {
+    ciKlass* exact_kls = profile.receiver(0);
+    if (require_klass == NULL ||
+        static_subtype_check(require_klass, exact_kls) == SSC_always_true) {
+      // If we narrow the type to match what the type profile sees,
+      // we can then remove the rest of the cast.
+      // This is a win, even if the exact_kls is very specific,
+      // because downstream operations, such as method calls,
+      // will often benefit from the sharper type.
+      Node* exact_obj = not_null_obj; // will get updated in place...
+      Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
+                                            &exact_obj);
+      { PreserveJVMState pjvms(this);
+        set_control(slow_ctl);
+        uncommon_trap(Deoptimization::Reason_class_check,
+                      Deoptimization::Action_maybe_recompile);
+      }
+      replace_in_map(not_null_obj, exact_obj);
+      return exact_obj;
+    }
+    // assert(ssc == SSC_always_true)... except maybe the profile lied to us.
+  }
+
+  return NULL;
+}
+
+
 //-------------------------------gen_instanceof--------------------------------
 // Generate an instance-of idiom.  Used by both the instance-of bytecode
 // and the reflective instance-of call.
-Node* GraphKit::gen_instanceof( Node *subobj, Node* superklass ) {
-  C->set_has_split_ifs(true); // Has chance for split-if optimization
+Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
+  kill_dead_locals();           // Benefit all the uncommon traps
   assert( !stopped(), "dead parse path should be checked in callers" );
   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
          "must check for not-null not-dead klass in callers");
@@ -2466,9 +2550,16 @@
   Node*       phi    = new(C, PATH_LIMIT) PhiNode(region, TypeInt::BOOL);
   C->set_has_split_ifs(true); // Has chance for split-if optimization
 
+  ciProfileData* data = NULL;
+  if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
+    data = method()->method_data()->bci_to_data(bci());
+  }
+  bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
+                         && seems_never_null(obj, data));
+
   // Null check; get casted pointer; set region slot 3
   Node* null_ctl = top();
-  Node* not_null_obj = null_check_oop(subobj, &null_ctl);
+  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
 
   // If not_null_obj is dead, only null-path is taken
   if (stopped()) {              // Doing instance-of on a NULL?
@@ -2477,6 +2568,23 @@
   }
   region->init_req(_null_path, null_ctl);
   phi   ->init_req(_null_path, intcon(0)); // Set null path value
+  if (null_ctl == top()) {
+    // Do this eagerly, so that pattern matches like is_diamond_phi
+    // will work even during parsing.
+    assert(_null_path == PATH_LIMIT-1, "delete last");
+    region->del_req(_null_path);
+    phi   ->del_req(_null_path);
+  }
+
+  if (ProfileDynamicTypes && data != NULL) {
+    Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, NULL);
+    if (stopped()) {            // Profile disagrees with this path.
+      set_control(null_ctl);    // Null is the only remaining possibility.
+      return intcon(0);
+    }
+    if (cast_obj != NULL)
+      not_null_obj = cast_obj;
+  }
 
   // Load the object's klass
   Node* obj_klass = load_object_klass(not_null_obj);
@@ -2546,20 +2654,8 @@
   C->set_has_split_ifs(true); // Has chance for split-if optimization
 
   // Use null-cast information if it is available
-  bool never_see_null = false;
-  // If we see an unexpected null at a check-cast we record it and force a
-  // recompile; the offending check-cast will be compiled to handle NULLs.
-  // If we see several offending BCIs, then all checkcasts in the
-  // method will be compiled to handle NULLs.
-  if (UncommonNullCast            // Cutout for this technique
-      && failure_control == NULL  // regular case
-      && obj != null()            // And not the -Xcomp stupid case?
-      && !too_many_traps(Deoptimization::Reason_null_check)) {
-    // Finally, check the "null_seen" bit from the interpreter.
-    if (data == NULL || !data->as_BitData()->null_seen()) {
-      never_see_null = true;
-    }
-  }
+  bool never_see_null = ((failure_control == NULL)  // regular case only
+                         && seems_never_null(obj, data));
 
   // Null check; get casted pointer; set region slot 3
   Node* null_ctl = top();
@@ -2572,47 +2668,26 @@
   }
   region->init_req(_null_path, null_ctl);
   phi   ->init_req(_null_path, null());  // Set null path value
-
-  Node* cast_obj = NULL;        // the casted version of the object
-
-  // If the profile has seen exactly one type, narrow to that type.
-  // (The subsequent subtype check will always fold up.)
-  if (UseTypeProfile && TypeProfileCasts && data != NULL &&
+  if (null_ctl == top()) {
+    // Do this eagerly, so that pattern matches like is_diamond_phi
+    // will work even during parsing.
+    assert(_null_path == PATH_LIMIT-1, "delete last");
+    region->del_req(_null_path);
+    phi   ->del_req(_null_path);
+  }
+
+  Node* cast_obj = NULL;
+  if (data != NULL &&
       // Counter has never been decremented (due to cast failure).
       // ...This is a reasonable thing to expect.  It is true of
       // all casts inserted by javac to implement generic types.
-      data->as_CounterData()->count() >= 0 &&
-      !too_many_traps(Deoptimization::Reason_class_check)) {
-    // (No, this isn't a call, but it's enough like a virtual call
-    // to use the same ciMethod accessor to get the profile info...)
-    ciCallProfile profile = method()->call_profile_at_bci(bci());
-    if (profile.count() >= 0 &&         // no cast failures here
-        profile.has_receiver(0) &&
-        profile.morphism() == 1) {
-      ciKlass* exact_kls = profile.receiver(0);
-      int ssc = static_subtype_check(tk->klass(), exact_kls);
-      if (ssc == SSC_always_true) {
-        // If we narrow the type to match what the type profile sees,
-        // we can then remove the rest of the cast.
-        // This is a win, even if the exact_kls is very specific,
-        // because downstream operations, such as method calls,
-        // will often benefit from the sharper type.
-        Node* exact_obj = not_null_obj; // will get updated in place...
-        Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
-                                              &exact_obj);
-        { PreserveJVMState pjvms(this);
-          set_control(slow_ctl);
-          uncommon_trap(Deoptimization::Reason_class_check,
-                        Deoptimization::Action_maybe_recompile);
-        }
-        if (failure_control != NULL) // failure is now impossible
-          (*failure_control) = top();
-        replace_in_map(not_null_obj, exact_obj);
-        // adjust the type of the phi to the exact klass:
-        phi->raise_bottom_type(_gvn.type(exact_obj)->meet(TypePtr::NULL_PTR));
-        cast_obj = exact_obj;
-      }
-      // assert(cast_obj != NULL)... except maybe the profile lied to us.
+      data->as_CounterData()->count() >= 0) {
+    cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass());
+    if (cast_obj != NULL) {
+      if (failure_control != NULL) // failure is now impossible
+        (*failure_control) = top();
+      // adjust the type of the phi to the exact klass:
+      phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
     }
   }
 
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -341,6 +341,14 @@
   Node* null_check_oop(Node* value, Node* *null_control,
                        bool never_see_null = false);
 
+  // Check the null_seen bit.
+  bool seems_never_null(Node* obj, ciProfileData* data);
+
+  // Use the type profile to narrow an object type.
+  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
+                                     ciProfileData* data,
+                                     ciKlass* require_klass);
+
   // Cast obj to not-null on this path
   Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
   // Replace all occurrences of one node by another.
--- a/hotspot/src/share/vm/opto/lcm.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,8 +72,7 @@
     for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
       Node* nn = null_block->_nodes[i1];
       if (nn->is_MachCall() &&
-          nn->as_MachCall()->entry_point() ==
-          SharedRuntime::uncommon_trap_blob()->instructions_begin()) {
+          nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
         const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
         if (trtype->isa_int() && trtype->is_int()->is_con()) {
           jint tr_con = trtype->is_int()->get_con();
--- a/hotspot/src/share/vm/opto/library_call.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -906,7 +906,8 @@
   const int count_offset = java_lang_String::count_offset_in_bytes();
   const int offset_offset = java_lang_String::offset_offset_in_bytes();
 
-  _sp += 2;
+  int nargs = 2;
+  _sp += nargs;
   Node* argument = pop();  // pop non-receiver first:  it was pushed second
   Node* receiver = pop();
 
@@ -914,11 +915,11 @@
   // null check technically happens in the wrong place, which can lead to
   // invalid stack traces when string compare is inlined into a method
   // which handles NullPointerExceptions.
-  _sp += 2;
+  _sp += nargs;
   receiver = do_null_check(receiver, T_OBJECT);
   //should not do null check for argument for String.equals(), because spec
   //allows to specify NULL as argument.
-  _sp -= 2;
+  _sp -= nargs;
 
   if (stopped()) {
     return true;
@@ -943,7 +944,9 @@
   ciInstanceKlass* klass = env()->String_klass();
 
   if (!stopped()) {
+    _sp += nargs;          // gen_instanceof might do an uncommon trap
     Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
+    _sp -= nargs;
     Node* cmp  = _gvn.transform(new (C, 3) CmpINode(inst, intcon(1)));
     Node* bol  = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::ne));
 
@@ -2935,7 +2938,9 @@
   switch (id) {
   case vmIntrinsics::_isInstance:
     // nothing is an instance of a primitive type
+    _sp += nargs;          // gen_instanceof might do an uncommon trap
     query_value = gen_instanceof(obj, kls);
+    _sp -= nargs;
     break;
 
   case vmIntrinsics::_getModifiers:
@@ -4957,8 +4962,7 @@
       for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
         Node* obs = not_ctl->fast_out(j);
         if (obs->in(0) == not_ctl && obs->is_Call() &&
-            (obs->as_Call()->entry_point() ==
-             SharedRuntime::uncommon_trap_blob()->instructions_begin())) {
+            (obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
           found_trap = true; break;
         }
       }
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -2049,11 +2049,18 @@
   if (cmp->Opcode() != Op_CmpU ) {
     return false;
   }
-  if (cmp->in(2)->Opcode() != Op_LoadRange) {
-    return false;
+  Node* range = cmp->in(2);
+  if (range->Opcode() != Op_LoadRange) {
+    const TypeInt* tint = phase->_igvn.type(range)->isa_int();
+    if (!OptimizeFill || tint == NULL || tint->empty() || tint->_lo < 0) {
+      // Allow predication on positive values that aren't LoadRanges.
+      // This allows optimization of loops where the length of the
+      // array is a known value and doesn't need to be loaded back
+      // from the array.
+      return false;
+    }
   }
-  LoadRangeNode* lr = (LoadRangeNode*)cmp->in(2);
-  if (!invar.is_invariant(lr)) { // loadRange must be invariant
+  if (!invar.is_invariant(range)) {
     return false;
   }
   Node *iv     = _head->as_CountedLoop()->phi();
@@ -2248,9 +2255,9 @@
       const Node*    cmp    = bol->in(1)->as_Cmp();
       Node*          idx    = cmp->in(1);
       assert(!invar.is_invariant(idx), "index is variant");
-      assert(cmp->in(2)->Opcode() == Op_LoadRange, "must be");
-      Node* ld_rng = cmp->in(2); // LoadRangeNode
-      assert(invar.is_invariant(ld_rng), "load range must be invariant");
+      assert(cmp->in(2)->Opcode() == Op_LoadRange || OptimizeFill, "must be");
+      Node* rng = cmp->in(2);
+      assert(invar.is_invariant(rng), "range must be invariant");
       int scale    = 1;
       Node* offset = zero;
       bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
@@ -2271,21 +2278,21 @@
 
       // Perform cloning to keep Invariance state correct since the
       // late schedule will place invariant things in the loop.
-      ld_rng = invar.clone(ld_rng, ctrl);
+      rng = invar.clone(rng, ctrl);
       if (offset && offset != zero) {
         assert(invar.is_invariant(offset), "offset must be loop invariant");
         offset = invar.clone(offset, ctrl);
       }
 
       // Test the lower bound
-      Node*  lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng, false);
+      Node*  lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false);
       IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
       _igvn.hash_delete(lower_bound_iff);
       lower_bound_iff->set_req(1, lower_bound_bol);
       if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
 
       // Test the upper bound
-      Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng, true);
+      Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, true);
       IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
       _igvn.hash_delete(upper_bound_iff);
       upper_bound_iff->set_req(1, upper_bound_bol);
@@ -2366,3 +2373,358 @@
 
   return hoisted;
 }
+
+
+// Process all the loops in the loop tree and replace any fill
+// patterns with an intrinsic version.
+bool PhaseIdealLoop::do_intrinsify_fill() {
+  bool changed = false;
+  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
+    IdealLoopTree* lpt = iter.current();
+    changed |= intrinsify_fill(lpt);
+  }
+  return changed;
+}
+
+
+// Examine an inner loop looking for a single store of an invariant
+// value in a unit stride loop.
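+//
+// Illustrative Java-level shape of a matched loop (matching is actually
+// done on ideal graph nodes, not on bytecode):
+//
+//   for (int i = init; i < limit; i++) {
+//     a[i] = value;   // 'value' loop invariant, unit stride
+//   }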
+bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
+                                     Node*& shift, Node*& con) {
+  const char* msg = NULL;
+  Node* msg_node = NULL;
+
+  store_value = NULL;
+  con = NULL;
+  shift = NULL;
+
+  // Process the loop looking for stores.  If there are multiple
+  // stores or extra control flow, give up at this point.
+  CountedLoopNode* head = lpt->_head->as_CountedLoop();
+  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
+    Node* n = lpt->_body.at(i);
+    if (n->outcnt() == 0) continue; // Ignore dead
+    if (n->is_Store()) {
+      if (store != NULL) {
+        msg = "multiple stores";
+        break;
+      }
+      int opc = n->Opcode();
+      if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreCM) {
+        msg = "oop fills not handled";
+        break;
+      }
+      Node* value = n->in(MemNode::ValueIn);
+      if (!lpt->is_invariant(value)) {
+        msg = "variant store value";
+      } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
+        msg = "not array address";
+      }
+      store = n;
+      store_value = value;
+    } else if (n->is_If() && n != head->loopexit()) {
+      msg = "extra control flow";
+      msg_node = n;
+    }
+  }
+
+  if (store == NULL) {
+    // No store in loop
+    return false;
+  }
+
+  if (msg == NULL && head->stride_con() != 1) {
+    // could handle negative strides too
+    if (head->stride_con() < 0) {
+      msg = "negative stride";
+    } else {
+      msg = "non-unit stride";
+    }
+  }
+
+  if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
+    msg = "can't handle store address";
+    msg_node = store->in(MemNode::Address);
+  }
+
+  // Make sure there is an appropriate fill routine
+  BasicType t = store->as_Mem()->memory_type();
+  const char* fill_name;
+  if (msg == NULL &&
+      StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
+    msg = "unsupported store";
+    msg_node = store;
+  }
+
+  if (msg != NULL) {
+#ifndef PRODUCT
+    if (TraceOptimizeFill) {
+      tty->print_cr("not fill intrinsic candidate: %s", msg);
+      if (msg_node != NULL) msg_node->dump();
+    }
+#endif
+    return false;
+  }
+
+  // Make sure the address expression can be handled.  It should be
+  // head->phi * elsize + con.  head->phi might have a ConvI2L.
+  Node* elements[4];
+  Node* conv = NULL;
+  bool found_index = false;
+  int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
+  for (int e = 0; e < count; e++) {
+    Node* n = elements[e];
+    if (n->is_Con() && con == NULL) {
+      con = n;
+    } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
+      Node* value = n->in(1);
+#ifdef _LP64
+      if (value->Opcode() == Op_ConvI2L) {
+        conv = value;
+        value = value->in(1);
+      }
+#endif
+      if (value != head->phi()) {
+        msg = "unhandled shift in address";
+      } else {
+        found_index = true;
+        shift = n;
+        assert(type2aelembytes(store->as_Mem()->memory_type(), true) == 1 << shift->in(2)->get_int(), "scale should match");
+      }
+    } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
+      if (n->in(1) == head->phi()) {
+        found_index = true;
+        conv = n;
+      } else {
+        msg = "unhandled input to ConvI2L";
+      }
+    } else if (n == head->phi()) {
+      // no shift, check below for allowed cases
+      found_index = true;
+    } else {
+      msg = "unhandled node in address";
+      msg_node = n;
+    }
+  }
+
+  if (count == -1) {
+    msg = "malformed address expression";
+    msg_node = store;
+  }
+
+  if (!found_index) {
+    msg = "missing use of index";
+  }
+
+  // byte sized items won't have a shift
+  if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
+    msg = "can't find shift";
+    msg_node = store;
+  }
+
+  if (msg != NULL) {
+#ifndef PRODUCT
+    if (TraceOptimizeFill) {
+      tty->print_cr("not fill intrinsic: %s", msg);
+      if (msg_node != NULL) msg_node->dump();
+    }
+#endif
+    return false;
+  }
+
+  // Now make sure all the other nodes in the loop can be handled
+  VectorSet ok(Thread::current()->resource_area());
+
+  // store related values are ok
+  ok.set(store->_idx);
+  ok.set(store->in(MemNode::Memory)->_idx);
+
+  // Loop structure is ok
+  ok.set(head->_idx);
+  ok.set(head->loopexit()->_idx);
+  ok.set(head->phi()->_idx);
+  ok.set(head->incr()->_idx);
+  ok.set(head->loopexit()->cmp_node()->_idx);
+  ok.set(head->loopexit()->in(1)->_idx);
+
+  // Address elements are ok
+  if (con)   ok.set(con->_idx);
+  if (shift) ok.set(shift->_idx);
+  if (conv)  ok.set(conv->_idx);
+
+  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
+    Node* n = lpt->_body.at(i);
+    if (n->outcnt() == 0) continue; // Ignore dead
+    if (ok.test(n->_idx)) continue;
+    // Backedge projection is ok
+    if (n->is_IfTrue() && n->in(0) == head->loopexit()) continue;
+    if (!n->is_AddP()) {
+      msg = "unhandled node";
+      msg_node = n;
+      break;
+    }
+  }
+
+  // Make sure no unexpected values are used outside the loop
+  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
+    Node* n = lpt->_body.at(i);
+    // These values can be replaced with other nodes if they are used
+    // outside the loop.
+    if (n == store || n == head->loopexit() || n == head->incr()) continue;
+    for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
+      Node* use = iter.get();
+      if (!lpt->_body.contains(use)) {
+        msg = "node is used outside loop";
+        // lpt->_body.dump();
+        msg_node = n;
+        break;
+      }
+    }
+  }
+
+#ifdef ASSERT
+  if (TraceOptimizeFill) {
+    if (msg != NULL) {
+      tty->print_cr("no fill intrinsic: %s", msg);
+      if (msg_node != NULL) msg_node->dump();
+    } else {
+      tty->print_cr("fill intrinsic for:");
+    }
+    store->dump();
+    if (Verbose) {
+      lpt->_body.dump();
+    }
+  }
+#endif
+
+  return msg == NULL;
+}
+
+
+bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
+  // Only for counted inner loops
+  if (!lpt->is_counted() || !lpt->is_inner()) {
+    return false;
+  }
+
+  // Must have constant stride
+  CountedLoopNode* head = lpt->_head->as_CountedLoop();
+  if (!head->stride_is_con() || !head->is_normal_loop()) {
+    return false;
+  }
+
+  // Check that the body only contains a store of a loop invariant
+  // value that is indexed by the loop phi.
+  Node* store = NULL;
+  Node* store_value = NULL;
+  Node* shift = NULL;
+  Node* offset = NULL;
+  if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
+    return false;
+  }
+
+  // Now replace the whole loop body by a call to a fill routine that
+  // covers the same region as the loop.
+  Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
+
+  // Build an expression for the beginning of the copy region
+  Node* index = head->init_trip();
+#ifdef _LP64
+  index = new (C, 2) ConvI2LNode(index);
+  _igvn.register_new_node_with_optimizer(index);
+#endif
+  if (shift != NULL) {
+    // Byte arrays don't require a shift, but others do.
+    index = new (C, 3) LShiftXNode(index, shift->in(2));
+    _igvn.register_new_node_with_optimizer(index);
+  }
+  index = new (C, 4) AddPNode(base, base, index);
+  _igvn.register_new_node_with_optimizer(index);
+  Node* from = new (C, 4) AddPNode(base, index, offset);
+  _igvn.register_new_node_with_optimizer(from);
+  // Compute the number of elements to copy
+  Node* len = new (C, 3) SubINode(head->limit(), head->init_trip());
+  _igvn.register_new_node_with_optimizer(len);
+
+  BasicType t = store->as_Mem()->memory_type();
+  bool aligned = false;
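+  // When the constant offset and the initial trip count are both known,
+  // determine whether the first store lands on a HeapWord boundary so
+  // that the aligned variant of the fill stub can be selected.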
+  if (offset != NULL && head->init_trip()->is_Con()) {
+    int element_size = type2aelembytes(t);
+    aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
+  }
+
+  // Build a call to the fill routine
+  const char* fill_name;
+  address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
+  assert(fill != NULL, "what?");
+
+  // Convert float/double to int/long for fill routines
+  if (t == T_FLOAT) {
+    store_value = new (C, 2) MoveF2INode(store_value);
+    _igvn.register_new_node_with_optimizer(store_value);
+  } else if (t == T_DOUBLE) {
+    store_value = new (C, 2) MoveD2LNode(store_value);
+    _igvn.register_new_node_with_optimizer(store_value);
+  }
+
+  Node* mem_phi = store->in(MemNode::Memory);
+  Node* result_ctrl;
+  Node* result_mem;
+  const TypeFunc* call_type = OptoRuntime::array_fill_Type();
+  int size = call_type->domain()->cnt();
+  CallLeafNode *call = new (C, size) CallLeafNoFPNode(call_type, fill,
+                                                      fill_name, TypeAryPtr::get_array_body_type(t));
+  call->init_req(TypeFunc::Parms+0, from);
+  call->init_req(TypeFunc::Parms+1, store_value);
+  call->init_req(TypeFunc::Parms+2, len);
+  call->init_req(TypeFunc::Control,   head->init_control());
+  call->init_req(TypeFunc::I_O,       C->top());       // does no i/o
+  call->init_req(TypeFunc::Memory,    mem_phi->in(LoopNode::EntryControl));
+  call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr));
+  call->init_req(TypeFunc::FramePtr,  C->start()->proj_out(TypeFunc::FramePtr));
+  _igvn.register_new_node_with_optimizer(call);
+  result_ctrl = new (C, 1) ProjNode(call, TypeFunc::Control);
+  _igvn.register_new_node_with_optimizer(result_ctrl);
+  result_mem = new (C, 1) ProjNode(call, TypeFunc::Memory);
+  _igvn.register_new_node_with_optimizer(result_mem);
+
+  // If this fill is tightly coupled to an allocation and overwrites
+  // the whole body, allow it to take over the zeroing.
+  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
+  if (alloc != NULL && alloc->is_AllocateArray()) {
+    Node* length = alloc->as_AllocateArray()->Ideal_length();
+    if (head->limit() == length &&
+        head->init_trip() == _igvn.intcon(0)) {
+      if (TraceOptimizeFill) {
+        tty->print_cr("Eliminated zeroing in allocation");
+      }
+      alloc->maybe_set_complete(&_igvn);
+    } else {
+#ifdef ASSERT
+      if (TraceOptimizeFill) {
+        tty->print_cr("filling array but bounds don't match");
+        alloc->dump();
+        head->init_trip()->dump();
+        head->limit()->dump();
+        length->dump();
+      }
+#endif
+    }
+  }
+
+  // Redirect the old control and memory edges that are outside the loop.
+  Node* exit = head->loopexit()->proj_out(0);
+  _igvn.replace_node(exit, result_ctrl);
+  _igvn.replace_node(store, result_mem);
+  // Any uses of the increment outside of the loop become the loop limit.
+  _igvn.replace_node(head->incr(), head->limit());
+
+  // Disconnect the head from the loop.
+  for (uint i = 0; i < lpt->_body.size(); i++) {
+    Node* n = lpt->_body.at(i);
+    _igvn.replace_node(n, C->top());
+  }
+
+  return true;
+}
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1673,6 +1673,12 @@
     _ltree_root->_child->loop_predication(this);
   }
 
+  if (OptimizeFill && UseLoopPredicate && C->has_loops() && !C->major_progress()) {
+    if (do_intrinsify_fill()) {
+      C->set_major_progress();
+    }
+  }
+
   // Perform iteration-splitting on inner loops.  Split iterations to avoid
   // range checks or one-shot null checks.
 
--- a/hotspot/src/share/vm/opto/loopnode.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -937,6 +937,12 @@
   // same block.  Split thru the Region.
   void do_split_if( Node *iff );
 
+  // Conversion of fill/copy patterns into intrinsic versions
+  bool do_intrinsify_fill();
+  bool intrinsify_fill(IdealLoopTree* lpt);
+  bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
+                       Node*& shift, Node*& offset);
+
 private:
   // Return a type based on condition control flow
   const TypeInt* filtered_type( Node *n, Node* n_ctrl);
--- a/hotspot/src/share/vm/opto/memnode.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1547,8 +1547,8 @@
         adr->is_AddP() && off != Type::OffsetBot) {
       // For constant Strings treat the fields as compile time constants.
       Node* base = adr->in(AddPNode::Base);
-      if (base->Opcode() == Op_ConP) {
-        const TypeOopPtr* t = phase->type(base)->isa_oopptr();
+      const TypeOopPtr* t = phase->type(base)->isa_oopptr();
+      if (t != NULL && t->singleton()) {
         ciObject* string = t->const_oop();
         ciConstant constant = string->as_instance()->field_value_by_offset(off);
         if (constant.basic_type() == T_INT) {
--- a/hotspot/src/share/vm/opto/output.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1184,7 +1184,7 @@
       MacroAssembler(cb).bind( blk_labels[b->_pre_order] );
 
     else
-      assert( blk_labels[b->_pre_order].loc_pos() == cb->code_size(),
+      assert( blk_labels[b->_pre_order].loc_pos() == cb->insts_size(),
               "label position does not match code offset" );
 
     uint last_inst = b->_nodes.size();
@@ -1225,7 +1225,7 @@
         // If this requires all previous instructions be flushed, then do so
         if( is_sfn || is_mcall || mach->alignment_required() != 1) {
           cb->flush_bundle(true);
-          current_offset = cb->code_size();
+          current_offset = cb->insts_size();
         }
 
         // align the instruction if necessary
@@ -1246,7 +1246,7 @@
           _cfg->_bbs.map( nop->_idx, b );
           nop->emit(*cb, _regalloc);
           cb->flush_bundle(true);
-          current_offset = cb->code_size();
+          current_offset = cb->insts_size();
         }
 
         // Remember the start of the last call in a basic block
@@ -1348,12 +1348,12 @@
       // Save the offset for the listing
 #ifndef PRODUCT
       if( node_offsets && n->_idx < node_offset_limit )
-        node_offsets[n->_idx] = cb->code_size();
+        node_offsets[n->_idx] = cb->insts_size();
 #endif
 
       // "Normal" instruction case
       n->emit(*cb, _regalloc);
-      current_offset  = cb->code_size();
+      current_offset  = cb->insts_size();
       non_safepoints.observe_instruction(n, current_offset);
 
       // mcall is last "call" that can be a safepoint
@@ -1372,13 +1372,12 @@
         assert(delay_slot != NULL, "expecting delay slot node");
 
         // Back up 1 instruction
-        cb->set_code_end(
-          cb->code_end()-Pipeline::instr_unit_size());
+        cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
 
         // Save the offset for the listing
 #ifndef PRODUCT
         if( node_offsets && delay_slot->_idx < node_offset_limit )
-          node_offsets[delay_slot->_idx] = cb->code_size();
+          node_offsets[delay_slot->_idx] = cb->insts_size();
 #endif
 
         // Support a SafePoint in the delay slot
@@ -1420,7 +1419,7 @@
         b->_nodes.insert( b->_nodes.size(), nop );
         _cfg->_bbs.map( nop->_idx, b );
         nop->emit(*cb, _regalloc);
-        current_offset = cb->code_size();
+        current_offset = cb->insts_size();
       }
     }
 
@@ -1437,13 +1436,13 @@
   // Compute the size of the first block
   _first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos();
 
-  assert(cb->code_size() < 500000, "method is unreasonably large");
+  assert(cb->insts_size() < 500000, "method is unreasonably large");
 
   // ------------------
 
 #ifndef PRODUCT
   // Information on the size of the method, without the extraneous code
-  Scheduling::increment_method_size(cb->code_size());
+  Scheduling::increment_method_size(cb->insts_size());
 #endif
 
   // ------------------
--- a/hotspot/src/share/vm/opto/parse.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/parse.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -494,6 +494,7 @@
   float   dynamic_branch_prediction(float &cnt);
   float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
   bool    seems_never_taken(float prob);
+  bool    seems_stable_comparison(BoolTest::mask btest, Node* c);
 
   void    do_ifnull(BoolTest::mask btest, Node* c);
   void    do_if(BoolTest::mask btest, Node* c);
--- a/hotspot/src/share/vm/opto/parse2.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -892,6 +892,62 @@
   return prob < PROB_MIN;
 }
 
+// True if the comparison seems to be the kind that will not change its
+// statistics from true to false.  See comments in adjust_map_after_if.
+// This question is only asked along paths which are already
+// classifed as untaken (by seems_never_taken), so really,
+// if a path is never taken, its controlling comparison is
+// already acting in a stable fashion.  If the comparison
+// seems stable, we will put an expensive uncommon trap
+// on the untaken path.  To be conservative, and to allow
+// partially executed counted loops to be compiled fully,
+// we will plant uncommon traps only after pointer comparisons.
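+//
+// Illustrative example: a guard such as
+//   if (p == null)  // never taken so far
+// reaches here as CmpP with btest == BoolTest::eq and is closed off with
+// an uncommon trap, while an ordinary CmpI loop-exit test is left alone.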
+bool Parse::seems_stable_comparison(BoolTest::mask btest, Node* cmp) {
+  for (int depth = 4; depth > 0; depth--) {
+    // The following switch can find CmpP here over half the time for
+    // dynamic language code rich with type tests.
+    // Code using counted loops or array manipulations (typical
+    // of benchmarks) will have many (>80%) CmpI instructions.
+    switch (cmp->Opcode()) {
+    case Op_CmpP:
+      // A never-taken null check looks like CmpP/BoolTest::eq.
+      // These certainly should be closed off as uncommon traps.
+      if (btest == BoolTest::eq)
+        return true;
+      // A never-failed type check looks like CmpP/BoolTest::ne.
+      // Let's put traps on those, too, so that we don't have to compile
+      // unused paths with indeterminate dynamic type information.
+      if (ProfileDynamicTypes)
+        return true;
+      return false;
+
+    case Op_CmpI:
+      // A small minority (< 10%) of CmpP are masked as CmpI,
+      // as if by boolean conversion ((p == q? 1: 0) != 0).
+      // Detect that here, even if it hasn't optimized away yet.
+      // Specifically, this covers the 'instanceof' operator.
+      if (btest == BoolTest::ne || btest == BoolTest::eq) {
+        if (_gvn.type(cmp->in(2))->singleton() &&
+            cmp->in(1)->is_Phi()) {
+          PhiNode* phi = cmp->in(1)->as_Phi();
+          int true_path = phi->is_diamond_phi();
+          if (true_path > 0 &&
+              _gvn.type(phi->in(1))->singleton() &&
+              _gvn.type(phi->in(2))->singleton()) {
+            // phi->region->if_proj->ifnode->bool->cmp
+            BoolNode* bol = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
+            btest = bol->_test._test;
+            cmp = bol->in(1);
+            continue;
+          }
+        }
+      }
+      return false;
+    }
+  }
+  return false;
+}
+
 //-------------------------------repush_if_args--------------------------------
 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
 inline int Parse::repush_if_args() {
@@ -1137,19 +1193,22 @@
 
   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
 
-  int cop = c->Opcode();
-  if (seems_never_taken(prob) && cop == Op_CmpP && btest == BoolTest::eq) {
-    // (An earlier version of do_if omitted '&& btest == BoolTest::eq'.)
-    //
+  if (seems_never_taken(prob) && seems_stable_comparison(btest, c)) {
     // If this might possibly turn into an implicit null check,
     // and the null has never yet been seen, we need to generate
     // an uncommon trap, so as to recompile instead of suffering
     // with very slow branches.  (We'll get the slow branches if
     // the program ever changes phase and starts seeing nulls here.)
     //
-    // The tests we worry about are of the form (p == null).
-    // We do not simply inspect for a null constant, since a node may
+    // We do not inspect for a null constant, since a node may
     // optimize to 'null' later on.
+    //
+    // Null checks, and other tests which expect inequality,
+    // show btest == BoolTest::eq along the non-taken branch.
+    // On the other hand, type tests, must-be-null tests,
+    // and other tests which expect pointer equality,
+    // show btest == BoolTest::ne along the non-taken branch.
+    // We prune both types of branches if they look unused.
     repush_if_args();
     // We need to mark this branch as taken so that if we recompile we will
     // see that it is possible. In the tiered system the interpreter doesn't
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -119,7 +119,11 @@
   }
 
   // Push the bool result back on stack
-  push( gen_instanceof( pop(), makecon(TypeKlassPtr::make(klass)) ) );
+  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)));
+
+  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
+  pop();
+  push(res);
 }
 
 //------------------------------array_store_check------------------------------
--- a/hotspot/src/share/vm/opto/runtime.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -645,6 +645,22 @@
 }
 
 
+const TypeFunc* OptoRuntime::array_fill_Type() {
+  // create input type (domain)
+  const Type** fields = TypeTuple::fields(3);
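+  // The domain mirrors the fill stubs' signature, which takes
+  // (to, value, count), e.g. jint_fill(jint* to, jint value, jint count).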
+  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
+  fields[TypeFunc::Parms+1] = TypeInt::INT;
+  fields[TypeFunc::Parms+2] = TypeInt::INT;
+  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 3, fields);
+
+  // create result type
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = NULL; // void
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
+
+  return TypeFunc::make(domain, range);
+}
+
 //------------- Interpreter state access for on stack replacement
 const TypeFunc* OptoRuntime::osr_end_Type() {
   // create input type (domain)
--- a/hotspot/src/share/vm/opto/runtime.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/runtime.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -260,6 +260,8 @@
   static const TypeFunc* generic_arraycopy_Type();
   static const TypeFunc* slow_arraycopy_Type();   // the full routine
 
+  static const TypeFunc* array_fill_Type();
+
   // leaf on stack replacement interpreter accessor types
   static const TypeFunc* osr_end_Type();
 
--- a/hotspot/src/share/vm/opto/stringopts.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/stringopts.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -157,7 +157,7 @@
       Node* uct = _uncommon_traps.at(u);
 
       // Build a new call using the jvms state of the allocate
-      address call_addr = SharedRuntime::uncommon_trap_blob()->instructions_begin();
+      address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
       const TypeFunc* call_type = OptoRuntime::uncommon_trap_Type();
       int size = call_type->domain()->cnt();
       const TypePtr* no_memory_effects = NULL;
--- a/hotspot/src/share/vm/opto/type.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/type.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -314,7 +314,7 @@
   mreg2type[Op_RegL] = TypeLong::LONG;
   mreg2type[Op_RegFlags] = TypeInt::CC;
 
-  TypeAryPtr::RANGE   = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), current->env()->Object_klass(), false, arrayOopDesc::length_offset_in_bytes());
+  TypeAryPtr::RANGE   = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), NULL /* current->env()->Object_klass() */, false, arrayOopDesc::length_offset_in_bytes());
 
   TypeAryPtr::NARROWOOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeNarrowOop::BOTTOM, TypeInt::POS), NULL /*ciArrayKlass::make(o)*/,  false,  Type::OffsetBot);
 
@@ -3369,7 +3369,7 @@
         tary = TypeAry::make(Type::BOTTOM, tary->_size);
       }
     }
-    bool xk;
+    bool xk = false;
     switch (tap->ptr()) {
     case AnyNull:
     case TopPTR:
@@ -3391,9 +3391,10 @@
         o = tap->const_oop();
         xk = true;
       } else {
-        xk = this->_klass_is_exact;
+        // Only precise for identical arrays
+        xk = this->_klass_is_exact && (klass() == tap->klass());
       }
-      return TypeAryPtr::make( ptr, o, tary, tap->_klass, xk, off, instance_id );
+      return TypeAryPtr::make( ptr, o, tary, lazy_klass, xk, off, instance_id );
     }
     case NotNull:
     case BotPTR:
@@ -3683,12 +3684,10 @@
 }
 
 
-//------------------------------klass------------------------------------------
-// Return the defining klass for this class
-ciKlass* TypeAryPtr::klass() const {
-  if( _klass ) return _klass;   // Return cached value, if possible
-
-  // Oops, need to compute _klass and cache it
+//----------------------compute_klass------------------------------------------
+// Compute the defining klass for this class
+ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const {
+  // Compute _klass based on element type.
   ciKlass* k_ary = NULL;
   const TypeInstPtr *tinst;
   const TypeAryPtr *tary;
@@ -3715,11 +3714,39 @@
   } else {
     // Cannot compute array klass directly from basic type,
     // since subtypes of TypeInt all have basic type T_INT.
+#ifdef ASSERT
+    if (verify && el->isa_int()) {
+      // Check simple cases when verifying klass.
+      BasicType bt = T_ILLEGAL;
+      if (el == TypeInt::BYTE) {
+        bt = T_BYTE;
+      } else if (el == TypeInt::SHORT) {
+        bt = T_SHORT;
+      } else if (el == TypeInt::CHAR) {
+        bt = T_CHAR;
+      } else if (el == TypeInt::INT) {
+        bt = T_INT;
+      } else {
+        return _klass; // just return specified klass
+      }
+      return ciTypeArrayKlass::make(bt);
+    }
+#endif
     assert(!el->isa_int(),
            "integral arrays must be pre-equipped with a class");
     // Compute array klass directly from basic type
     k_ary = ciTypeArrayKlass::make(el->basic_type());
   }
+  return k_ary;
+}
+
+//------------------------------klass------------------------------------------
+// Return the defining klass for this class
+ciKlass* TypeAryPtr::klass() const {
+  if( _klass ) return _klass;   // Return cached value, if possible
+
+  // Oops, need to compute _klass and cache it
+  ciKlass* k_ary = compute_klass();
 
   if( this != TypeAryPtr::OOPS ) {
     // The _klass field acts as a cache of the underlying
--- a/hotspot/src/share/vm/opto/type.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/opto/type.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -831,11 +831,30 @@
 //------------------------------TypeAryPtr-------------------------------------
 // Class of Java array pointers
 class TypeAryPtr : public TypeOopPtr {
-  TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id), _ary(ary) {};
+  TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id), _ary(ary) {
+#ifdef ASSERT
+    if (k != NULL) {
+      // Verify that specified klass and TypeAryPtr::klass() follow the same rules.
+      ciKlass* ck = compute_klass(true);
+      if (k != ck) {
+        this->dump(); tty->cr();
+        tty->print(" k: ");
+        k->print(); tty->cr();
+        tty->print("ck: ");
+        if (ck != NULL) ck->print();
+        else tty->print("<NULL>");
+        tty->cr();
+        assert(false, "unexpected TypeAryPtr::_klass");
+      }
+    }
+#endif
+  }
   virtual bool eq( const Type *t ) const;
   virtual int hash() const;     // Type specific hashing
   const TypeAry *_ary;          // Array we point into
 
+  ciKlass* compute_klass(DEBUG_ONLY(bool verify = false)) const;
+
 public:
   // Accessors
   ciKlass* klass() const;
--- a/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -114,7 +114,7 @@
   // check if this starting address has been seen already - the
   // assumption is that stubs are inserted into the list before the
   // enclosing BufferBlobs.
-  address addr = cb->instructions_begin();
+  address addr = cb->code_begin();
   for (int i=0; i<_global_code_blobs->length(); i++) {
     JvmtiCodeBlobDesc* scb = _global_code_blobs->at(i);
     if (addr == scb->code_begin()) {
@@ -123,8 +123,7 @@
   }
 
   // record the CodeBlob details as a JvmtiCodeBlobDesc
-  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(cb->name(), cb->instructions_begin(),
-                                                 cb->instructions_end());
+  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(cb->name(), cb->code_begin(), cb->code_end());
   _global_code_blobs->append(scb);
 }
 
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -687,8 +687,8 @@
  public:
   JvmtiCompiledMethodLoadEventMark(JavaThread *thread, nmethod *nm, void* compile_info_ptr = NULL)
           : JvmtiMethodEventMark(thread,methodHandle(thread, nm->method())) {
-    _code_data = nm->code_begin();
-    _code_size = nm->code_size();
+    _code_data = nm->insts_begin();
+    _code_size = nm->insts_size();
     _compile_info = compile_info_ptr; // Set void pointer of compiledMethodLoad Event. Default value is NULL.
     JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &_map, &_map_length);
   }
--- a/hotspot/src/share/vm/prims/methodHandleWalk.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/prims/methodHandleWalk.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -333,8 +333,7 @@
         ArgToken arglist[2];
         arglist[0] = arg;         // outgoing value
         arglist[1] = ArgToken();  // sentinel
-        assert(false, "I think the argument count must be 1 instead of 0");
-        arg = make_invoke(NULL, boxer, Bytecodes::_invokevirtual, false, 0, &arglist[0], CHECK_(empty));
+        arg = make_invoke(NULL, boxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty));
         change_argument(src, arg_slot, T_OBJECT, arg);
         break;
       }
@@ -979,7 +978,7 @@
 
   // Inline the method.
   InvocationCounter* ic = m->invocation_counter();
-  ic->set_carry();
+  ic->set_carry_flag();
 
   for (int i = 0; i < argc; i++) {
     ArgToken arg = argv[i];
@@ -1209,7 +1208,7 @@
   // Set the carry bit of the invocation counter to force inlining of
   // the adapter.
   InvocationCounter* ic = m->invocation_counter();
-  ic->set_carry();
+  ic->set_carry_flag();
 
   // Rewrite the method and set up the constant pool cache.
   objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(nullHandle));
@@ -1398,7 +1397,9 @@
 
 extern "C"
 void print_method_handle(oop mh) {
-  if (java_dyn_MethodHandle::is_instance(mh)) {
+  if (!mh->is_oop()) {
+    tty->print_cr("*** not a method handle: "INTPTR_FORMAT, (intptr_t)mh);
+  } else if (java_dyn_MethodHandle::is_instance(mh)) {
     //MethodHandlePrinter::print(mh);
   } else {
     tty->print("*** not a method handle: ");
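
(On the make_invoke fix above: arglist[0] carries the single outgoing value
and arglist[1] is only a null sentinel, so the argument count passed to
make_invoke has to be 1; the removed assert had been flagging exactly that.)
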
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -113,8 +113,7 @@
   _adapter_code = MethodHandlesAdapterBlob::create(_adapter_code_size);
   if (_adapter_code == NULL)
     vm_exit_out_of_memory(_adapter_code_size, "CodeCache: no room for MethodHandles adapters");
-  CodeBuffer code(_adapter_code->instructions_begin(), _adapter_code->instructions_size());
-
+  CodeBuffer code(_adapter_code);
   MethodHandlesAdapterGenerator g(&code);
   g.generate();
 }
--- a/hotspot/src/share/vm/prims/methodHandles.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -446,6 +446,8 @@
                                RegisterOrConstant arg_slots,
                                Register argslot_reg,
                                Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
+
+  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
 };
 
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -50,7 +50,6 @@
 bool   Arguments::_UseOnStackReplacement        = UseOnStackReplacement;
 bool   Arguments::_BackgroundCompilation        = BackgroundCompilation;
 bool   Arguments::_ClipInlining                 = ClipInlining;
-intx   Arguments::_Tier2CompileThreshold        = Tier2CompileThreshold;
 
 char*  Arguments::SharedArchivePath             = NULL;
 
@@ -121,7 +120,7 @@
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.name",
                                                                  "Java Virtual Machine Specification",  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
-                                                                 "Sun Microsystems Inc.",  false));
+        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.version", VM_Version::vm_release(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(),  false));
@@ -913,7 +912,6 @@
   AlwaysCompileLoopMethods   = Arguments::_AlwaysCompileLoopMethods;
   UseOnStackReplacement      = Arguments::_UseOnStackReplacement;
   BackgroundCompilation      = Arguments::_BackgroundCompilation;
-  Tier2CompileThreshold      = Arguments::_Tier2CompileThreshold;
 
   // Change from defaults based on mode
   switch (mode) {
@@ -950,6 +948,31 @@
   }
 }
 
+void Arguments::set_tiered_flags() {
+  if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
+    FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
+  }
+
+  if (CompilationPolicyChoice < 2) {
+    vm_exit_during_initialization(
+      "Incompatible compilation policy selected", NULL);
+  }
+
+#ifdef _LP64
+  if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
+    UseCompressedOops = false;
+  }
+  if (UseCompressedOops) {
+    vm_exit_during_initialization(
+      "Tiered compilation is not supported with compressed oops yet", NULL);
+  }
+#endif
+  // Increase the code cache size - tiered compiles a lot more.
+  if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
+    FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 2);
+  }
+}
+
 #ifndef KERNEL
 // If the user has chosen ParallelGCThreads > 0, we set UseParNewGC
 // if it's not explicitly set or unset. If the user has chosen
@@ -1250,7 +1273,8 @@
 }
 
 inline uintx max_heap_for_compressed_oops() {
-  LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
+  // Heap should be above HeapBaseMinAddress to get zero based compressed oops.
+  LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size() - HeapBaseMinAddress);
   NOT_LP64(ShouldNotReachHere(); return 0);
 }
 
@@ -1299,7 +1323,7 @@
   // Check that UseCompressedOops can be set with the max heap size allocated
   // by ergonomics.
   if (MaxHeapSize <= max_heap_for_compressed_oops()) {
-#ifndef COMPILER1
+#if !defined(COMPILER1) || defined(TIERED)
     if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
       FLAG_SET_ERGO(bool, UseCompressedOops, true);
     }
@@ -1513,6 +1537,9 @@
   if (AggressiveOpts && FLAG_IS_DEFAULT(OptimizeStringConcat)) {
     FLAG_SET_DEFAULT(OptimizeStringConcat, true);
   }
+  if (AggressiveOpts && FLAG_IS_DEFAULT(OptimizeFill)) {
+    FLAG_SET_DEFAULT(OptimizeFill, true);
+  }
 #endif
 
   if (AggressiveOpts) {
@@ -1561,6 +1588,18 @@
   return false;
 }
 
+bool Arguments::verify_min_value(intx val, intx min, const char* name) {
+  // Returns true if the given value is at least the specified min threshold,
+  // false otherwise.
+  if (val >= min) {
+    return true;
+  }
+  jio_fprintf(defaultStream::error_stream(),
+              "%s of " INTX_FORMAT " is invalid; must be at least " INTX_FORMAT "\n",
+              name, val, min);
+  return false;
+}
+
 bool Arguments::verify_percentage(uintx value, const char* name) {
   if (value <= 100) {
     return true;
@@ -1613,6 +1652,16 @@
   return status;
 }
 
+// Check stack pages settings
+bool Arguments::check_stack_pages()
+{
+  bool status = true;
+  status = status && verify_min_value(StackYellowPages, 1, "StackYellowPages");
+  status = status && verify_min_value(StackRedPages, 1, "StackRedPages");
+  status = status && verify_min_value(StackShadowPages, 1, "StackShadowPages");
+  return status;
+}
+
 // Check the consistency of vm_init_args
 bool Arguments::check_vm_args_consistency() {
   // Method for adding checks for flag consistency.
@@ -1725,6 +1774,7 @@
   }
 
   status = status && check_gc_consistency();
+  status = status && check_stack_pages();
 
   if (_has_alloc_profile) {
     if (UseParallelGC || UseParallelOldGC) {
@@ -1907,7 +1957,6 @@
   Arguments::_UseOnStackReplacement    = UseOnStackReplacement;
   Arguments::_ClipInlining             = ClipInlining;
   Arguments::_BackgroundCompilation    = BackgroundCompilation;
-  Arguments::_Tier2CompileThreshold    = Tier2CompileThreshold;
 
   // Parse JAVA_TOOL_OPTIONS environment variable (if present)
   jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required);
@@ -2625,23 +2674,6 @@
     set_mode_flags(_int);
   }
 
-#ifdef TIERED
-  // If we are using tiered compilation in the tiered vm then c1 will
-  // do the profiling and we don't want to waste that time in the
-  // interpreter.
-  if (TieredCompilation) {
-    ProfileInterpreter = false;
-  } else {
-    // Since we are running vanilla server we must adjust the compile threshold
-    // unless the user has already adjusted it because the default threshold assumes
-    // we will run tiered.
-
-    if (FLAG_IS_DEFAULT(CompileThreshold)) {
-      CompileThreshold = Tier2CompileThreshold;
-    }
-  }
-#endif // TIERED
-
 #ifndef COMPILER2
   // Don't degrade server performance for footprint
   if (FLAG_IS_DEFAULT(UseLargePages) &&
@@ -2656,7 +2688,6 @@
 
   // Tiered compilation is undefined with C1.
   TieredCompilation = false;
-
 #else
   if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
     FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
@@ -2832,6 +2863,13 @@
       CommandLineFlags::printFlags();
       vm_exit(0);
     }
+
+#ifndef PRODUCT
+    if (match_option(option, "-XX:+PrintFlagsWithComments", &tail)) {
+      CommandLineFlags::printFlags(true);
+      vm_exit(0);
+    }
+#endif
   }
 
   if (IgnoreUnrecognizedVMOptions) {
@@ -2913,7 +2951,7 @@
     PrintGC = true;
   }
 
-#if defined(_LP64) && defined(COMPILER1)
+#if defined(_LP64) && defined(COMPILER1) && !defined(TIERED)
   UseCompressedOops = false;
 #endif
 
@@ -2944,6 +2982,16 @@
     return JNI_EINVAL;
   }
 
+  if (TieredCompilation) {
+    set_tiered_flags();
+  } else {
+    // Check if the policy is valid. Policies 0 and 1 are valid for non-tiered setup.
+    if (CompilationPolicyChoice >= 2) {
+      vm_exit_during_initialization(
+        "Incompatible compilation policy selected", NULL);
+    }
+  }
+
 #ifndef KERNEL
   if (UseConcMarkSweepGC) {
     // Set flags for CMS and ParNew.  Check UseConcMarkSweep first
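
The set_tiered_flags() hunk above follows HotSpot's usual flag-ergonomics
pattern: a default is adjusted only when FLAG_IS_DEFAULT() reports that the
user did not set the flag explicitly. A minimal standalone sketch of that
pattern (plain C++, not HotSpot code; the Flag struct, the set_default()
helper and the 48 MB starting value are illustrative assumptions):

    #include <cstdio>

    struct Flag {
      long value;
      bool is_default;   // true until the command line overrides the value
    };

    // Mirrors FLAG_SET_DEFAULT guarded by FLAG_IS_DEFAULT: adjust the
    // default, but leave user-specified values untouched.
    static void set_default(Flag& f, long v) {
      if (f.is_default) {
        f.value = v;
      }
    }

    int main() {
      Flag reserved_code_cache = { 48L * 1024 * 1024, true };
      // Tiered compiles a lot more, so double the code cache default, as
      // FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 2)
      // does in the hunk above.
      set_default(reserved_code_cache, reserved_code_cache.value * 2);
      std::printf("ReservedCodeCacheSize: %ld MB\n",
                  reserved_code_cache.value / (1024 * 1024));
      return 0;
    }
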
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -288,8 +288,9 @@
   static bool _BackgroundCompilation;
   static bool _ClipInlining;
   static bool _CIDynamicCompilePriority;
-  static intx _Tier2CompileThreshold;
 
+  // Tiered
+  static void set_tiered_flags();
   // CMS/ParNew garbage collectors
   static void set_parnew_gc_flags();
   static void set_cms_and_parnew_gc_flags();
@@ -338,6 +339,7 @@
   }
   static bool verify_interval(uintx val, uintx min,
                               uintx max, const char* name);
+  static bool verify_min_value(intx val, intx min, const char* name);
   static bool verify_percentage(uintx value, const char* name);
   static void describe_range_error(ArgsRange errcode);
   static ArgsRange check_memory_size(julong size, julong min_size);
@@ -400,6 +402,8 @@
   static bool check_gc_consistency();
   // Check consistency or otherwise of VM argument settings
   static bool check_vm_args_consistency();
+  // Check stack pages settings
+  static bool check_stack_pages();
   // Used by os_solaris
   static bool process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized);
 
--- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -45,10 +45,17 @@
     Unimplemented();
 #endif
     break;
-
+  case 2:
+#ifdef TIERED
+    CompilationPolicy::set_policy(new SimpleThresholdPolicy());
+#else
+    Unimplemented();
+#endif
+    break;
   default:
-    fatal("CompilationPolicyChoice must be in the range: [0-1]");
+    fatal("CompilationPolicyChoice must be in the range: [0-2]");
   }
+  CompilationPolicy::policy()->initialize();
 }
 
 void CompilationPolicy::completed_vm_startup() {
@@ -61,16 +68,16 @@
 // Returns true if m must be compiled before executing it
 // This is intended to force compiles for methods (usually for
 // debugging) that would otherwise be interpreted for some reason.
-bool CompilationPolicy::mustBeCompiled(methodHandle m) {
+bool CompilationPolicy::must_be_compiled(methodHandle m, int comp_level) {
   if (m->has_compiled_code()) return false;       // already compiled
-  if (!canBeCompiled(m))      return false;
+  if (!can_be_compiled(m, comp_level)) return false;
 
   return !UseInterpreter ||                                              // must compile all methods
          (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
 }
 
 // Returns true if m is allowed to be compiled
-bool CompilationPolicy::canBeCompiled(methodHandle m) {
+bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) {
   if (m->is_abstract()) return false;
   if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
 
@@ -83,8 +90,16 @@
   if (!AbstractInterpreter::can_be_compiled(m)) {
     return false;
   }
+  if (comp_level == CompLevel_all) {
+    return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization);
+  } else {
+    return !m->is_not_compilable(comp_level);
+  }
+}
 
-  return !m->is_not_compilable();
+bool CompilationPolicy::is_compilation_enabled() {
+  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
+  return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs();
 }
 
 #ifndef PRODUCT
@@ -94,7 +109,7 @@
   tty->print_cr ("  Total: %3.3f sec.", _accumulated_time.seconds());
 }
 
-static void trace_osr_completion(nmethod* osr_nm) {
+void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
   if (TraceOnStackReplacement) {
     if (osr_nm == NULL) tty->print_cr("compilation failed");
     else tty->print_cr("nmethod " INTPTR_FORMAT, osr_nm);
@@ -102,7 +117,35 @@
 }
 #endif // !PRODUCT
 
-void CompilationPolicy::reset_counter_for_invocation_event(methodHandle m) {
+void NonTieredCompPolicy::initialize() {
+  // Setup the compiler thread numbers
+  if (CICompilerCountPerCPU) {
+    // Example: if CICompilerCountPerCPU is true, then we get
+    // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
+    // May help big-app startup time.
+    _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
+  } else {
+    _compiler_count = CICompilerCount;
+  }
+}
+
+int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
+#ifdef COMPILER1
+  if (is_c1_compile(comp_level)) {
+    return _compiler_count;
+  }
+#endif
+
+#ifdef COMPILER2
+  if (is_c2_compile(comp_level)) {
+    return _compiler_count;
+  }
+#endif
+
+  return 0;
+}
+
+void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) {
   // Make sure invocation and backedge counter doesn't overflow again right away
   // as would be the case for native methods.
 
@@ -114,7 +157,7 @@
   assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
 }
 
-void CompilationPolicy::reset_counter_for_back_branch_event(methodHandle m) {
+void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
   // Delay next back-branch event but pump up invocation counter to trigger
   // whole method compilation.
   InvocationCounter* i = m->invocation_counter();
@@ -128,6 +171,185 @@
   b->set(b->state(), CompileThreshold / 2);
 }
 
+//
+// CounterDecay
+//
+// Iterates through invocation counters and decrements them. This
+// is done at each safepoint.
+//
+class CounterDecay : public AllStatic {
+  static jlong _last_timestamp;
+  static void do_method(methodOop m) {
+    m->invocation_counter()->decay();
+  }
+public:
+  static void decay();
+  static bool is_decay_needed() {
+    return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
+  }
+};
+
+jlong CounterDecay::_last_timestamp = 0;
+
+void CounterDecay::decay() {
+  _last_timestamp = os::javaTimeMillis();
+
+  // This operation is performed only at the end of a safepoint, so no GC
+  // can be in progress and all Java mutators are suspended; hence the
+  // SystemDictionary_lock is not needed either.
+  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
+  int nclasses = SystemDictionary::number_of_classes();
+  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
+                                        CounterHalfLifeTime);
+  for (int i = 0; i < classes_per_tick; i++) {
+    klassOop k = SystemDictionary::try_get_next_class();
+    if (k != NULL && k->klass_part()->oop_is_instance()) {
+      instanceKlass::cast(k)->methods_do(do_method);
+    }
+  }
+}
+
+// Called at the end of the safepoint
+void NonTieredCompPolicy::do_safepoint_work() {
+  if (UseCounterDecay && CounterDecay::is_decay_needed()) {
+    CounterDecay::decay();
+  }
+}
+
+void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
+  ScopeDesc* sd = trap_scope;
+  for (; !sd->is_top(); sd = sd->sender()) {
+    // Reset ICs of inlined methods, since they can trigger compilations also.
+    sd->method()->invocation_counter()->reset();
+  }
+  InvocationCounter* c = sd->method()->invocation_counter();
+  if (is_osr) {
+    // It was an OSR method, so bump the count higher.
+    c->set(c->state(), CompileThreshold);
+  } else {
+    c->reset();
+  }
+  sd->method()->backedge_counter()->reset();
+}
+
+// This method can be called by any component of the runtime to notify the policy
+// that it's recommended to delay the compilation of this method.
+void NonTieredCompPolicy::delay_compilation(methodOop method) {
+  method->invocation_counter()->decay();
+  method->backedge_counter()->decay();
+}
+
+void NonTieredCompPolicy::disable_compilation(methodOop method) {
+  method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
+  method->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
+}
+
+CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
+  return compile_queue->first();
+}
+
+bool NonTieredCompPolicy::is_mature(methodOop method) {
+  methodDataOop mdo = method->method_data();
+  assert(mdo != NULL, "Should be");
+  uint current = mdo->mileage_of(method);
+  uint initial = mdo->creation_mileage();
+  if (current < initial)
+    return true;  // some sort of overflow
+  uint target;
+  if (ProfileMaturityPercentage <= 0)
+    target = (uint) -ProfileMaturityPercentage;  // absolute value
+  else
+    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
+  return (current >= initial + target);
+}
+
+nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) {
+  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
+  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
+  if (JvmtiExport::can_post_interpreter_events()) {
+    assert(THREAD->is_Java_thread(), "Wrong type of thread");
+    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
+      // If certain JVMTI events (e.g. frame pop event) are requested then the
+      // thread is forced to remain in interpreted code. This is
+      // implemented partly by a check in the run_compiled_code
+      // section of the interpreter whether we should skip running
+      // compiled code, and partly by skipping OSR compiles for
+      // interpreted-only threads.
+      if (bci != InvocationEntryBci) {
+        reset_counter_for_back_branch_event(method);
+        return NULL;
+      }
+    }
+  }
+  if (bci == InvocationEntryBci) {
+    // When the code cache is full, compilation is switched off and
+    // UseCompiler is set to false.
+    if (!method->has_compiled_code() && UseCompiler) {
+      method_invocation_event(method, CHECK_NULL);
+    } else {
+      // Force counter overflow on method entry, even if no compilation
+      // happened.  (The method_invocation_event call does this also.)
+      reset_counter_for_invocation_event(method);
+    }
+    // Compilation at an invocation overflow no longer retries the test for a
+    // compiled method. We always run the loser of the race as interpreted,
+    // so return NULL.
+    return NULL;
+  } else {
+    // counter overflow in a loop => try to do on-stack-replacement
+    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
+    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
+    // When the code cache is full, we should not compile any more...
+    if (osr_nm == NULL && UseCompiler) {
+      method_back_branch_event(method, bci, CHECK_NULL);
+      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
+    }
+    if (osr_nm == NULL) {
+      reset_counter_for_back_branch_event(method);
+      return NULL;
+    }
+    return osr_nm;
+  }
+  return NULL;
+}
+
+#ifndef PRODUCT
+void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) {
+  if (TraceInvocationCounterOverflow) {
+    InvocationCounter* ic = m->invocation_counter();
+    InvocationCounter* bc = m->backedge_counter();
+    ResourceMark rm;
+    const char* msg =
+      bci == InvocationEntryBci
+      ? "comp-policy cntr ovfl @ %d in entry of "
+      : "comp-policy cntr ovfl @ %d in loop of ";
+    tty->print(msg, bci);
+    m->print_value();
+    tty->cr();
+    ic->print();
+    bc->print();
+    if (ProfileInterpreter) {
+      if (bci != InvocationEntryBci) {
+        methodDataOop mdo = m->method_data();
+        if (mdo != NULL) {
+          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
+          tty->print_cr("back branch count = %d", count);
+        }
+      }
+    }
+  }
+}
+
+void NonTieredCompPolicy::trace_osr_request(methodHandle method, nmethod* osr, int bci) {
+  if (TraceOnStackReplacement) {
+    ResourceMark rm;
+    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
+    method->print_short_name(tty);
+    tty->print_cr(" at bci %d", bci);
+  }
+}
+#endif // !PRODUCT
+
 // SimpleCompPolicy - compile current method
 
 void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
@@ -137,59 +359,28 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
+  if (is_compilation_enabled() && can_be_compiled(m)) {
     nmethod* nm = m->code();
     if (nm == NULL ) {
       const char* comment = "count";
-      CompileBroker::compile_method(m, InvocationEntryBci,
+      CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier,
                                     m, hot_count, comment, CHECK);
-    } else {
-#ifdef TIERED
-
-      if (nm->is_compiled_by_c1()) {
-        const char* comment = "tier1 overflow";
-        CompileBroker::compile_method(m, InvocationEntryBci,
-                                      m, hot_count, comment, CHECK);
-      }
-#endif // TIERED
     }
   }
 }
 
-void SimpleCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
+void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
   assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
 
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
-    CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
-
-    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
+  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
+    CompileBroker::compile_method(m, bci, CompLevel_highest_tier,
+                                  m, hot_count, comment, CHECK);
+    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   }
 }
-
-int SimpleCompPolicy::compilation_level(methodHandle m, int branch_bci)
-{
-#ifdef TIERED
-  if (!TieredCompilation) {
-    return CompLevel_highest_tier;
-  }
-  if (/* m()->tier1_compile_done() && */
-     // QQQ HACK FIX ME set tier1_compile_done!!
-      !m()->is_native()) {
-    // Grab the nmethod so it doesn't go away while it's being queried
-    nmethod* code = m()->code();
-    if (code != NULL && code->is_compiled_by_c1()) {
-      return CompLevel_highest_tier;
-    }
-  }
-  return CompLevel_fast_compile;
-#else
-  return CompLevel_highest_tier;
-#endif // TIERED
-}
-
 // StackWalkCompPolicy - walk up stack to find a suitable method to compile
 
 #ifdef COMPILER2
@@ -204,7 +395,7 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
+  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) {
     ResourceMark rm(THREAD);
     JavaThread *thread = (JavaThread*)THREAD;
     frame       fr     = thread->last_frame();
@@ -224,10 +415,6 @@
     if (first->top_method()->code() != NULL) {
       // called obsolete method/nmethod -- no need to recompile
       if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, first->top_method()->code());
-    } else if (compilation_level(m, InvocationEntryBci) == CompLevel_fast_compile) {
-      // Tier1 compilation policy avaoids stack walking.
-      CompileBroker::compile_method(m, InvocationEntryBci,
-                                    m, hot_count, comment, CHECK);
     } else {
       if (TimeCompilationPolicy) accumulated_time()->start();
       GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
@@ -236,53 +423,25 @@
       if (TimeCompilationPolicy) accumulated_time()->stop();
       assert(top != NULL, "findTopInlinableFrame returned null");
       if (TraceCompilationPolicy) top->print();
-      CompileBroker::compile_method(top->top_method(), InvocationEntryBci,
+      CompileBroker::compile_method(top->top_method(), InvocationEntryBci, CompLevel_highest_tier,
                                     m, hot_count, comment, CHECK);
     }
   }
 }
 
-void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
+void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
   assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
 
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
-    CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
+  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
+    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, CHECK);
 
-    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
+    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   }
 }
 
-int StackWalkCompPolicy::compilation_level(methodHandle m, int osr_bci)
-{
-  int comp_level = CompLevel_full_optimization;
-  if (TieredCompilation && osr_bci == InvocationEntryBci) {
-    if (CompileTheWorld) {
-      // Under CTW, the first compile is tier1, the second tier2
-      if (m->highest_tier_compile() == CompLevel_none) {
-        comp_level = CompLevel_fast_compile;
-      }
-    } else if (!m->has_osr_nmethod()) {
-      // Before tier1 is done, use invocation_count + backedge_count to
-      // compare against the threshold.  After that, the counters may/will
-      // be reset, so rely on the straight interpreter_invocation_count.
-      if (m->highest_tier_compile() == CompLevel_initial_compile) {
-        if (m->interpreter_invocation_count() < Tier2CompileThreshold) {
-          comp_level = CompLevel_fast_compile;
-        }
-      } else if (m->invocation_count() + m->backedge_count() <
-                 Tier2CompileThreshold) {
-        comp_level = CompLevel_fast_compile;
-      }
-    }
-
-  }
-  return comp_level;
-}
-
-
 RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
   // go up the stack until finding a frame that (probably) won't be inlined
   // into its caller
@@ -372,7 +531,7 @@
 
     // If the caller method is too big or something then we do not want to
     // compile it just to inline a method
-    if (!canBeCompiled(next_m)) {
+    if (!can_be_compiled(next_m)) {
       msg = "caller cannot be compiled";
       break;
     }
@@ -439,7 +598,7 @@
   if (!instanceKlass::cast(m->method_holder())->is_initialized()) return (_msg = "method holder not initialized");
   if (m->is_native()) return (_msg = "native method");
   nmethod* m_code = m->code();
-  if( m_code != NULL && m_code->instructions_size() > InlineSmallCode )
+  if (m_code != NULL && m_code->code_size() > InlineSmallCode)
     return (_msg = "already compiled into a big method");
 
   // use frequency-based objections only for non-trivial methods
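
A quick sanity check of the CounterDecay::decay() loop added above: the
classes_per_tick formula spreads one full pass over the system dictionary
across roughly one counter half-life. A standalone sketch of the arithmetic
(plain C++; the 10,000-class count is an assumption, and 500 ms / 30 s are
assumed to be the CounterDecayMinIntervalLength and CounterHalfLifeTime
defaults):

    #include <cstdio>

    int main() {
      const int    nclasses              = 10000;  // loaded classes (assumed)
      const double decay_min_interval_ms = 500.0;  // CounterDecayMinIntervalLength
      const double half_life_s           = 30.0;   // CounterHalfLifeTime
      // Same formula as CounterDecay::decay() above.
      const double classes_per_tick =
          nclasses * (decay_min_interval_ms * 1e-3 / half_life_s);
      std::printf("classes visited per decay tick: %.1f\n", classes_per_tick);
      std::printf("decay ticks for one full pass:  %.1f\n",
                  nclasses / classes_per_tick);  // ~60 ticks, i.e. ~30 s: one half-life
      return 0;
    }
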
--- a/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,53 +25,91 @@
 // The CompilationPolicy selects which method (if any) should be compiled.
 // It also decides which methods must always be compiled (i.e., are never
 // interpreted).
+class CompileTask;
+class CompileQueue;
 
 class CompilationPolicy : public CHeapObj {
- private:
   static CompilationPolicy* _policy;
   // Accumulated time
   static elapsedTimer       _accumulated_time;
 
   static bool               _in_vm_startup;
-
- public:
-  virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
-  virtual void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) = 0;
-  virtual int compilation_level(methodHandle m, int branch_bci) = 0;
-
-  void reset_counter_for_invocation_event(methodHandle method);
-  void reset_counter_for_back_branch_event(methodHandle method);
-
+public:
   static  void set_in_vm_startup(bool in_vm_startup) { _in_vm_startup = in_vm_startup; }
   static  void completed_vm_startup();
-  static  bool delayCompilationDuringStartup() { return _in_vm_startup; }
+  static  bool delay_compilation_during_startup()    { return _in_vm_startup; }
 
-  static bool mustBeCompiled(methodHandle m);      // m must be compiled before executing it
-  static bool canBeCompiled(methodHandle m);       // m is allowed to be compiled
-
+  // m must be compiled before executing it
+  static bool must_be_compiled(methodHandle m, int comp_level = CompLevel_all);
+  // m is allowed to be compiled
+  static bool can_be_compiled(methodHandle m, int comp_level = CompLevel_all);
+  static bool is_compilation_enabled();
   static void set_policy(CompilationPolicy* policy) { _policy = policy; }
-  static CompilationPolicy* policy() { return _policy; }
+  static CompilationPolicy* policy()                { return _policy; }
 
   // Profiling
   elapsedTimer* accumulated_time() { return &_accumulated_time; }
   void print_time() PRODUCT_RETURN;
+  virtual int compiler_count(CompLevel comp_level) = 0;
+  // main notification entry, return a pointer to an nmethod if the OSR is required,
+  // returns NULL otherwise.
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) = 0;
+  // do_safepoint_work() is called at the end of the safepoint
+  virtual void do_safepoint_work() = 0;
+  // reprofile request
+  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
+  // delay_compilation(method) can be called by any component of the runtime to notify the policy
+  // that it's recommended to delay the compilation of this method.
+  virtual void delay_compilation(methodOop method) = 0;
+  // disable_compilation() is called whenever the runtime decides to disable compilation of the
+  // specified method.
+  virtual void disable_compilation(methodOop method) = 0;
+  // select_task() is called by the CompileBroker. The queue is guaranteed to have at least one
+  // element and is locked. The function should select one task and return it.
+  virtual CompileTask* select_task(CompileQueue* compile_queue) = 0;
+  // Tell the runtime if we think a given method is adequately profiled.
+  virtual bool is_mature(methodOop method) = 0;
+  // Do policy initialization
+  virtual void initialize() = 0;
 };
 
-class SimpleCompPolicy : public CompilationPolicy {
+// A base class for baseline policies.
+class NonTieredCompPolicy : public CompilationPolicy {
+  int _compiler_count;
+protected:
+  static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci);
+  static void trace_osr_request(methodHandle method, nmethod* osr, int bci);
+  static void trace_osr_completion(nmethod* osr_nm);
+  void reset_counter_for_invocation_event(methodHandle method);
+  void reset_counter_for_back_branch_event(methodHandle method);
+public:
+  NonTieredCompPolicy() : _compiler_count(0) { }
+  virtual int compiler_count(CompLevel comp_level);
+  virtual void do_safepoint_work();
+  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
+  virtual void delay_compilation(methodOop method);
+  virtual void disable_compilation(methodOop method);
+  virtual bool is_mature(methodOop method);
+  virtual void initialize();
+  virtual CompileTask* select_task(CompileQueue* compile_queue);
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS);
+  virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
+  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS) = 0;
+};
+
+class SimpleCompPolicy : public NonTieredCompPolicy {
  public:
-  void method_invocation_event( methodHandle m, TRAPS);
-  void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS);
-  int compilation_level(methodHandle m, int branch_bci);
+  virtual void method_invocation_event(methodHandle m, TRAPS);
+  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
 };
 
 // StackWalkCompPolicy - existing C2 policy
 
 #ifdef COMPILER2
-class StackWalkCompPolicy : public CompilationPolicy {
+class StackWalkCompPolicy : public NonTieredCompPolicy {
  public:
-  void method_invocation_event(methodHandle m, TRAPS);
-  void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS);
-  int compilation_level(methodHandle m, int branch_bci);
+  virtual void method_invocation_event(methodHandle m, TRAPS);
+  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
 
  private:
   RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);
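
The compilationPolicy.hpp rework above turns the policy into a pure-virtual
interface (event(), select_task(), is_mature(), initialize(), ...), with
NonTieredCompPolicy supplying the behavior shared by the two legacy policies.
A minimal standalone illustration of that shape (plain C++; Method and the
simplified event() signature are stand-ins, not the HotSpot types):

    #include <cstdio>

    struct Method {};  // stand-in for methodHandle

    // Every policy supplies its own notification entry and initialization,
    // as CompilationPolicy now requires.
    struct Policy {
      virtual ~Policy() {}
      virtual void initialize() = 0;
      virtual void event(Method& m, int bci) = 0;  // greatly simplified
    };

    struct CountingPolicy : Policy {
      int invocation_threshold;
      virtual void initialize() { invocation_threshold = 200; }
      virtual void event(Method&, int bci) {
        std::printf("counter overflow at bci %d: consider compiling\n", bci);
      }
    };

    int main() {
      Policy* policy = new CountingPolicy();  // cf. CompilationPolicy::set_policy()
      policy->initialize();                   // cf. policy()->initialize() in
                                              // the compilationPolicy.cpp hunk
      Method m;
      policy->event(m, -1);                   // -1 standing in for InvocationEntryBci
      delete policy;
      return 0;
    }
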
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1301,7 +1301,7 @@
     bool update_trap_state = true;
     bool make_not_entrant = false;
     bool make_not_compilable = false;
-    bool reset_counters = false;
+    bool reprofile = false;
     switch (action) {
     case Action_none:
       // Keep the old code.
@@ -1328,7 +1328,7 @@
       // had been traps taken from compiled code.  This will update
       // the MDO trap history so that the next compilation will
       // properly detect hot trap sites.
-      reset_counters = true;
+      reprofile = true;
       break;
     case Action_make_not_entrant:
       // Request immediate recompilation, and get rid of the old code.
@@ -1422,7 +1422,7 @@
       // this trap point already, run the method in the interpreter
       // for a while to exercise it more thoroughly.
       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
-        reset_counters = true;
+        reprofile = true;
       }
 
     }
@@ -1452,24 +1452,21 @@
         if (trap_method() == nm->method()) {
           make_not_compilable = true;
         } else {
-          trap_method->set_not_compilable();
+          trap_method->set_not_compilable(CompLevel_full_optimization);
           // But give grace to the enclosing nm->method().
         }
       }
     }
 
-    // Reset invocation counters
-    if (reset_counters) {
-      if (nm->is_osr_method())
-        reset_invocation_counter(trap_scope, CompileThreshold);
-      else
-        reset_invocation_counter(trap_scope);
+    // Reprofile
+    if (reprofile) {
+      CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
     }
 
     // Give up compiling
-    if (make_not_compilable && !nm->method()->is_not_compilable()) {
+    if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
       assert(make_not_entrant, "consistent");
-      nm->method()->set_not_compilable();
+      nm->method()->set_not_compilable(CompLevel_full_optimization);
     }
 
   } // Free marked resources
@@ -1569,22 +1566,6 @@
                            ignore_maybe_prior_recompile);
 }
 
-void Deoptimization::reset_invocation_counter(ScopeDesc* trap_scope, jint top_count) {
-  ScopeDesc* sd = trap_scope;
-  for (; !sd->is_top(); sd = sd->sender()) {
-    // Reset ICs of inlined methods, since they can trigger compilations also.
-    sd->method()->invocation_counter()->reset();
-  }
-  InvocationCounter* c = sd->method()->invocation_counter();
-  if (top_count != _no_count) {
-    // It was an OSR method, so bump the count higher.
-    c->set(c->state(), top_count);
-  } else {
-    c->reset();
-  }
-  sd->method()->backedge_counter()->reset();
-}
-
 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
 
   // Still in Java no safepoints
--- a/hotspot/src/share/vm/runtime/deoptimization.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -311,12 +311,6 @@
   static void popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address);
 
  private:
-  enum {
-    _no_count = -1
-  };
-
-  static void reset_invocation_counter(ScopeDesc* trap_scope, jint count = _no_count);
-
   static methodDataOop get_method_data(JavaThread* thread, methodHandle m, bool create_if_missing);
   // Update the mdo's count and per-BCI reason bits, returning previous state:
   static ProfileData* query_update_method_data(methodDataHandle trap_mdo,
--- a/hotspot/src/share/vm/runtime/dtraceJSDT.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/dtraceJSDT.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -65,7 +65,7 @@
         THROW_MSG_0(vmSymbols::java_lang_RuntimeException(),
           "Unable to register DTrace probes (CodeCache: no room for DTrace nmethods).");
       }
-      h_method()->set_not_compilable(CompLevel_highest_tier);
+      h_method()->set_not_compilable();
       h_method()->set_code(h_method, nm);
       probes->nmethod_at_put(count++, nm);
     }
--- a/hotspot/src/share/vm/runtime/frame.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -537,8 +537,8 @@
     st->cr();
 #ifndef PRODUCT
     if (end == NULL) {
-      begin = _cb->instructions_begin();
-      end = _cb->instructions_end();
+      begin = _cb->code_begin();
+      end   = _cb->code_end();
     }
 #endif
   }
--- a/hotspot/src/share/vm/runtime/globals.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -68,30 +68,38 @@
 // Length of format string (e.g. "%.1234s") for printing ccstr below
 #define FORMAT_BUFFER_LEN 16
 
-void Flag::print_on(outputStream* st) {
-  st->print("%5s %-35s %c= ", type, name, (origin != DEFAULT ? ':' : ' '));
+void Flag::print_on(outputStream* st, bool withComments) {
+  st->print("%9s %-40s %c= ", type, name, (origin != DEFAULT ? ':' : ' '));
   if (is_bool())     st->print("%-16s", get_bool() ? "true" : "false");
   if (is_intx())     st->print("%-16ld", get_intx());
   if (is_uintx())    st->print("%-16lu", get_uintx());
   if (is_uint64_t()) st->print("%-16lu", get_uint64_t());
+  if (is_double())   st->print("%-16f", get_double());
+
   if (is_ccstr()) {
-    const char* cp = get_ccstr();
-    if (cp != NULL) {
-      const char* eol;
-      while ((eol = strchr(cp, '\n')) != NULL) {
-        char format_buffer[FORMAT_BUFFER_LEN];
-        size_t llen = pointer_delta(eol, cp, sizeof(char));
-        jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
+     const char* cp = get_ccstr();
+     if (cp != NULL) {
+       const char* eol;
+       while ((eol = strchr(cp, '\n')) != NULL) {
+         char format_buffer[FORMAT_BUFFER_LEN];
+         size_t llen = pointer_delta(eol, cp, sizeof(char));
+         jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
                      "%%." SIZE_FORMAT "s", llen);
-        st->print(format_buffer, cp);
-        st->cr();
-        cp = eol+1;
-        st->print("%5s %-35s += ", "", name);
-      }
-      st->print("%-16s", cp);
-    }
+         st->print(format_buffer, cp);
+         st->cr();
+         cp = eol+1;
+         st->print("%5s %-35s += ", "", name);
+       }
+       st->print("%-16s", cp);
+     }
+     else st->print("%-16s", "");
   }
-  st->print(" %s", kind);
+  st->print("%-20s", kind);
+  if (withComments) {
+#ifndef PRODUCT
+    st->print("%s", doc );
+#endif
+  }
   st->cr();
 }
 
@@ -131,67 +139,67 @@
 // 4991491 do not "optimize out" the was_set false values: omitting them
 // tickles a Microsoft compiler bug causing flagTable to be malformed
 
-#define RUNTIME_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{product}", DEFAULT },
-#define RUNTIME_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{pd product}", DEFAULT },
-#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{diagnostic}", DEFAULT },
-#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{experimental}", DEFAULT },
-#define RUNTIME_MANAGEABLE_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{manageable}", DEFAULT },
-#define RUNTIME_PRODUCT_RW_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{product rw}", DEFAULT },
+#define RUNTIME_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{product}", DEFAULT },
+#define RUNTIME_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{pd product}", DEFAULT },
+#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{diagnostic}", DEFAULT },
+#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{experimental}", DEFAULT },
+#define RUNTIME_MANAGEABLE_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{manageable}", DEFAULT },
+#define RUNTIME_PRODUCT_RW_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{product rw}", DEFAULT },
 
 #ifdef PRODUCT
   #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
   #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
   #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
 #else
-  #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "", DEFAULT },
-  #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{pd}", DEFAULT },
-  #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{notproduct}", DEFAULT },
+  #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "", DEFAULT },
+  #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{pd}", DEFAULT },
+  #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{notproduct}", DEFAULT },
 #endif
 
 #ifdef _LP64
-  #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{lp64_product}", DEFAULT },
+  #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{lp64_product}", DEFAULT },
 #else
   #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
 #endif // _LP64
 
-#define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1 product}", DEFAULT },
-#define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C1 pd product}", DEFAULT },
+#define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
+#define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
 #ifdef PRODUCT
   #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
   #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
   #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
 #else
-  #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1}", DEFAULT },
-  #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C1 pd}", DEFAULT },
-  #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1 notproduct}", DEFAULT },
+  #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C1}", DEFAULT },
+  #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{C1 pd}", DEFAULT },
+  #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C1 notproduct}", DEFAULT },
 #endif
 
 
-#define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 product}", DEFAULT },
-#define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C2 pd product}", DEFAULT },
-#define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 diagnostic}", DEFAULT },
-#define C2_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 experimental}", DEFAULT },
+#define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 product}", DEFAULT },
+#define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 pd product}", DEFAULT },
+#define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 diagnostic}", DEFAULT },
+#define C2_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 experimental}", DEFAULT },
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
   #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
   #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
 #else
-  #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2}", DEFAULT },
-  #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C2 pd}", DEFAULT },
-  #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 notproduct}", DEFAULT },
+  #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C2}", DEFAULT },
+  #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{C2 pd}", DEFAULT },
+  #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C2 notproduct}", DEFAULT },
 #endif
 
-#define SHARK_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{Shark product}", DEFAULT },
-#define SHARK_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{Shark pd product}", DEFAULT },
-#define SHARK_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{Shark diagnostic}", DEFAULT },
+#define SHARK_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark product}", DEFAULT },
+#define SHARK_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark pd product}", DEFAULT },
+#define SHARK_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark diagnostic}", DEFAULT },
 #ifdef PRODUCT
   #define SHARK_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
   #define SHARK_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
   #define SHARK_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
 #else
-  #define SHARK_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{Shark}", DEFAULT },
-  #define SHARK_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{Shark pd}", DEFAULT },
-  #define SHARK_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{Shark notproduct}", DEFAULT },
+  #define SHARK_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Shark}", DEFAULT },
+  #define SHARK_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{Shark pd}", DEFAULT },
+  #define SHARK_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Shark notproduct}", DEFAULT },
 #endif
 
 static Flag flagTable[] = {
@@ -485,7 +493,7 @@
 
 #endif // PRODUCT
 
-void CommandLineFlags::printFlags() {
+void CommandLineFlags::printFlags(bool withComments) {
   // Print the flags sorted by name
   // note: this method is called before the thread structure is in place
   //       which means resource allocation cannot be used.
@@ -505,7 +513,7 @@
   tty->print_cr("[Global flags]");
   for (int i = 0; i < length; i++) {
     if (array[i]->is_unlocked()) {
-      array[i]->print_on(tty);
+      array[i]->print_on(tty, withComments);
     }
   }
   FREE_C_HEAP_ARRAY(Flag*, array);
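
Usage note: together with the -XX:+PrintFlagsWithComments parsing added in
the arguments.cpp hunk earlier, a non-product build can now dump every
unlocked flag along with its doc string and exit:

    java -XX:+PrintFlagsWithComments -version

In product builds the doc field is compiled out (it is declared under
NOT_PRODUCT in globals.hpp below), which is why both the flag and its option
parsing exist only outside PRODUCT.
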
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -35,14 +35,7 @@
 define_pd_global(bool, TieredCompilation,            false);
 
 define_pd_global(intx, CompileThreshold,             0);
-define_pd_global(intx, Tier2CompileThreshold,        0);
-define_pd_global(intx, Tier3CompileThreshold,        0);
-define_pd_global(intx, Tier4CompileThreshold,        0);
-
 define_pd_global(intx, BackEdgeThreshold,            0);
-define_pd_global(intx, Tier2BackEdgeThreshold,       0);
-define_pd_global(intx, Tier3BackEdgeThreshold,       0);
-define_pd_global(intx, Tier4BackEdgeThreshold,       0);
 
 define_pd_global(intx, OnStackReplacePercentage,     0);
 define_pd_global(bool, ResizeTLAB,                   false);
@@ -90,6 +83,9 @@
   const char *type;
   const char *name;
   void*       addr;
+
+  NOT_PRODUCT(const char *doc;)
+
   const char *kind;
   FlagValueOrigin origin;
 
@@ -131,7 +127,7 @@
   bool is_writeable() const;
   bool is_external() const;
 
-  void print_on(outputStream* st);
+  void print_on(outputStream* st, bool withComments = false);
   void print_as_flag(outputStream* st);
 };
 
@@ -211,7 +207,7 @@
   static bool wasSetOnCmdline(const char* name, bool* value);
   static void printSetFlags();
 
-  static void printFlags();
+  static void printFlags(bool withComments = false);
 
   static void verify() PRODUCT_RETURN;
 };
@@ -1541,13 +1537,13 @@
           "Use BinaryTreeDictionary as default in the CMS generation")      \
                                                                             \
   product(uintx, CMSIndexedFreeListReplenish, 4,                            \
-          "Replenish and indexed free list with this number of chunks")     \
+          "Replenish an indexed free list with this number of chunks")     \
                                                                             \
   product(bool, CMSReplenishIntermediate, true,                             \
           "Replenish all intermediate free-list caches")                    \
                                                                             \
   product(bool, CMSSplitIndexedFreeListBlocks, true,                        \
-          "When satisfying batched demand, splot blocks from the "          \
+          "When satisfying batched demand, split blocks from the "          \
           "IndexedFreeList whose size is a multiple of requested size")     \
                                                                             \
   product(bool, CMSLoopWarn, false,                                         \
@@ -1971,7 +1967,7 @@
   product(uintx, TenuredGenerationSizeSupplementDecay, 2,                   \
           "Decay factor to TenuredGenerationSizeIncrement")                 \
                                                                             \
-  product(uintx, MaxGCPauseMillis, max_uintx,                               \
+  product(uintx, MaxGCPauseMillis, max_uintx,                           \
           "Adaptive size policy maximum GC pause time goal in msec, "       \
           "or (G1 Only) the max. GC time per MMU time slice")               \
                                                                             \
@@ -2366,9 +2362,6 @@
   develop(bool, EagerInitialization, false,                                 \
           "Eagerly initialize classes if possible")                         \
                                                                             \
-  product(bool, Tier1UpdateMethodData, trueInTiered,                        \
-          "Update methodDataOops in Tier1-generated code")                  \
-                                                                            \
   develop(bool, TraceMethodReplacement, false,                              \
           "Print when methods are replaced do to recompilation")            \
                                                                             \
@@ -2406,6 +2399,9 @@
   product(bool, PrintFlagsFinal, false,                                     \
          "Print all VM flags after argument and ergonomic processing")      \
                                                                             \
+  notproduct(bool, PrintFlagsWithComments, false,                           \
+         "Print all VM flags with default values and descriptions and exit")\
+                                                                            \
   diagnostic(bool, SerializeVMOutput, true,                                 \
          "Use a mutex to serialize output to tty and hotspot.log")          \
                                                                             \
@@ -2476,6 +2472,9 @@
   develop(bool, MonomorphicArrayCheck, true,                                \
           "Uncommon-trap array store checks that require full type check")  \
                                                                             \
+  diagnostic(bool, ProfileDynamicTypes, true,                               \
+          "do extra type profiling and use it more aggressively")           \
+                                                                            \
   develop(bool, DelayCompilationDuringStartup, true,                        \
           "Delay invoking the compiler until main application class is "    \
           "loaded")                                                         \
@@ -2895,7 +2894,7 @@
           "if non-zero, start verifying C heap after Nth call to "          \
           "malloc/realloc/free")                                            \
                                                                             \
-  product(intx, TypeProfileWidth,      2,                                   \
+  product(intx, TypeProfileWidth,     2,                                   \
           "number of receiver types to record in call/cast profile")        \
                                                                             \
   develop(intx, BciProfileWidth,      2,                                    \
@@ -3303,30 +3302,98 @@
   product_pd(intx, BackEdgeThreshold,                                       \
           "Interpreter Back edge threshold at which an OSR compilation is invoked")\
                                                                             \
-  product(intx, Tier1BytecodeLimit,      10,                                \
-          "Must have at least this many bytecodes before tier1"             \
-          "invocation counters are used")                                   \
-                                                                            \
-  product_pd(intx, Tier2CompileThreshold,                                   \
-          "threshold at which a tier 2 compilation is invoked")             \
-                                                                            \
-  product_pd(intx, Tier2BackEdgeThreshold,                                  \
-          "Back edge threshold at which a tier 2 compilation is invoked")   \
-                                                                            \
-  product_pd(intx, Tier3CompileThreshold,                                   \
-          "threshold at which a tier 3 compilation is invoked")             \
-                                                                            \
-  product_pd(intx, Tier3BackEdgeThreshold,                                  \
-          "Back edge threshold at which a tier 3 compilation is invoked")   \
-                                                                            \
-  product_pd(intx, Tier4CompileThreshold,                                   \
-          "threshold at which a tier 4 compilation is invoked")             \
-                                                                            \
-  product_pd(intx, Tier4BackEdgeThreshold,                                  \
-          "Back edge threshold at which a tier 4 compilation is invoked")   \
+  product(intx, Tier0InvokeNotifyFreqLog, 7,                                \
+          "Interpreter (tier 0) invocation notification frequency.")        \
+                                                                            \
+  product(intx, Tier2InvokeNotifyFreqLog, 11,                               \
+          "C1 without MDO (tier 2) invocation notification frequency.")     \
+                                                                            \
+  product(intx, Tier3InvokeNotifyFreqLog, 10,                               \
+          "C1 with MDO profiling (tier 3) invocation notification "         \
+          "frequency.")                                                     \
+                                                                            \
+  product(intx, Tier0BackedgeNotifyFreqLog, 10,                             \
+          "Interpreter (tier 0) invocation notification frequency.")        \
+                                                                            \
+  product(intx, Tier2BackedgeNotifyFreqLog, 14,                             \
+          "C1 without MDO (tier 2) invocation notification frequency.")     \
+                                                                            \
+  product(intx, Tier3BackedgeNotifyFreqLog, 13,                             \
+          "C1 with MDO profiling (tier 3) invocation notification "         \
+          "frequency.")                                                     \
+                                                                            \
+  product(intx, Tier2CompileThreshold, 0,                                   \
+          "threshold at which tier 2 compilation is invoked")               \
+                                                                            \
+  product(intx, Tier2BackEdgeThreshold, 0,                                  \
+          "Back edge threshold at which tier 2 compilation is invoked")     \
+                                                                            \
+  product(intx, Tier3InvocationThreshold, 200,                              \
+          "Compile if number of method invocations crosses this "           \
+          "threshold")                                                      \
+                                                                            \
+  product(intx, Tier3MinInvocationThreshold, 100,                           \
+          "Minimum invocation to compile at tier 3")                        \
+                                                                            \
+  product(intx, Tier3CompileThreshold, 2000,                                \
+          "Threshold at which tier 3 compilation is invoked (invocation "   \
+          "minimum must be satisfied.")                                     \
+                                                                            \
+  product(intx, Tier3BackEdgeThreshold,  7000,                              \
+          "Back edge threshold at which tier 3 OSR compilation is invoked") \
+                                                                            \
+  product(intx, Tier4InvocationThreshold, 5000,                             \
+          "Compile if number of method invocations crosses this "           \
+          "threshold")                                                      \
+                                                                            \
+  product(intx, Tier4MinInvocationThreshold, 600,                           \
+          "Minimum invocation to compile at tier 4")                        \
+                                                                            \
+  product(intx, Tier4CompileThreshold, 15000,                               \
+          "Threshold at which tier 4 compilation is invoked (invocation "   \
+          "minimum must be satisfied.")                                     \
+                                                                            \
+  product(intx, Tier4BackEdgeThreshold, 40000,                              \
+          "Back edge threshold at which tier 4 OSR compilation is invoked") \
+                                                                            \
+  product(intx, Tier3DelayOn, 5,                                            \
+          "If C2 queue size grows over this amount per compiler thread "    \
+          "stop compiling at tier 3 and start compiling at tier 2")         \
+                                                                            \
+  product(intx, Tier3DelayOff, 2,                                           \
+          "If C2 queue size is less than this amount per compiler thread "  \
+          "allow methods compiled at tier 2 transition to tier 3")          \
+                                                                            \
+  product(intx, Tier3LoadFeedback, 5,                                       \
+          "Tier 3 thresholds will increase twofold when C1 queue size "     \
+          "reaches this amount per compiler thread")                        \
+                                                                            \
+  product(intx, Tier4LoadFeedback, 3,                                       \
+          "Tier 4 thresholds will increase twofold when C2 queue size "     \
+          "reaches this amount per compiler thread")                        \
+                                                                            \
+  product(intx, TieredCompileTaskTimeout, 50,                               \
+          "Kill compile task if method was not used within "                \
+          "given timeout in milliseconds")                                  \
+                                                                            \
+  product(intx, TieredStopAtLevel, 4,                                       \
+          "Stop at given compilation level")                                \
+                                                                            \
+  product(intx, Tier0ProfilingStartPercentage, 200,                         \
+          "Start profiling in interpreter if the counters exceed tier 3"    \
+          "thresholds by the specified percentage")                         \
+                                                                            \
+  product(intx, TieredRateUpdateMinTime, 1,                                 \
+          "Minimum rate sampling interval (in milliseconds)")               \
+                                                                            \
+  product(intx, TieredRateUpdateMaxTime, 25,                                \
+          "Maximum rate sampling interval (in milliseconds)")               \
                                                                             \
   product_pd(bool, TieredCompilation,                                       \
-          "Enable two-tier compilation")                                    \
+          "Enable tiered compilation")                                      \
+                                                                            \
+  product(bool, PrintTieredEvents, false,                                   \
+          "Print tiered events notifications")                              \
                                                                             \
   product(bool, StressTieredRuntime, false,                                 \
           "Alternate client and server compiler on compile requests")       \
--- a/hotspot/src/share/vm/runtime/icache.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/icache.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
   ResourceMark rm;
 
   BufferBlob* b = BufferBlob::create("flush_icache_stub", ICache::stub_size);
-  CodeBuffer c(b->instructions_begin(), b->instructions_size());
+  CodeBuffer c(b);
 
   ICacheStubGenerator g(&c);
   g.generate_icache_flush(&_flush_icache_stub);
--- a/hotspot/src/share/vm/runtime/java.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/java.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -198,7 +198,7 @@
   if (CountCompiledCalls) {
     print_method_invocation_histogram();
   }
-  if (ProfileInterpreter || Tier1UpdateMethodData) {
+  if (ProfileInterpreter || C1UpdateMethodData) {
     print_method_profiling_data();
   }
   if (TimeCompiler) {
--- a/hotspot/src/share/vm/runtime/javaCalls.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/javaCalls.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -329,9 +329,10 @@
 
 
   assert(!thread->is_Compiler_thread(), "cannot compile from the compiler");
-  if (CompilationPolicy::mustBeCompiled(method)) {
+  if (CompilationPolicy::must_be_compiled(method)) {
     CompileBroker::compile_method(method, InvocationEntryBci,
-                                  methodHandle(), 0, "mustBeCompiled", CHECK);
+                                  CompLevel_initial_compile,
+                                  methodHandle(), 0, "must_be_compiled", CHECK);
   }
 
   // Since the call stub sets up like the interpreter we call the from_interpreted_entry
--- a/hotspot/src/share/vm/runtime/rframe.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/rframe.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,7 +120,7 @@
 int CompiledRFrame::cost() const {
   nmethod* nm = top_method()->code();
   if (nm != NULL) {
-    return nm->code_size();
+    return nm->insts_size();
   } else {
     return top_method()->code_size();
   }
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -430,29 +430,7 @@
   return false;
 }
 
-jlong CounterDecay::_last_timestamp = 0;
 
-static void do_method(methodOop m) {
-  m->invocation_counter()->decay();
-}
-
-void CounterDecay::decay() {
-  _last_timestamp = os::javaTimeMillis();
-
-  // This operation is going to be performed only at the end of a safepoint
-  // and hence GC's will not be going on, all Java mutators are suspended
-  // at this point and hence SystemDictionary_lock is also not needed.
-  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
-  int nclasses = SystemDictionary::number_of_classes();
-  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
-                                        CounterHalfLifeTime);
-  for (int i = 0; i < classes_per_tick; i++) {
-    klassOop k = SystemDictionary::try_get_next_class();
-    if (k != NULL && k->klass_part()->oop_is_instance()) {
-      instanceKlass::cast(k)->methods_do(do_method);
-    }
-  }
-}
 
 // Various cleaning tasks that should be done periodically at safepoints
 void SafepointSynchronize::do_cleanup_tasks() {
@@ -465,10 +443,9 @@
     TraceTime t2("updating inline caches", TraceSafepointCleanupTime);
     InlineCacheBuffer::update_inline_caches();
   }
-
-  if(UseCounterDecay && CounterDecay::is_decay_needed()) {
-    TraceTime t3("decaying counter", TraceSafepointCleanupTime);
-    CounterDecay::decay();
+  {
+    TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime);
+    CompilationPolicy::policy()->do_safepoint_work();
   }
 
   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
--- a/hotspot/src/share/vm/runtime/safepoint.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -147,6 +147,9 @@
   static long last_non_safepoint_interval() {
     return os::javaTimeMillis() - _end_of_last_safepoint;
   }
+  static long end_of_last_safepoint() {
+    return _end_of_last_safepoint;
+  }
   static bool is_cleanup_needed();
   static void do_cleanup_tasks();
 
@@ -228,15 +231,4 @@
   }
 };
 
-//
-// CounterDecay
-//
-// Interates through invocation counters and decrements them. This
-// is done at each safepoint.
-//
-class CounterDecay : public AllStatic {
-  static jlong _last_timestamp;
- public:
-  static  void decay();
-  static  bool is_decay_needed() { return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength; }
-};
+
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -455,11 +455,11 @@
   if (at_poll_return) {
     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
            "polling page return stub not created yet");
-    stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin();
+    stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
   } else {
     assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
            "polling page safepoint stub not created yet");
-    stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin();
+    stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
   }
 #ifndef PRODUCT
   if( TraceSafepoint ) {
@@ -574,7 +574,7 @@
   }
 
   // found handling method => lookup exception handler
-  int catch_pco = ret_pc - nm->instructions_begin();
+  int catch_pco = ret_pc - nm->code_begin();
 
   ExceptionHandlerTable table(nm);
   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
@@ -607,7 +607,7 @@
     return NULL;
   }
 
-  return nm->instructions_begin() + t->pco();
+  return nm->code_begin() + t->pco();
 }
 
 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
@@ -1633,8 +1633,13 @@
 char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread,
                                                         oopDesc* required,
                                                         oopDesc* actual) {
+  if (TraceMethodHandles) {
+    tty->print_cr("WrongMethodType thread="PTR_FORMAT" req="PTR_FORMAT" act="PTR_FORMAT"",
+                  thread, required, actual);
+  }
   assert(EnableMethodHandles, "");
   oop singleKlass = wrong_method_type_is_for_single_argument(thread, required);
+  char* message = NULL;
   if (singleKlass != NULL) {
     const char* objName = "argument or return value";
     if (actual != NULL) {
@@ -1647,7 +1652,7 @@
     Klass* targetKlass = Klass::cast(required->is_klass()
                                      ? (klassOop)required
                                      : java_lang_Class::as_klassOop(required));
-    return generate_class_cast_message(objName, targetKlass->external_name());
+    message = generate_class_cast_message(objName, targetKlass->external_name());
   } else {
     // %%% need to get the MethodType string, without messing around too much
     // Get a signature from the invoke instruction
@@ -1679,9 +1684,13 @@
       if (mhName[0] == '$')
         mhName = actual_method->signature()->as_C_string();
     }
-    return generate_class_cast_message(mhName, targetType,
-                                       " cannot be called as ");
+    message = generate_class_cast_message(mhName, targetType,
+                                          " cannot be called as ");
   }
+  if (TraceMethodHandles) {
+    tty->print_cr("WrongMethodType => message=%s", message);
+  }
+  return message;
 }
 
 oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr,
@@ -2252,7 +2261,7 @@
 
   ResourceMark rm;
 
-  NOT_PRODUCT(int code_size);
+  NOT_PRODUCT(int insts_size);
   AdapterBlob* B = NULL;
   AdapterHandlerEntry* entry = NULL;
   AdapterFingerPrint* fingerprint = NULL;
@@ -2305,7 +2314,7 @@
 
     BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
     if (buf != NULL) {
-      CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size());
+      CodeBuffer buffer(buf);
       short buffer_locs[20];
       buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
                                              sizeof(buffer_locs)/sizeof(relocInfo));
@@ -2321,19 +2330,19 @@
 #ifdef ASSERT
       if (VerifyAdapterSharing) {
         if (shared_entry != NULL) {
-          assert(shared_entry->compare_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt),
+          assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt),
                  "code must match");
           // Release the one just created and return the original
           _adapters->free_entry(entry);
           return shared_entry;
         } else  {
-          entry->save_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt);
+          entry->save_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt);
         }
       }
 #endif
 
       B = AdapterBlob::create(&buffer);
-      NOT_PRODUCT(code_size = buffer.code_size());
+      NOT_PRODUCT(insts_size = buffer.insts_size());
     }
     if (B == NULL) {
       // CodeCache is full, disable compilation
@@ -2343,16 +2352,16 @@
       CompileBroker::handle_full_code_cache();
       return NULL; // Out of CodeCache space
     }
-    entry->relocate(B->instructions_begin());
+    entry->relocate(B->content_begin());
 #ifndef PRODUCT
     // debugging support
     if (PrintAdapterHandlers) {
       tty->cr();
       tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)",
                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
-                    method->signature()->as_C_string(), fingerprint->as_string(), code_size );
+                    method->signature()->as_C_string(), fingerprint->as_string(), insts_size );
       tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
-      Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + code_size);
+      Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + insts_size);
     }
 #endif
 
@@ -2366,13 +2375,11 @@
                  "%s(%s)@" PTR_FORMAT,
                  B->name(),
                  fingerprint->as_string(),
-                 B->instructions_begin());
-    Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
+                 B->content_begin());
+    Forte::register_stub(blob_id, B->content_begin(), B->content_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated(blob_id,
-                                               B->instructions_begin(),
-                                               B->instructions_end());
+      JvmtiExport::post_dynamic_code_generated(blob_id, B->content_begin(), B->content_end());
     }
   }
   return entry;
@@ -2456,7 +2463,7 @@
 
     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
     if (buf != NULL) {
-      CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size());
+      CodeBuffer buffer(buf);
       double locs_buf[20];
       buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
       MacroAssembler _masm(&buffer);
@@ -2540,7 +2547,7 @@
 
     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
     if (buf != NULL) {
-      CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size());
+      CodeBuffer buffer(buf);
       // Need a few relocation entries
       double locs_buf[20];
       buffer.insts()->initialize_shared_locs(
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -173,12 +173,12 @@
 
   static address get_ic_miss_stub() {
     assert(_ic_miss_blob!= NULL, "oops");
-    return _ic_miss_blob->instructions_begin();
+    return _ic_miss_blob->entry_point();
   }
 
   static address get_handle_wrong_method_stub() {
     assert(_wrong_method_blob!= NULL, "oops");
-    return _wrong_method_blob->instructions_begin();
+    return _wrong_method_blob->entry_point();
   }
 
 #ifdef COMPILER2
@@ -188,15 +188,15 @@
 
   static address get_resolve_opt_virtual_call_stub(){
     assert(_resolve_opt_virtual_call_blob != NULL, "oops");
-    return _resolve_opt_virtual_call_blob->instructions_begin();
+    return _resolve_opt_virtual_call_blob->entry_point();
   }
   static address get_resolve_virtual_call_stub() {
     assert(_resolve_virtual_call_blob != NULL, "oops");
-    return _resolve_virtual_call_blob->instructions_begin();
+    return _resolve_virtual_call_blob->entry_point();
   }
   static address get_resolve_static_call_stub() {
     assert(_resolve_static_call_blob != NULL, "oops");
-    return _resolve_static_call_blob->instructions_begin();
+    return _resolve_static_call_blob->entry_point();
   }
 
   static SafepointBlob* polling_page_return_handler_blob()     { return _polling_page_return_handler_blob; }
@@ -548,16 +548,17 @@
 // This library manages argument marshaling adapters and native wrappers.
 // There are 2 flavors of adapters: I2C and C2I.
 //
-// The I2C flavor takes a stock interpreted call setup, marshals the arguments
-// for a Java-compiled call, and jumps to Rmethod-> code()->
-// instructions_begin().  It is broken to call it without an nmethod assigned.
-// The usual behavior is to lift any register arguments up out of the stack
-// and possibly re-pack the extra arguments to be contigious.  I2C adapters
-// will save what the interpreter's stack pointer will be after arguments are
-// popped, then adjust the interpreter's frame size to force alignment and
-// possibly to repack the arguments.  After re-packing, it jumps to the
-// compiled code start.  There are no safepoints in this adapter code and a GC
-// cannot happen while marshaling is in progress.
+// The I2C flavor takes a stock interpreted call setup, marshals the
+// arguments for a Java-compiled call, and jumps to Rmethod-> code()->
+// code_begin().  It is broken to call it without an nmethod assigned.
+// The usual behavior is to lift any register arguments up out of the
+// stack and possibly re-pack the extra arguments to be contiguous.
+// I2C adapters will save what the interpreter's stack pointer will be
+// after arguments are popped, then adjust the interpreter's frame
+// size to force alignment and possibly to repack the arguments.
+// After re-packing, it jumps to the compiled code start.  There are
+// no safepoints in this adapter code and a GC cannot happen while
+// marshaling is in progress.
 //
 // The C2I flavor takes a stock compiled call setup plus the target method in
 // Rmethod, marshals the arguments for an interpreted call and jumps to
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_simpleThresholdPolicy.cpp.incl"
+
+// Print an event.
+void SimpleThresholdPolicy::print_event(EventType type, methodHandle mh, methodHandle imh,
+                                        int bci, CompLevel level) {
+  bool inlinee_event = mh() != imh();
+
+  ttyLocker tty_lock;
+  tty->print("%lf: [", os::elapsedTime());
+
+  int invocation_count = mh->invocation_count();
+  int backedge_count = mh->backedge_count();
+  switch(type) {
+  case CALL:
+    tty->print("call");
+    break;
+  case LOOP:
+    tty->print("loop");
+    break;
+  case COMPILE:
+    tty->print("compile");
+  }
+
+  tty->print(" level: %d ", level);
+
+  ResourceMark rm;
+  char *method_name = mh->name_and_sig_as_C_string();
+  tty->print("[%s", method_name);
+  // We can have an inlinee, although currently we don't generate any notifications for the inlined methods.
+  if (inlinee_event) {
+    char *inlinee_name = imh->name_and_sig_as_C_string();
+    tty->print(" [%s]] ", inlinee_name);
+  }
+  else tty->print("] ");
+  tty->print("@%d queues: %d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
+                                       CompileBroker::queue_size(CompLevel_full_optimization));
+
+  print_specific(type, mh, imh, bci, level);
+
+  if (type != COMPILE) {
+    methodDataHandle mdh = mh->method_data();
+    int mdo_invocations = 0, mdo_backedges = 0;
+    if (mdh() != NULL) {
+      mdo_invocations = mdh->invocation_count();
+      mdo_backedges = mdh->backedge_count();
+    }
+    tty->print(" total: %d,%d mdo: %d,%d",
+               invocation_count, backedge_count,
+               mdo_invocations, mdo_backedges);
+    tty->print(" max levels: %d,%d",
+                mh->highest_comp_level(), mh->highest_osr_comp_level());
+    if (inlinee_event) {
+      tty->print(" inlinee max levels: %d,%d", imh->highest_comp_level(), imh->highest_osr_comp_level());
+    }
+    tty->print(" compilable: ");
+    bool need_comma = false;
+    if (!mh->is_not_compilable(CompLevel_full_profile)) {
+      tty->print("c1");
+      need_comma = true;
+    }
+    if (!mh->is_not_compilable(CompLevel_full_optimization)) {
+      if (need_comma) tty->print(", ");
+      tty->print("c2");
+      need_comma = true;
+    }
+    if (!mh->is_not_osr_compilable()) {
+      if (need_comma) tty->print(", ");
+      tty->print("osr");
+    }
+    tty->print(" status:");
+    if (mh->queued_for_compilation()) {
+      tty->print(" in queue");
+    } else tty->print(" idle");
+  }
+  tty->print_cr("]");
+}
+
+void SimpleThresholdPolicy::initialize() {
+  if (FLAG_IS_DEFAULT(CICompilerCount)) {
+    FLAG_SET_DEFAULT(CICompilerCount, 3);
+  }
+  int count = CICompilerCount;
+  if (CICompilerCountPerCPU) {
+    count = MAX2(log2_intptr(os::active_processor_count()), 1) * 3 / 2;
+  }
+  set_c1_count(MAX2(count / 3, 1));
+  set_c2_count(MAX2(count - count / 3, 1));
+}
+
+void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
+  if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) {
+    counter->set_carry_flag();
+  }
+}
+
+// Set carry flags on the counters if necessary
+void SimpleThresholdPolicy::handle_counter_overflow(methodOop method) {
+  set_carry_if_necessary(method->invocation_counter());
+  set_carry_if_necessary(method->backedge_counter());
+  methodDataOop mdo = method->method_data();
+  if (mdo != NULL) {
+    set_carry_if_necessary(mdo->invocation_counter());
+    set_carry_if_necessary(mdo->backedge_counter());
+  }
+}
+
+// Called with the queue locked and with at least one element
+CompileTask* SimpleThresholdPolicy::select_task(CompileQueue* compile_queue) {
+  return compile_queue->first();
+}
+
+nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
+                                      int branch_bci, int bci, CompLevel comp_level, TRAPS) {
+  if (comp_level == CompLevel_none &&
+      JvmtiExport::can_post_interpreter_events()) {
+    assert(THREAD->is_Java_thread(), "Should be java thread");
+    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
+      return NULL;
+    }
+  }
+  nmethod *osr_nm = NULL;
+
+  handle_counter_overflow(method());
+  if (method() != inlinee()) {
+    handle_counter_overflow(inlinee());
+  }
+
+  if (PrintTieredEvents) {
+    print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
+  }
+
+  if (bci == InvocationEntryBci) {
+    method_invocation_event(method, inlinee, comp_level, THREAD);
+  } else {
+    method_back_branch_event(method, inlinee, bci, comp_level, THREAD);
+    int highest_level = method->highest_osr_comp_level();
+    if (highest_level > comp_level) {
+      osr_nm = method->lookup_osr_nmethod_for(bci, highest_level, false);
+    }
+  }
+  return osr_nm;
+}
+
+// Check if the method can be compiled, change level if necessary
+void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
+  // Take the given ceiling into the account.
+  // NOTE: You can set it to 1 to get a pure C1 version.
+  if ((CompLevel)TieredStopAtLevel < level) {
+    level = (CompLevel)TieredStopAtLevel;
+  }
+  if (level == CompLevel_none) {
+    return;
+  }
+  // Check if the method can be compiled, if not - try different levels.
+  if (!can_be_compiled(mh, level)) {
+    if (level < CompLevel_full_optimization && can_be_compiled(mh, CompLevel_full_optimization)) {
+      compile(mh, bci, CompLevel_full_optimization, THREAD);
+    }
+    if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
+      compile(mh, bci, CompLevel_simple, THREAD);
+    }
+    return;
+  }
+  if (bci != InvocationEntryBci && mh->is_not_osr_compilable()) {
+    return;
+  }
+  if (PrintTieredEvents) {
+    print_event(COMPILE, mh, mh, bci, level);
+  }
+  if (!CompileBroker::compilation_is_in_queue(mh, bci)) {
+    submit_compile(mh, bci, level, THREAD);
+  }
+}
+
+// Tell the broker to compile the method
+void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
+  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
+  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
+}
+
+// Call and loop predicates determine whether a transition to a higher
+// compilation level should be performed (pointers to predicate functions
+// are passed to common() transition function).
+bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
+  switch(cur_level) {
+  case CompLevel_none:
+  case CompLevel_limited_profile: {
+    return loop_predicate_helper<CompLevel_none>(i, b, 1.0);
+  }
+  case CompLevel_full_profile: {
+    return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
+  }
+  default:
+    return true;
+  }
+}
+
+bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
+  switch(cur_level) {
+  case CompLevel_none:
+  case CompLevel_limited_profile: {
+    return call_predicate_helper<CompLevel_none>(i, b, 1.0);
+  }
+  case CompLevel_full_profile: {
+    return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
+  }
+  default:
+    return true;
+  }
+}
+
+// Determine if a method is mature.
+bool SimpleThresholdPolicy::is_mature(methodOop method) {
+  if (is_trivial(method)) return true;
+  methodDataOop mdo = method->method_data();
+  if (mdo != NULL) {
+    int i = mdo->invocation_count();
+    int b = mdo->backedge_count();
+    double k = ProfileMaturityPercentage / 100.0;
+    return call_predicate_helper<CompLevel_full_profile>(i, b, k) ||
+           loop_predicate_helper<CompLevel_full_profile>(i, b, k);
+  }
+  return false;
+}
+
+// Common transition function. Given a predicate, determines if a method should transition to another level.
+CompLevel SimpleThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level) {
+  CompLevel next_level = cur_level;
+  int i = method->invocation_count();
+  int b = method->backedge_count();
+
+  switch(cur_level) {
+  case CompLevel_none:
+    {
+      methodDataOop mdo = method->method_data();
+      if (mdo != NULL) {
+        int mdo_i = mdo->invocation_count();
+        int mdo_b = mdo->backedge_count();
+        // If we were at full profile level, would we switch to full opt?
+        if ((this->*p)(mdo_i, mdo_b, CompLevel_full_profile)) {
+          next_level = CompLevel_full_optimization;
+        }
+      }
+    }
+    if (next_level == cur_level && (this->*p)(i, b, cur_level)) {
+      if (is_trivial(method)) {
+        next_level = CompLevel_simple;
+      } else {
+        next_level = CompLevel_full_profile;
+      }
+    }
+    break;
+  case CompLevel_limited_profile:
+  case CompLevel_full_profile:
+    if (is_trivial(method)) {
+      next_level = CompLevel_simple;
+    } else {
+      methodDataOop mdo = method->method_data();
+      guarantee(mdo != NULL, "MDO should always exist");
+      if (mdo->would_profile()) {
+        int mdo_i = mdo->invocation_count();
+        int mdo_b = mdo->backedge_count();
+        if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+          next_level = CompLevel_full_optimization;
+        }
+      } else {
+        next_level = CompLevel_full_optimization;
+      }
+    }
+    break;
+  }
+  return next_level;
+}
+
+// Determine if a method should be compiled with a normal entry point at a different level.
+CompLevel SimpleThresholdPolicy::call_event(methodOop method,  CompLevel cur_level) {
+  CompLevel highest_level = (CompLevel)method->highest_comp_level();
+  if (cur_level == CompLevel_none && highest_level > cur_level) {
+    // TODO: We may want to try to do more extensive reprofiling in this case.
+    return highest_level;
+  }
+
+  CompLevel osr_level = (CompLevel) method->highest_osr_comp_level();
+  CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level);
+
+  // If OSR method level is greater than the regular method level, the levels should be
+  // equalized by raising the regular method level in order to avoid OSRs during each
+  // invocation of the method.
+  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
+    methodDataOop mdo = method->method_data();
+    guarantee(mdo != NULL, "MDO should not be NULL");
+    if (mdo->invocation_count() >= 1) {
+      next_level = CompLevel_full_optimization;
+    }
+  } else {
+    next_level = MAX2(osr_level, next_level);
+  }
+
+  return next_level;
+}
+
+// Determine if we should do an OSR compilation of a given method.
+CompLevel SimpleThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) {
+  if (cur_level == CompLevel_none) {
+    // If there is a live OSR method, it means we deopted to the interpreter
+    // for the transition.
+    CompLevel osr_level = (CompLevel)method->highest_osr_comp_level();
+    if (osr_level > CompLevel_none) {
+      return osr_level;
+    }
+  }
+  return common(&SimpleThresholdPolicy::loop_predicate, method, cur_level);
+}
+
+
+// Handle the invocation event.
+void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
+                                              CompLevel level, TRAPS) {
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+    CompLevel next_level = call_event(mh(), level);
+    if (next_level != level) {
+      compile(mh, InvocationEntryBci, next_level, THREAD);
+    }
+  }
+}
+
+// Handle the back branch event. Notice that we can compile the method
+// with a regular entry from here.
+void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
+                                               int bci, CompLevel level, TRAPS) {
+  // If the method is already compiling, quickly bail out.
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
+    // Use the loop event as an opportunity to also check that there
+    // have been enough calls.
+    CompLevel cur_level = comp_level(mh());
+    CompLevel next_level = call_event(mh(), cur_level);
+    CompLevel next_osr_level = loop_event(mh(), level);
+
+    next_level = MAX2(next_level,
+                      next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level);
+    bool is_compiling = false;
+    if (next_level != cur_level) {
+      compile(mh, InvocationEntryBci, next_level, THREAD);
+      is_compiling = true;
+    }
+
+    // Do the OSR version
+    if (!is_compiling && next_osr_level != level) {
+      compile(mh, bci, next_osr_level, THREAD);
+    }
+  }
+}
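
Taken together, event() routes invocation and backedge counter overflows into
call_event()/loop_event(), common() picks the next level, and compile()
submits the request. A hedged walkthrough with the default flag values
(the counter numbers are illustrative):

  // A hot method m starts at CompLevel_none (interpreter, level 0).
  //   invocation_count crosses Tier3InvocationThreshold (200)
  //     -> common() returns CompLevel_full_profile; compile(m, bci, 3).
  //   MDO counters then cross the Tier4 thresholds
  //     -> common() returns CompLevel_full_optimization; compile(m, bci, 4).
  // Trivial methods (is_trivial) short-circuit to CompLevel_simple (1), and
  // compile() retries at level 4 or falls back to 1 when a level is not
  // compilable for the method.
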
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class CompileTask;
+class CompileQueue;
+
+class SimpleThresholdPolicy : public CompilationPolicy {
+  int _c1_count, _c2_count;
+
+  // Check if the counter is big enough and set carry (effectively infinity).
+  inline void set_carry_if_necessary(InvocationCounter *counter);
+  // Set carry flags in the counters (in methodOop and MDO).
+  inline void handle_counter_overflow(methodOop method);
+  // Call and loop predicates determine whether a transition to a higher compilation
+  // level should be performed (pointers to predicate functions are passed to common()).
+  // Predicates also take compiler load into account.
+  typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
+  bool call_predicate(int i, int b, CompLevel cur_level);
+  bool loop_predicate(int i, int b, CompLevel cur_level);
+  // Common transition function. Given a predicate, determines if a method should transition to another level.
+  CompLevel common(Predicate p, methodOop method, CompLevel cur_level);
+  // Transition functions.
+  // call_event determines if a method should be compiled at a different
+  // level with a regular invocation entry.
+  CompLevel call_event(methodOop method, CompLevel cur_level);
+  // loop_event checks if a method should be OSR compiled at a different
+  // level.
+  CompLevel loop_event(methodOop method, CompLevel cur_level);
+
+protected:
+  int c1_count() const     { return _c1_count; }
+  int c2_count() const     { return _c2_count; }
+  void set_c1_count(int x) { _c1_count = x;    }
+  void set_c2_count(int x) { _c2_count = x;    }
+
+  enum EventType { CALL, LOOP, COMPILE };
+  void print_event(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level);
+  // Print policy-specific information if necessary
+  virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
+  // Check if the method can be compiled, change level if necessary
+  void compile(methodHandle mh, int bci, CompLevel level, TRAPS);
+  // Submit a given method for compilation
+  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
+  // Simple methods are as good compiled with C1 as with C2.
+  // This function tells whether a given method is such a case.
+  inline bool is_trivial(methodOop method);
+
+  // Predicate helpers are used by .*_predicate() methods as well as others.
+  // They check the given counter values, multiplied by the scale against the thresholds.
+  template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale);
+  template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale);
+
+  // Get a compilation level for a given method.
+  static CompLevel comp_level(methodOop method) {
+    nmethod *nm = method->code();
+    if (nm != NULL && nm->is_in_use()) {
+      return (CompLevel)nm->comp_level();
+    }
+    return CompLevel_none;
+  }
+  virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
+                                       CompLevel level, TRAPS);
+  virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
+                                        int bci, CompLevel level, TRAPS);
+public:
+  SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
+  virtual int compiler_count(CompLevel comp_level) {
+    if (is_c1_compile(comp_level)) return c1_count();
+    if (is_c2_compile(comp_level)) return c2_count();
+    return 0;
+  }
+  virtual void do_safepoint_work() { }
+  virtual void delay_compilation(methodOop method) { }
+  virtual void disable_compilation(methodOop method) { }
+  // TODO: we should honour reprofiling requests in the future. Currently reprofiling
+  // would happen but not to the extent we would ideally like.
+  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) { }
+  virtual nmethod* event(methodHandle method, methodHandle inlinee,
+                         int branch_bci, int bci, CompLevel comp_level, TRAPS);
+  // Select task is called by CompileBroker. We should return a task or NULL.
+  virtual CompileTask* select_task(CompileQueue* compile_queue);
+  // Tell the runtime if we think a given method is adequately profiled.
+  virtual bool is_mature(methodOop method);
+  // Initialize: set compiler thread count
+  virtual void initialize();
+};
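
The virtual hooks make this class a base for richer policies: print_specific(),
submit_compile(), the two *_event() methods, and do_safepoint_work() are the
intended extension points. A minimal hedged subclass sketch (MyPolicy and its
body are illustrative only, not part of this change):

  class MyPolicy : public SimpleThresholdPolicy {
   protected:
    // Append policy-specific fields to the standard print_event() line.
    virtual void print_specific(EventType type, methodHandle mh,
                                methodHandle imh, int bci, CompLevel level) {
      tty->print(" extra-state");
    }
   public:
    // Periodic work at safepoints, e.g. sampling method hotness over time.
    virtual void do_safepoint_work() { }
  };
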
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+template<CompLevel level>
+bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) {
+  switch(level) {
+  case CompLevel_none:
+  case CompLevel_limited_profile:
+    return (i > Tier3InvocationThreshold * scale) ||
+           (i > Tier3MinInvocationThreshold * scale && i + b > Tier3CompileThreshold * scale);
+  case CompLevel_full_profile:
+   return (i > Tier4InvocationThreshold * scale) ||
+          (i > Tier4MinInvocationThreshold * scale && i + b > Tier4CompileThreshold * scale);
+  }
+  return true;
+}
+
+template<CompLevel level>
+bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale) {
+  switch(level) {
+  case CompLevel_none:
+  case CompLevel_limited_profile:
+    return b > Tier3BackEdgeThreshold * scale;
+  case CompLevel_full_profile:
+    return b > Tier4BackEdgeThreshold * scale;
+  }
+  return true;
+}
+
+// Simple methods are as good compiled with C1 as with C2.
+// Determine if a given method is such a case.
+bool SimpleThresholdPolicy::is_trivial(methodOop method) {
+  if (method->is_accessor()) return true;
+  if (method->code() != NULL) {
+    methodDataOop mdo = method->method_data();
+    if (mdo != NULL && mdo->num_loops() == 0 &&
+        (method->code_size() < 5  || (mdo->num_blocks() < 4) && (method->code_size() < 15))) {
+      return !mdo->would_profile();
+    }
+  }
+  return false;
+}
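
With the default flag values introduced earlier, the tier-3 predicates reduce
to concrete arithmetic. A standalone restatement (plain C++, scale = 1.0, not
part of the change):

  #include <cassert>
  // i = invocation count, b = backedge count.
  static bool tier3_call(int i, int b) {
    return i > 200 ||               // Tier3InvocationThreshold
           (i > 100 &&              // Tier3MinInvocationThreshold
            i + b > 2000);          // Tier3CompileThreshold
  }
  static bool tier3_loop(int b) {
    return b > 7000;                // Tier3BackEdgeThreshold
  }
  int main() {
    assert(tier3_call(201, 0));     // hot by invocations alone
    assert(tier3_call(150, 1900));  // mixed invocations and backedges
    assert(!tier3_call(99, 5000));  // below the invocation minimum
    assert(tier3_loop(7001));
    return 0;
  }
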
--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -97,6 +97,15 @@
 address StubRoutines::_unsafe_arraycopy                  = NULL;
 address StubRoutines::_generic_arraycopy                 = NULL;
 
+
+address StubRoutines::_jbyte_fill;
+address StubRoutines::_jshort_fill;
+address StubRoutines::_jint_fill;
+address StubRoutines::_arrayof_jbyte_fill;
+address StubRoutines::_arrayof_jshort_fill;
+address StubRoutines::_arrayof_jint_fill;
+
+
 double (* StubRoutines::_intrinsic_log   )(double) = NULL;
 double (* StubRoutines::_intrinsic_log10 )(double) = NULL;
 double (* StubRoutines::_intrinsic_exp   )(double) = NULL;
@@ -119,10 +128,9 @@
     TraceTime timer("StubRoutines generation 1", TraceStartupTime);
     _code1 = BufferBlob::create("StubRoutines (1)", code_size1);
     if (_code1 == NULL) {
-      vm_exit_out_of_memory(code_size1,
-                            "CodeCache: no room for StubRoutines (1)");
+      vm_exit_out_of_memory(code_size1, "CodeCache: no room for StubRoutines (1)");
     }
-    CodeBuffer buffer(_code1->instructions_begin(), _code1->instructions_size());
+    CodeBuffer buffer(_code1);
     StubGenerator_generate(&buffer, false);
   }
 }
@@ -172,10 +180,9 @@
     TraceTime timer("StubRoutines generation 2", TraceStartupTime);
     _code2 = BufferBlob::create("StubRoutines (2)", code_size2);
     if (_code2 == NULL) {
-      vm_exit_out_of_memory(code_size2,
-                            "CodeCache: no room for StubRoutines (2)");
+      vm_exit_out_of_memory(code_size2, "CodeCache: no room for StubRoutines (2)");
     }
-    CodeBuffer buffer(_code2->instructions_begin(), _code2->instructions_size());
+    CodeBuffer buffer(_code2);
     StubGenerator_generate(&buffer, true);
   }
 
@@ -195,6 +202,46 @@
 
 #undef TEST_ARRAYCOPY
 
+#define TEST_FILL(type)                                                                      \
+  if (_##type##_fill != NULL) {                                                              \
+    union {                                                                                  \
+      double d;                                                                              \
+      type body[96];                                                                         \
+    } s;                                                                                     \
+                                                                                             \
+    int v = 32;                                                                              \
+    for (int offset = -2; offset <= 2; offset++) {                                           \
+      for (int i = 0; i < 96; i++) {                                                         \
+        s.body[i] = 1;                                                                       \
+      }                                                                                      \
+      type* start = s.body + 8 + offset;                                                     \
+      for (int aligned = 0; aligned < 2; aligned++) {                                        \
+        if (aligned) {                                                                       \
+          if (((intptr_t)start) % HeapWordSize == 0) {                                       \
+            ((void (*)(type*, int, int))StubRoutines::_arrayof_##type##_fill)(start, v, 80); \
+          } else {                                                                           \
+            continue;                                                                        \
+          }                                                                                  \
+        } else {                                                                             \
+          ((void (*)(type*, int, int))StubRoutines::_##type##_fill)(start, v, 80);           \
+        }                                                                                    \
+        for (int i = 0; i < 96; i++) {                                                       \
+          if (i < (8 + offset) || i >= (88 + offset)) {                                      \
+            assert(s.body[i] == 1, "what?");                                                 \
+          } else {                                                                           \
+            assert(s.body[i] == 32, "what?");                                                \
+          }                                                                                  \
+        }                                                                                    \
+      }                                                                                      \
+    }                                                                                        \
+  }                                                                                          \
+
+  TEST_FILL(jbyte);
+  TEST_FILL(jshort);
+  TEST_FILL(jint);
+
+#undef TEST_FILL
+
 #define TEST_COPYRTN(type) \
   test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_##type##s_atomic),  sizeof(type)); \
   test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::arrayof_conjoint_##type##s), (int)MAX2(sizeof(HeapWord), sizeof(type)))
@@ -315,3 +362,39 @@
   Copy::arrayof_conjoint_oops(src, dest, count);
   gen_arraycopy_barrier((oop *) dest, count);
 JRT_END
+
+
+address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
+#define RETURN_STUB(xxx_fill) { \
+  name = #xxx_fill; \
+  return StubRoutines::xxx_fill(); }
+
+  switch (t) {
+  case T_BYTE:
+  case T_BOOLEAN:
+    if (!aligned) RETURN_STUB(jbyte_fill);
+    RETURN_STUB(arrayof_jbyte_fill);
+  case T_CHAR:
+  case T_SHORT:
+    if (!aligned) RETURN_STUB(jshort_fill);
+    RETURN_STUB(arrayof_jshort_fill);
+  case T_INT:
+  case T_FLOAT:
+    if (!aligned) RETURN_STUB(jint_fill);
+    RETURN_STUB(arrayof_jint_fill);
+  case T_DOUBLE:
+  case T_LONG:
+  case T_ARRAY:
+  case T_OBJECT:
+  case T_NARROWOOP:
+  case T_ADDRESS:
+    // Currently unsupported
+    return NULL;
+
+  default:
+    ShouldNotReachHere();
+    return NULL;
+  }
+
+#undef RETURN_STUB
+}
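
select_fill_function() follows the same pattern as the existing arraycopy stub
selectors: map a BasicType to a stub address, with NULL meaning "no stub, fall
back to a loop". A hedged call-site sketch (the caller context is assumed):

  const char* name = NULL;
  address fill = StubRoutines::select_fill_function(T_INT, true /*aligned*/, name);
  if (fill != NULL) {
    // Emit a call to the arrayof_jint_fill stub; 'name' is for logging only.
  } else {
    // T_DOUBLE/T_LONG/oops are currently unsupported: fill element by element.
  }
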
--- a/hotspot/src/share/vm/runtime/stubRoutines.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/stubRoutines.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -148,6 +148,13 @@
   static address _unsafe_arraycopy;
   static address _generic_arraycopy;
 
+  static address _jbyte_fill;
+  static address _jshort_fill;
+  static address _jint_fill;
+  static address _arrayof_jbyte_fill;
+  static address _arrayof_jshort_fill;
+  static address _arrayof_jint_fill;
+
   // These are versions of the java.lang.Math methods which perform
   // the same operations as the intrinsic version.  They are used for
   // constant folding in the compiler to ensure equivalence.  If the
@@ -259,6 +266,16 @@
   static address unsafe_arraycopy()        { return _unsafe_arraycopy; }
   static address generic_arraycopy()       { return _generic_arraycopy; }
 
+  static address jbyte_fill()          { return _jbyte_fill; }
+  static address jshort_fill()         { return _jshort_fill; }
+  static address jint_fill()           { return _jint_fill; }
+  static address arrayof_jbyte_fill()  { return _arrayof_jbyte_fill; }
+  static address arrayof_jshort_fill() { return _arrayof_jshort_fill; }
+  static address arrayof_jint_fill()   { return _arrayof_jint_fill; }
+
+  static address select_fill_function(BasicType t, bool aligned, const char* &name);
+
+
   static double  intrinsic_log(double d) {
     assert(_intrinsic_log != NULL, "must be defined");
     return _intrinsic_log(d);
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -368,8 +368,7 @@
         disconnected++;
       } else if (nm->is_speculatively_disconnected()) {
         // This method was previously considered for preemptive unloading and was not called since then
-        nm->method()->invocation_counter()->decay();
-        nm->method()->backedge_counter()->decay();
+        CompilationPolicy::policy()->delay_compilation(nm->method());
         nm->make_not_entrant();
         made_not_entrant++;
       }
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -604,7 +604,8 @@
   nonstatic_field(CodeBlob,                    _size,                                         int)                                   \
   nonstatic_field(CodeBlob,                    _header_size,                                  int)                                   \
   nonstatic_field(CodeBlob,                    _relocation_size,                              int)                                   \
-  nonstatic_field(CodeBlob,                    _instructions_offset,                          int)                                   \
+  nonstatic_field(CodeBlob,                    _content_offset,                               int)                                   \
+  nonstatic_field(CodeBlob,                    _code_offset,                                  int)                                   \
   nonstatic_field(CodeBlob,                    _frame_complete_offset,                        int)                                   \
   nonstatic_field(CodeBlob,                    _data_offset,                                  int)                                   \
   nonstatic_field(CodeBlob,                    _frame_size,                                   int)                                   \
--- a/hotspot/src/share/vm/runtime/vm_version.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/runtime/vm_version.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -121,7 +121,8 @@
 #ifdef VENDOR
   return XSTR(VENDOR);
 #else
-  return "Sun Microsystems Inc.";
+  return JDK_Version::is_gte_jdk17x_version() ?
+    "Oracle Corporation" : "Sun Microsystems Inc.";
 #endif
 }
 
--- a/hotspot/src/share/vm/services/g1MemoryPool.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/services/g1MemoryPool.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -28,12 +28,11 @@
 G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
                                      const char* name,
                                      size_t init_size,
-                                     size_t max_size,
                                      bool support_usage_threshold) :
   _g1h(g1h), CollectedMemoryPool(name,
                                  MemoryPool::Heap,
                                  init_size,
-                                 max_size,
+                                 undefined_max(),
                                  support_usage_threshold) {
   assert(UseG1GC, "sanity");
 }
@@ -53,13 +52,6 @@
 }
 
 // See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::eden_space_max(G1CollectedHeap* g1h) {
-  // This should ensure that it returns a value no smaller than the
-  // region size. Currently, eden_space_committed() guarantees that.
-  return eden_space_committed(g1h);
-}
-
-// See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
   return MAX2(survivor_space_used(g1h), (size_t) HeapRegion::GrainBytes);
 }
@@ -72,13 +64,6 @@
 }
 
 // See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::survivor_space_max(G1CollectedHeap* g1h) {
-  // This should ensure that it returns a value no smaller than the
-  // region size. Currently, survivor_space_committed() guarantees that.
-  return survivor_space_committed(g1h);
-}
-
-// See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
   size_t committed = overall_committed(g1h);
   size_t eden_committed = eden_space_committed(g1h);
@@ -99,24 +84,11 @@
   return used;
 }
 
-// See the comment at the top of g1MemoryPool.hpp
-size_t G1MemoryPoolSuper::old_space_max(G1CollectedHeap* g1h) {
-  size_t max = overall_max(g1h);
-  size_t eden_max = eden_space_max(g1h);
-  size_t survivor_max = survivor_space_max(g1h);
-  max = subtract_up_to_zero(max, eden_max);
-  max = subtract_up_to_zero(max, survivor_max);
-  max = MAX2(max, (size_t) HeapRegion::GrainBytes);
-  return max;
-}
-
 G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
   G1MemoryPoolSuper(g1h,
                     "G1 Eden",
                     eden_space_committed(g1h), /* init_size */
-                    eden_space_max(g1h), /* max_size */
-                    false /* support_usage_threshold */) {
-}
+                    false /* support_usage_threshold */) { }
 
 MemoryUsage G1EdenPool::get_memory_usage() {
   size_t initial_sz = initial_size();
@@ -131,9 +103,7 @@
   G1MemoryPoolSuper(g1h,
                     "G1 Survivor",
                     survivor_space_committed(g1h), /* init_size */
-                    survivor_space_max(g1h), /* max_size */
-                    false /* support_usage_threshold */) {
-}
+                    false /* support_usage_threshold */) { }
 
 MemoryUsage G1SurvivorPool::get_memory_usage() {
   size_t initial_sz = initial_size();
@@ -148,9 +118,7 @@
   G1MemoryPoolSuper(g1h,
                     "G1 Old Gen",
                     old_space_committed(g1h), /* init_size */
-                    old_space_max(g1h), /* max_size */
-                    true /* support_usage_threshold */) {
-}
+                    true /* support_usage_threshold */) { }
 
 MemoryUsage G1OldGenPool::get_memory_usage() {
   size_t initial_sz = initial_size();
--- a/hotspot/src/share/vm/services/g1MemoryPool.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/services/g1MemoryPool.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -74,14 +74,20 @@
 // in the future.
 //
 // 3) Another decision that is again not straightforward is what is
-// the max size that each memory pool can grow to. Right now, we set
-// that the committed size for the eden and the survivors and
-// calculate the old gen max as follows (basically, it's a similar
-// pattern to what we use for the committed space, as described
-// above):
+// the max size that each memory pool can grow to. One way to do this
+// would be to use the committed size for the max for the eden and
+// survivors and calculate the old gen max as follows (basically, it's
+// a similar pattern to what we use for the committed space, as
+// described above):
 //
 //  old_gen_max = overall_max - eden_max - survivor_max
 //
+// Unfortunately, the above makes the max of each pool fluctuate over
+// time and, even though this is allowed according to the spec, it
+// broke several assumptions in the M&M framework (there were cases
+// where used would reach a value greater than max). So, for max we
+// use -1, which means "undefined" according to the spec.
+//
 // 4) Now, there is a very subtle issue with all the above. The
 // framework will call get_memory_usage() on the three pools
 // asynchronously. As a result, each call might get a different value
@@ -125,33 +131,30 @@
   G1MemoryPoolSuper(G1CollectedHeap* g1h,
                     const char* name,
                     size_t init_size,
-                    size_t max_size,
                     bool support_usage_threshold);
 
   // All the code is in static methods so that it can be safely
   // called from the constructors of the subclasses.
 
+  static size_t undefined_max() {
+    return (size_t) -1;
+  }
+
   static size_t overall_committed(G1CollectedHeap* g1h) {
     return g1h->capacity();
   }
   static size_t overall_used(G1CollectedHeap* g1h) {
     return g1h->used_unlocked();
   }
-  static size_t overall_max(G1CollectedHeap* g1h) {
-    return g1h->g1_reserved_obj_bytes();
-  }
 
   static size_t eden_space_committed(G1CollectedHeap* g1h);
   static size_t eden_space_used(G1CollectedHeap* g1h);
-  static size_t eden_space_max(G1CollectedHeap* g1h);
 
   static size_t survivor_space_committed(G1CollectedHeap* g1h);
   static size_t survivor_space_used(G1CollectedHeap* g1h);
-  static size_t survivor_space_max(G1CollectedHeap* g1h);
 
   static size_t old_space_committed(G1CollectedHeap* g1h);
   static size_t old_space_used(G1CollectedHeap* g1h);
-  static size_t old_space_max(G1CollectedHeap* g1h);
 };
 
 // Memory pool that represents the G1 eden.
@@ -163,7 +166,7 @@
     return eden_space_used(_g1h);
   }
   size_t max_size() const {
-    return eden_space_max(_g1h);
+    return undefined_max();
   }
   MemoryUsage get_memory_usage();
 };
@@ -177,7 +180,7 @@
     return survivor_space_used(_g1h);
   }
   size_t max_size() const {
-    return survivor_space_max(_g1h);
+    return undefined_max();
   }
   MemoryUsage get_memory_usage();
 };
@@ -191,7 +194,7 @@
     return old_space_used(_g1h);
   }
   size_t max_size() const {
-    return old_space_max(_g1h);
+    return undefined_max();
   }
   MemoryUsage get_memory_usage();
 };
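
The observable effect of the undefined_max() change is on the standard monitoring API: a G1 pool's MemoryUsage.getMax() now returns -1, which the spec defines as "undefined". A minimal probe using only java.lang.management (a sketch; run with -XX:+UseG1GC):

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryPoolMXBean;

    // Sketch: with this change the G1 pools ("G1 Eden", "G1 Survivor",
    // "G1 Old Gen") report max = -1, i.e. undefined.
    public class G1PoolMaxProbe {
        public static void main(String[] args) {
            for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
                System.out.println(pool.getName() + ": max = " + pool.getUsage().getMax());
            }
        }
    }
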
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1305,6 +1305,8 @@
   static VM_HeapDumper* _global_dumper;
   static DumpWriter*    _global_writer;
   DumpWriter*           _local_writer;
+  JavaThread*           _oome_thread;
+  methodOop             _oome_constructor;
   bool _gc_before_heap_dump;
   bool _is_segmented_dump;
   jlong _dump_start;
@@ -1366,7 +1368,7 @@
   void end_of_dump();
 
  public:
-  VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump) :
+  VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome) :
     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
                     0 /* total full collections, dummy, ignored */,
                     gc_before_heap_dump) {
@@ -1377,6 +1379,18 @@
     _klass_map = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, true);
     _stack_traces = NULL;
     _num_threads = 0;
+    if (oome) {
+      assert(!Thread::current()->is_VM_thread(), "Dump from OutOfMemoryError cannot be called by the VMThread");
+      // get OutOfMemoryError zero-parameter constructor
+      instanceKlass* oome_ik = instanceKlass::cast(SystemDictionary::OutOfMemoryError_klass());
+      _oome_constructor = oome_ik->find_method(vmSymbols::object_initializer_name(),
+                                                          vmSymbols::void_method_signature());
+      // remember the thread that threw the OOME that triggered this heap dump
+      _oome_thread = JavaThread::current();
+    } else {
+      _oome_thread = NULL;
+      _oome_constructor = NULL;
+    }
   }
   ~VM_HeapDumper() {
     if (_stack_traces != NULL) {
@@ -1557,7 +1571,11 @@
     frame f = java_thread->last_frame();
     vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
     frame* last_entry_frame = NULL;
+    int extra_frames = 0;
 
+    if (java_thread == _oome_thread && _oome_constructor != NULL) {
+      extra_frames++;
+    }
     while (vf != NULL) {
       blk.set_frame_number(stack_depth);
       if (vf->is_java_frame()) {
@@ -1574,7 +1592,7 @@
                 writer()->write_u1(HPROF_GC_ROOT_JAVA_FRAME);
                 writer()->write_objectID(o);
                 writer()->write_u4(thread_serial_num);
-                writer()->write_u4((u4) stack_depth);
+                writer()->write_u4((u4) (stack_depth + extra_frames));
               }
             }
           }
@@ -1764,6 +1782,17 @@
       // write HPROF_FRAME records for this thread's stack trace
       int depth = stack_trace->get_stack_depth();
       int thread_frame_start = frame_serial_num;
+      int extra_frames = 0;
+      // write a fake frame that makes it look like the thread that caused the
+      // OOME is in the OutOfMemoryError zero-parameter constructor
+      if (thread == _oome_thread && _oome_constructor != NULL) {
+        int oome_serial_num = _klass_map->find(Klass::cast(_oome_constructor->method_holder()));
+        // the class serial number starts from 1
+        assert(oome_serial_num > 0, "OutOfMemoryError class not found");
+        DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, oome_serial_num,
+                                        _oome_constructor, 0);
+        extra_frames++;
+      }
       for (int j=0; j < depth; j++) {
         StackFrameInfo* frame = stack_trace->stack_frame_at(j);
         methodOop m = frame->method();
@@ -1772,6 +1801,7 @@
         assert(class_serial_num > 0, "class not found");
         DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, class_serial_num, m, frame->bci());
       }
+      depth += extra_frames;
 
       // write HPROF_TRACE record for one thread
       DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4) + depth*oopSize);
@@ -1808,7 +1838,7 @@
   }
 
   // generate the dump
-  VM_HeapDumper dumper(&writer, _gc_before_heap_dump);
+  VM_HeapDumper dumper(&writer, _gc_before_heap_dump, _oome);
   if (Thread::current()->is_VM_thread()) {
     assert(SafepointSynchronize::is_at_safepoint(), "Expected to be called at a safepoint");
     dumper.doit();
@@ -1869,12 +1899,22 @@
   }
 }
 
+// Called by out-of-memory error reporting by a single Java thread
+// outside of a JVM safepoint
+void HeapDumper::dump_heap_from_oome() {
+  HeapDumper::dump_heap(true);
+}
+
 // Called by error reporting by a single Java thread outside of a JVM safepoint,
 // or by heap dumping by the VM thread during a (GC) safepoint. Thus, these various
 // callers are strictly serialized and guaranteed not to interfere below. For more
 // general use, however, this method will need modification to prevent
 // interference when updating the static variables base_path and dump_file_seq below.
 void HeapDumper::dump_heap() {
+  HeapDumper::dump_heap(false);
+}
+
+void HeapDumper::dump_heap(bool oome) {
   static char base_path[JVM_MAXPATHLEN] = {'\0'};
   static uint dump_file_seq = 0;
   char   my_path[JVM_MAXPATHLEN] = {'\0'};
@@ -1930,6 +1970,7 @@
   dump_file_seq++;   // increment seq number for next time we dump
 
   HeapDumper dumper(false /* no GC before heap dump */,
-                    true  /* send to tty */);
+                    true  /* send to tty */,
+                    oome  /* pass along out-of-memory-error flag */);
   dumper.dump(my_path);
 }
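
The new dump_heap_from_oome() path is driven by -XX:+HeapDumpOnOutOfMemoryError (see the utilities/debug.cpp hunk below). One way to exercise it end to end (a sketch) is a program that deliberately exhausts the heap:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch: run with  java -Xmx16m -XX:+HeapDumpOnOutOfMemoryError OomeDump
    // The resulting dump should show the failing thread with the extra
    // synthetic frame in the OutOfMemoryError zero-parameter constructor.
    public class OomeDump {
        public static void main(String[] args) {
            List<byte[]> hog = new ArrayList<byte[]>();
            while (true) {
                hog.add(new byte[1 << 20]); // 1 MB chunks until the heap is exhausted
            }
        }
    }
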
--- a/hotspot/src/share/vm/services/heapDumper.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/services/heapDumper.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,8 +39,12 @@
   char* _error;
   bool _print_to_tty;
   bool _gc_before_heap_dump;
+  bool _oome;
   elapsedTimer _t;
 
+  HeapDumper(bool gc_before_heap_dump, bool print_to_tty, bool oome) :
+    _gc_before_heap_dump(gc_before_heap_dump), _error(NULL), _print_to_tty(print_to_tty), _oome(oome) { }
+
   // string representation of error
   char* error() const                   { return _error; }
   void set_error(char* error);
@@ -51,11 +55,11 @@
   // internal timer.
   elapsedTimer* timer()                 { return &_t; }
 
+  static void dump_heap(bool oome);
+
  public:
   HeapDumper(bool gc_before_heap_dump) :
-    _gc_before_heap_dump(gc_before_heap_dump), _error(NULL), _print_to_tty(false) { }
-  HeapDumper(bool gc_before_heap_dump, bool print_to_tty) :
-    _gc_before_heap_dump(gc_before_heap_dump), _error(NULL), _print_to_tty(print_to_tty) { }
+    _gc_before_heap_dump(gc_before_heap_dump), _error(NULL), _print_to_tty(false), _oome(false) { }
 
   ~HeapDumper();
 
@@ -66,4 +70,6 @@
   char* error_as_C_string() const;
 
   static void dump_heap()    KERNEL_RETURN;
+
+  static void dump_heap_from_oome()    KERNEL_RETURN;
 };
--- a/hotspot/src/share/vm/services/management.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/services/management.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -785,10 +785,11 @@
     }
   }
 
-  // In our current implementation, all pools should have
-  // defined init and max size
-  assert(!has_undefined_init_size, "Undefined init size");
-  assert(!has_undefined_max_size, "Undefined max size");
+  // In our current implementation, we make sure that all non-heap
+  // pools have defined init and max sizes. Heap pools do not matter,
+  // as we never use total_init and total_max for them.
+  assert(heap || !has_undefined_init_size, "Undefined init size");
+  assert(heap || !has_undefined_max_size,  "Undefined max size");
 
   MemoryUsage usage((heap ? InitialHeapSize : total_init),
                     total_used,
--- a/hotspot/src/share/vm/utilities/accessFlags.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/utilities/accessFlags.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,8 @@
   JVM_ACC_HAS_LOOPS               = 0x40000000,     // Method has loops
   JVM_ACC_LOOPS_FLAG_INIT         = (int)0x80000000,// The loop flag has been initialized
   JVM_ACC_QUEUED                  = 0x01000000,     // Queued for compilation
-  JVM_ACC_NOT_TIER1_COMPILABLE    = 0x04000000,
+  JVM_ACC_NOT_C2_COMPILABLE       = 0x02000000,
+  JVM_ACC_NOT_C1_COMPILABLE       = 0x04000000,
   JVM_ACC_NOT_OSR_COMPILABLE      = 0x08000000,
   JVM_ACC_HAS_LINE_NUMBER_TABLE   = 0x00100000,
   JVM_ACC_HAS_CHECKED_EXCEPTIONS  = 0x00400000,
@@ -47,6 +48,7 @@
   JVM_ACC_IS_OLD                  = 0x00010000,     // RedefineClasses() has replaced this method
   JVM_ACC_IS_OBSOLETE             = 0x00020000,     // RedefineClasses() has made method obsolete
   JVM_ACC_IS_PREFIXED_NATIVE      = 0x00040000,     // JVMTI has prefixed this native method
+
   JVM_MH_INVOKE_BITS           // = 0x10001100      // MethodHandle.invoke quasi-native
                                   = (JVM_ACC_NATIVE | JVM_ACC_SYNTHETIC | JVM_ACC_MONITOR_MATCH),
 
@@ -108,7 +110,8 @@
   bool has_loops               () const { return (_flags & JVM_ACC_HAS_LOOPS              ) != 0; }
   bool loops_flag_init         () const { return (_flags & JVM_ACC_LOOPS_FLAG_INIT        ) != 0; }
   bool queued_for_compilation  () const { return (_flags & JVM_ACC_QUEUED                 ) != 0; }
-  bool is_not_tier1_compilable  () const { return (_flags & JVM_ACC_NOT_TIER1_COMPILABLE  ) != 0; }
+  bool is_not_c1_compilable () const    { return (_flags & JVM_ACC_NOT_C1_COMPILABLE      ) != 0; }
+  bool is_not_c2_compilable () const    { return (_flags & JVM_ACC_NOT_C2_COMPILABLE      ) != 0; }
   bool is_not_osr_compilable   () const { return (_flags & JVM_ACC_NOT_OSR_COMPILABLE     ) != 0; }
   bool has_linenumber_table    () const { return (_flags & JVM_ACC_HAS_LINE_NUMBER_TABLE  ) != 0; }
   bool has_checked_exceptions  () const { return (_flags & JVM_ACC_HAS_CHECKED_EXCEPTIONS ) != 0; }
@@ -172,7 +175,8 @@
   void set_has_monitor_bytecodes()     { atomic_set_bits(JVM_ACC_HAS_MONITOR_BYTECODES);   }
   void set_has_loops()                 { atomic_set_bits(JVM_ACC_HAS_LOOPS);               }
   void set_loops_flag_init()           { atomic_set_bits(JVM_ACC_LOOPS_FLAG_INIT);         }
-  void set_not_tier1_compilable()      { atomic_set_bits(JVM_ACC_NOT_TIER1_COMPILABLE);    }
+  void set_not_c1_compilable()         { atomic_set_bits(JVM_ACC_NOT_C1_COMPILABLE);       }
+  void set_not_c2_compilable()         { atomic_set_bits(JVM_ACC_NOT_C2_COMPILABLE);       }
   void set_not_osr_compilable()        { atomic_set_bits(JVM_ACC_NOT_OSR_COMPILABLE);      }
   void set_has_linenumber_table()      { atomic_set_bits(JVM_ACC_HAS_LINE_NUMBER_TABLE);   }
   void set_has_checked_exceptions()    { atomic_set_bits(JVM_ACC_HAS_CHECKED_EXCEPTIONS);  }
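
The compilability bits are set with a lock-free read-modify-write (atomic_set_bits). The same pattern can be sketched in Java with a CAS loop; the constants mirror the JVM_ACC_NOT_C1/C2_COMPILABLE values introduced above, while the class itself is hypothetical:

    import java.util.concurrent.atomic.AtomicInteger;

    // Sketch of the atomic set-bits pattern behind set_not_c1_compilable()
    // and friends; constants mirror the values introduced above.
    public class CompilabilityFlags {
        static final int NOT_C2_COMPILABLE = 0x02000000;
        static final int NOT_C1_COMPILABLE = 0x04000000;

        private final AtomicInteger flags = new AtomicInteger();

        void setBits(int bits) {
            int old;
            do {
                old = flags.get();                           // read current flags
            } while (!flags.compareAndSet(old, old | bits)); // retry on contention
        }

        boolean isNotC1Compilable() { return (flags.get() & NOT_C1_COMPILABLE) != 0; }
        boolean isNotC2Compilable() { return (flags.get() & NOT_C2_COMPILABLE) != 0; }
    }
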
--- a/hotspot/src/share/vm/utilities/debug.cpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/utilities/debug.cpp	Wed Jul 05 17:22:53 2017 +0200
@@ -234,7 +234,7 @@
     // create heap dump before OnOutOfMemoryError commands are executed
     if (HeapDumpOnOutOfMemoryError) {
       tty->print_cr("java.lang.OutOfMemoryError: %s", message);
-      HeapDumper::dump_heap();
+      HeapDumper::dump_heap_from_oome();
     }
 
     if (OnOutOfMemoryError && OnOutOfMemoryError[0]) {
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -529,7 +529,7 @@
 #ifdef ASSERT
 extern int type2aelembytes(BasicType t, bool allow_address = false); // asserts
 #else
-inline int type2aelembytes(BasicType t) { return _type2aelembytes[t]; }
+inline int type2aelembytes(BasicType t, bool allow_address = false) { return _type2aelembytes[t]; }
 #endif
 
 
@@ -710,24 +710,41 @@
 
 // Enumeration to distinguish tiers of compilation
 enum CompLevel {
-  CompLevel_none              = 0,
-  CompLevel_fast_compile      = 1,
-  CompLevel_full_optimization = 2,
+  CompLevel_any               = -1,
+  CompLevel_all               = -1,
+  CompLevel_none              = 0,         // Interpreter
+  CompLevel_simple            = 1,         // C1
+  CompLevel_limited_profile   = 2,         // C1, invocation & backedge counters
+  CompLevel_full_profile      = 3,         // C1, invocation & backedge counters + mdo
+  CompLevel_full_optimization = 4,         // C2
 
-  CompLevel_highest_tier      = CompLevel_full_optimization,
-#ifdef TIERED
-  CompLevel_initial_compile   = CompLevel_fast_compile
+#if defined(COMPILER2)
+  CompLevel_highest_tier      = CompLevel_full_optimization,  // pure C2 and tiered
+#elif defined(COMPILER1)
+  CompLevel_highest_tier      = CompLevel_simple,             // pure C1
 #else
-  CompLevel_initial_compile   = CompLevel_full_optimization
-#endif // TIERED
+  CompLevel_highest_tier      = CompLevel_none,
+#endif
+
+#if defined(TIERED)
+  CompLevel_initial_compile   = CompLevel_full_profile        // tiered
+#elif defined(COMPILER1)
+  CompLevel_initial_compile   = CompLevel_simple              // pure C1
+#elif defined(COMPILER2)
+  CompLevel_initial_compile   = CompLevel_full_optimization   // pure C2
+#else
+  CompLevel_initial_compile   = CompLevel_none
+#endif
 };
 
-inline bool is_tier1_compile(int comp_level) {
-  return comp_level == CompLevel_fast_compile;
+inline bool is_c1_compile(int comp_level) {
+  return comp_level > CompLevel_none && comp_level < CompLevel_full_optimization;
 }
-inline bool is_tier2_compile(int comp_level) {
+
+inline bool is_c2_compile(int comp_level) {
   return comp_level == CompLevel_full_optimization;
 }
+
 inline bool is_highest_tier_compile(int comp_level) {
   return comp_level == CompLevel_highest_tier;
 }
@@ -1017,22 +1034,22 @@
 
 // This routine takes eight bytes:
 inline u8 build_u8_from( u1 c1, u1 c2, u1 c3, u1 c4, u1 c5, u1 c6, u1 c7, u1 c8 ) {
-  return  ( u8(c1) << 56 )  &  ( u8(0xff) << 56 )
-       |  ( u8(c2) << 48 )  &  ( u8(0xff) << 48 )
-       |  ( u8(c3) << 40 )  &  ( u8(0xff) << 40 )
-       |  ( u8(c4) << 32 )  &  ( u8(0xff) << 32 )
-       |  ( u8(c5) << 24 )  &  ( u8(0xff) << 24 )
-       |  ( u8(c6) << 16 )  &  ( u8(0xff) << 16 )
-       |  ( u8(c7) <<  8 )  &  ( u8(0xff) <<  8 )
-       |  ( u8(c8) <<  0 )  &  ( u8(0xff) <<  0 );
+  return  (( u8(c1) << 56 )  &  ( u8(0xff) << 56 ))
+       |  (( u8(c2) << 48 )  &  ( u8(0xff) << 48 ))
+       |  (( u8(c3) << 40 )  &  ( u8(0xff) << 40 ))
+       |  (( u8(c4) << 32 )  &  ( u8(0xff) << 32 ))
+       |  (( u8(c5) << 24 )  &  ( u8(0xff) << 24 ))
+       |  (( u8(c6) << 16 )  &  ( u8(0xff) << 16 ))
+       |  (( u8(c7) <<  8 )  &  ( u8(0xff) <<  8 ))
+       |  (( u8(c8) <<  0 )  &  ( u8(0xff) <<  0 ));
 }
 
 // This routine takes four bytes:
 inline u4 build_u4_from( u1 c1, u1 c2, u1 c3, u1 c4 ) {
-  return  ( u4(c1) << 24 )  &  0xff000000
-       |  ( u4(c2) << 16 )  &  0x00ff0000
-       |  ( u4(c3) <<  8 )  &  0x0000ff00
-       |  ( u4(c4) <<  0 )  &  0x000000ff;
+  return  (( u4(c1) << 24 )  &  0xff000000)
+       |  (( u4(c2) << 16 )  &  0x00ff0000)
+       |  (( u4(c3) <<  8 )  &  0x0000ff00)
+       |  (( u4(c4) <<  0 )  &  0x000000ff);
 }
 
 // And this one works if the four bytes are contiguous in memory:
@@ -1042,8 +1059,8 @@
 
 // Ditto for two-byte ints:
 inline u2 build_u2_from( u1 c1, u1 c2 ) {
-  return  u2(( u2(c1) <<  8 )  &  0xff00
-          |  ( u2(c2) <<  0 )  &  0x00ff);
+  return  u2((( u2(c1) <<  8 )  &  0xff00)
+          |  (( u2(c2) <<  0 )  &  0x00ff));
 }
 
 // And this one works if the two bytes are contiguous in memory:
@@ -1066,14 +1083,14 @@
 // now (64-bit) longs
 
 inline jlong build_long_from( u1 c1, u1 c2, u1 c3, u1 c4, u1 c5, u1 c6, u1 c7, u1 c8 ) {
-  return  ( jlong(c1) << 56 )  &  ( jlong(0xff) << 56 )
-       |  ( jlong(c2) << 48 )  &  ( jlong(0xff) << 48 )
-       |  ( jlong(c3) << 40 )  &  ( jlong(0xff) << 40 )
-       |  ( jlong(c4) << 32 )  &  ( jlong(0xff) << 32 )
-       |  ( jlong(c5) << 24 )  &  ( jlong(0xff) << 24 )
-       |  ( jlong(c6) << 16 )  &  ( jlong(0xff) << 16 )
-       |  ( jlong(c7) <<  8 )  &  ( jlong(0xff) <<  8 )
-       |  ( jlong(c8) <<  0 )  &  ( jlong(0xff) <<  0 );
+  return  (( jlong(c1) << 56 )  &  ( jlong(0xff) << 56 ))
+       |  (( jlong(c2) << 48 )  &  ( jlong(0xff) << 48 ))
+       |  (( jlong(c3) << 40 )  &  ( jlong(0xff) << 40 ))
+       |  (( jlong(c4) << 32 )  &  ( jlong(0xff) << 32 ))
+       |  (( jlong(c5) << 24 )  &  ( jlong(0xff) << 24 ))
+       |  (( jlong(c6) << 16 )  &  ( jlong(0xff) << 16 ))
+       |  (( jlong(c7) <<  8 )  &  ( jlong(0xff) <<  8 ))
+       |  (( jlong(c8) <<  0 )  &  ( jlong(0xff) <<  0 ));
 }
 
 inline jlong build_long_from( u1* p ) {
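
The added parentheses in the byte-assembly helpers above are warning hygiene, not a behavior change: in C++ (as in Java) & binds tighter than |, so each masked term was already grouped as intended. The point, illustrated in Java:

    // Sketch: both methods compute the same value; the explicit parentheses
    // only make the grouping visible (and silence precedence warnings in C++).
    public class Precedence {
        static int implicitGrouping(int c1, int c2) {
            return (c1 << 8) & 0xff00 | (c2 << 0) & 0x00ff;
        }
        static int explicitGrouping(int c1, int c2) {
            return ((c1 << 8) & 0xff00) | ((c2 << 0) & 0x00ff);
        }
        public static void main(String[] args) {
            System.out.println(implicitGrouping(0x12, 0x34) == explicitGrouping(0x12, 0x34)); // true
        }
    }
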
--- a/hotspot/src/share/vm/utilities/macros.hpp	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/src/share/vm/utilities/macros.hpp	Wed Jul 05 17:22:53 2017 +0200
@@ -71,17 +71,27 @@
 #define NOT_COMPILER2(code) code
 #endif // COMPILER2
 
+#ifdef TIERED
+#define TIERED_ONLY(code) code
+#define NOT_TIERED(code)
+#else
+#define TIERED_ONLY(code)
+#define NOT_TIERED(code) code
+#endif // TIERED
+
 
 // PRODUCT variant
 #ifdef PRODUCT
 #define PRODUCT_ONLY(code) code
 #define NOT_PRODUCT(code)
+#define NOT_PRODUCT_ARG(arg)
 #define PRODUCT_RETURN  {}
 #define PRODUCT_RETURN0 { return 0; }
 #define PRODUCT_RETURN_(code) { code }
 #else // PRODUCT
 #define PRODUCT_ONLY(code)
 #define NOT_PRODUCT(code) code
+#define NOT_PRODUCT_ARG(arg) arg,
 #define PRODUCT_RETURN  /*next token must be ;*/
 #define PRODUCT_RETURN0 /*next token must be ;*/
 #define PRODUCT_RETURN_(code)  /*next token must be ;*/
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6982370/Test6982370.java	Wed Jul 05 17:22:53 2017 +0200
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6982370
+ * @summary SIGBUS in jbyte_fill
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeFill -Xbatch Test6982370
+ */
+
+import java.util.Arrays;
+
+/**
+ * Exercise the fill routine for various short alignments and sizes
+ */
+
+public class Test6982370 {
+    public static void main(String[] args) {
+        test_byte();
+        test_char();
+        test_short();
+        test_int();
+        test_float();
+    }
+
+    public static void test_int() {
+        int[] a = new int[16];
+        for (int i = 0; i < 200000; i++) {
+            int start = i & 7;
+            int end = start + ((i >> 4) & 7);
+            int value = i;
+            if ((i & 1) == 1) value = -value;
+            Arrays.fill(a, start, end, value);
+            boolean error = false;
+            for (int j = start; j < end; j++) {
+                if (a[j] != value) {
+                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
+                    error = true;
+                }
+            }
+            if (error) throw new InternalError();
+        }
+    }
+
+    public static void test_float() {
+        float[] a = new float[16];
+        for (int i = 0; i < 200000; i++) {
+            int start = i & 7;
+            int end = start + ((i >> 4) & 7);
+            float value = (float)i;
+            if ((i & 1) == 1) value = -value;
+            Arrays.fill(a, start, end, value);
+            boolean error = false;
+            for (int j = start; j < end; j++) {
+                if (a[j] != value) {
+                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
+                    error = true;
+                }
+            }
+            if (error) throw new InternalError();
+        }
+    }
+    public static void test_char() {
+        char[] a = new char[16];
+        for (int i = 0; i < 200000; i++) {
+            int start = i & 7;
+            int end = start + ((i >> 4) & 7);
+            char value = (char)i;
+            Arrays.fill(a, start, end, value);
+            boolean error = false;
+            for (int j = start; j < end; j++) {
+                if (a[j] != value) {
+                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
+                    error = true;
+                }
+            }
+            if (error) throw new InternalError();
+        }
+    }
+    public static void test_short() {
+        short[] a = new short[16];
+        for (int i = 0; i < 200000; i++) {
+            int start = i & 7;
+            int end = start + ((i >> 4) & 7);
+            short value = (short)i;
+            if ((i & 1) == 1) value = (short)-value;
+            Arrays.fill(a, start, end, value);
+            boolean error = false;
+            for (int j = start; j < end; j++) {
+                if (a[j] != value) {
+                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
+                    error = true;
+                }
+            }
+            if (error) throw new InternalError();
+        }
+    }
+
+    public static void test_byte() {
+        for (int i = 0; i < 200000; i++) {
+            byte[] a = new byte[16];
+            int start = i & 7;
+            int end = start + ((i >> 4) & 7);
+            byte value = (byte)i;
+            if ((i & 1) == 1) value = (byte)-value;
+            Arrays.fill(a, start, end, value);
+            boolean error = false;
+            for (int j = start; j < end; j++) {
+                if (a[j] != value) {
+                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
+                    error = true;
+                }
+            }
+            if (error) throw new InternalError();
+        }
+    }
+}
--- a/hotspot/test/gc/6581734/Test6581734.java	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/test/gc/6581734/Test6581734.java	Wed Jul 05 17:22:53 2017 +0200
@@ -121,7 +121,7 @@
         }
 
         if (collectorsWithTime<collectorsFound) {
-            throw new RuntimeException("collectors found with zero time";
+            throw new RuntimeException("collectors found with zero time");
         }
         System.out.println("Test passed.");
     }
--- a/hotspot/test/runtime/6626217/Test6626217.sh	Fri Sep 24 16:41:32 2010 -0700
+++ b/hotspot/test/runtime/6626217/Test6626217.sh	Wed Jul 05 17:22:53 2017 +0200
@@ -1,9 +1,27 @@
-#   
-# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
-# SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
-#
+# 
+#  Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+#  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+#  This code is free software; you can redistribute it and/or modify it
+#  under the terms of the GNU General Public License version 2 only, as
+#  published by the Free Software Foundation.
+# 
+#  This code is distributed in the hope that it will be useful, but WITHOUT
+#  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+#  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+#  version 2 for more details (a copy is included in the LICENSE file that
+#  accompanied this code).
+# 
+#  You should have received a copy of the GNU General Public License version
+#  2 along with this work; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+#  Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+#  or visit www.oracle.com if you need additional information or have any
+#  questions.
+# 
 
-#
+ 
 # @test @(#)Test6626217.sh
 # @bug 6626217
 # @summary Loader-constraint table allows arrays instead of only the base-classes
@@ -96,6 +114,6 @@
 ${RM} many_loader.java
 
 ${JAVA} ${BIT_FLAG} -Xverify -Xint -cp . bug_21227 >test.out 2>&1
-grep "violates loader constraints" test.out
+grep "loader constraint" test.out
 exit $?
 
--- a/make/hotspot-rules.gmk	Fri Sep 24 16:41:32 2010 -0700
+++ b/make/hotspot-rules.gmk	Wed Jul 05 17:22:53 2017 +0200
@@ -73,7 +73,11 @@
 endif
 
 ifeq ($(ZERO_BUILD), true)
-  HOTSPOT_TARGET := $(HOTSPOT_TARGET)zero
+  ifeq ($(SHARK_BUILD), true)
+    HOTSPOT_TARGET := $(HOTSPOT_TARGET)shark
+  else
+    HOTSPOT_TARGET := $(HOTSPOT_TARGET)zero
+  endif
 endif
 
 HOTSPOT_BUILD_ARGUMENTS += $(COMMON_BUILD_ARGUMENTS)