Merge
author amurillo
Sat, 05 Mar 2016 20:46:42 -0800
changeset 36405 e38c9a1ee108
parent 36280 c870cb782aca (current diff)
parent 36404 165ddc1dd2e9 (diff)
child 36406 8045e63439dd
child 36559 b35fcd3da015
child 37050 85b12760d820
Merge
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/development/Server16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/development/Server24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/About16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/About24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Delete16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Delete24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Find16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Help16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Help24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/History16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/History24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Information16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Information24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/New16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/New24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Open16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Open24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Save24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/SaveAs16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/SaveAs24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Zoom16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/ZoomIn16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/ZoomIn24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/navigation/Down16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/navigation/Up16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignCenter16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignCenter24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignLeft16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignLeft24.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignRight16.gif
hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignRight24.gif
hotspot/src/share/vm/classfile/vmSymbols.hpp
hotspot/src/share/vm/jvmci/commandLineFlagConstraintsJVMCI.cpp
hotspot/src/share/vm/jvmci/commandLineFlagConstraintsJVMCI.hpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/test/serviceability/dcmd/jvmti/LoadJavaAgentDcmdTest.java
--- a/hotspot/make/aix/makefiles/trace.make	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/make/aix/makefiles/trace.make	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -57,11 +57,6 @@
 TraceGeneratedNames +=  \
 	traceRequestables.hpp \
     traceEventControl.hpp
-
-ifneq ($(INCLUDE_TRACE), false)
-TraceGeneratedNames += traceProducer.cpp
-endif
-
 endif
 
 TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
@@ -100,9 +95,6 @@
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
 
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
 $(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
 
--- a/hotspot/make/bsd/makefiles/trace.make	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/make/bsd/makefiles/trace.make	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -57,11 +57,6 @@
 TraceGeneratedNames +=  \
 	traceRequestables.hpp \
     traceEventControl.hpp
-
-ifneq ($(INCLUDE_TRACE), false)
-TraceGeneratedNames += traceProducer.cpp
-endif
-
 endif
 
 
@@ -101,9 +96,6 @@
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
 
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
 $(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
 
--- a/hotspot/make/linux/makefiles/trace.make	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/trace.make	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -57,11 +57,6 @@
 TraceGeneratedNames +=  \
 	traceRequestables.hpp \
     traceEventControl.hpp
-
-ifneq ($(INCLUDE_TRACE), false)
-TraceGeneratedNames += traceProducer.cpp
-endif
-
 endif
 
 TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
@@ -100,9 +95,6 @@
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
 
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
 $(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
 
--- a/hotspot/make/solaris/makefiles/amd64.make	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/make/solaris/makefiles/amd64.make	Sat Mar 05 20:46:42 2016 -0800
@@ -39,7 +39,7 @@
 # of OPT_CFLAGS. Restore it here.
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   OPT_CFLAGS/generateOptoStub.o += -g0 -xs
-  OPT_CFLAGS/LinearScan.o += -g0 -xs
+  OPT_CFLAGS/c1_LinearScan.o += -g0 -xs
 endif
 
 else
--- a/hotspot/make/solaris/makefiles/trace.make	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/make/solaris/makefiles/trace.make	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -56,8 +56,7 @@
 ifeq ($(HAS_ALT_SRC), true)
 TraceGeneratedNames +=  \
 	traceRequestables.hpp \
-    traceEventControl.hpp \
-    traceProducer.cpp
+    traceEventControl.hpp
 endif
 
 TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
@@ -96,9 +95,6 @@
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
 
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	$(GENERATE_CODE)
-
 $(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
 
--- a/hotspot/make/test/JtregNative.gmk	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/make/test/JtregNative.gmk	Sat Mar 05 20:46:42 2016 -0800
@@ -48,6 +48,7 @@
     $(HOTSPOT_TOPDIR)/test/runtime/SameObject \
     $(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
     $(HOTSPOT_TOPDIR)/test/compiler/calls \
+    $(HOTSPOT_TOPDIR)/test/compiler/native \
     #
 
 # Add conditional directories here when needed.
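(Editorial aside, not part of this changeset: the directories listed above contain C/C++ sources that are built into shared libraries and loaded by the corresponding jtreg tests. A minimal sketch of what such a native test function looks like follows; the class name, method name and behaviour are hypothetical, purely for illustration.)

#include <jni.h>

// Hypothetical native half of a jtreg test that declares
//   static native int add(int a, int b);
// in a (hypothetical) test class named NativeCallTest.
extern "C" JNIEXPORT jint JNICALL
Java_NativeCallTest_add(JNIEnv* env, jclass clazz, jint a, jint b) {
  return a + b;
}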
--- a/hotspot/make/windows/makefiles/trace.make	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/make/windows/makefiles/trace.make	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -43,8 +43,7 @@
 !if EXISTS($(TraceAltSrcDir))
 TraceGeneratedNames = $(TraceGeneratedNames) \
     traceRequestables.hpp \
-    traceEventControl.hpp \
-    traceProducer.cpp
+    traceEventControl.hpp
 !endif
 
 
@@ -58,8 +57,7 @@
 !if EXISTS($(TraceAltSrcDir))
 TraceGeneratedFiles = $(TraceGeneratedFiles) \
 	$(TraceOutDir)/traceRequestables.hpp \
-    $(TraceOutDir)/traceEventControl.hpp \
-	$(TraceOutDir)/traceProducer.cpp
+    $(TraceOutDir)/traceEventControl.hpp
 !endif
 
 XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen
@@ -98,10 +96,6 @@
 	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
 
-$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	@echo Generating AltSrc $@
-	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp
-
 $(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
 	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad	Sat Mar 05 20:46:42 2016 -0800
@@ -1041,10 +1041,8 @@
   bool is_card_mark_membar(const MemBarNode *barrier);
   bool is_CAS(int opcode);
 
-  MemBarNode *leading_to_normal(MemBarNode *leading);
-  MemBarNode *normal_to_leading(const MemBarNode *barrier);
-  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
-  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
+  MemBarNode *leading_to_trailing(MemBarNode *leading);
+  MemBarNode *card_mark_to_leading(const MemBarNode *barrier);
   MemBarNode *trailing_to_leading(const MemBarNode *trailing);
 
   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
@@ -1418,23 +1416,28 @@
   // leading MemBarRelease and a trailing MemBarVolatile as follows
   //
   //   MemBarRelease
-  //  {      ||      } -- optional
+  //  {    ||        } -- optional
   //  {MemBarCPUOrder}
-  //         ||     \\
-  //         ||     StoreX[mo_release]
-  //         | \     /
-  //         | MergeMem
-  //         | /
+  //       ||       \\
+  //       ||     StoreX[mo_release]
+  //       | \ Bot    / ???
+  //       | MergeMem
+  //       | /
   //   MemBarVolatile
   //
   // where
   //  || and \\ represent Ctl and Mem feeds via Proj nodes
   //  | \ and / indicate further routing of the Ctl and Mem feeds
   //
-  // this is the graph we see for non-object stores. however, for a
-  // volatile Object store (StoreN/P) we may see other nodes below the
-  // leading membar because of the need for a GC pre- or post-write
-  // barrier.
+  // Note that the memory feed from the CPUOrder membar to the
+  // MergeMem node is an AliasIdxBot slice while the feed from the
+  // StoreX is for a slice determined by the type of value being
+  // written.
+  //
+  // the diagram above shows the graph we see for non-object stores.
+  // for a volatile Object store (StoreN/P) we may see other nodes
+  // below the leading membar because of the need for a GC pre- or
+  // post-write barrier.
   //
   // with most GC configurations we with see this simple variant which
   // includes a post-write barrier card mark.
@@ -1442,7 +1445,7 @@
   //   MemBarRelease______________________________
   //         ||    \\               Ctl \        \\
   //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
-  //         | \     /                       . . .  /
+  //         | \ Bot  / oop                 . . .  /
   //         | MergeMem
   //         | /
   //         ||      /
@@ -1452,152 +1455,142 @@
   // the object address to an int used to compute the card offset) and
   // Ctl+Mem to a StoreB node (which does the actual card mark).
   //
-  // n.b. a StoreCM node will only appear in this configuration when
-  // using CMS. StoreCM differs from a normal card mark write (StoreB)
-  // because it implies a requirement to order visibility of the card
-  // mark (StoreCM) relative to the object put (StoreP/N) using a
-  // StoreStore memory barrier (arguably this ought to be represented
-  // explicitly in the ideal graph but that is not how it works). This
-  // ordering is required for both non-volatile and volatile
-  // puts. Normally that means we need to translate a StoreCM using
-  // the sequence
+  // n.b. a StoreCM node is only ever used when CMS (with or without
+  // CondCardMark) or G1 is configured. This abstract instruction
+  // differs from a normal card mark write (StoreB) because it implies
+  // a requirement to order visibility of the card mark (StoreCM)
+  // after that of the object put (StoreP/N) using a StoreStore memory
+  // barrier. Note that this is /not/ a requirement to order the
+  // instructions in the generated code (that is already guaranteed by
+  // the order of memory dependencies). Rather it is a requirement to
+  // ensure visibility order which only applies on architectures like
+  // AArch64 which do not implement TSO. This ordering is required for
+  // both non-volatile and volatile puts.
+  //
+  // That implies that we need to translate a StoreCM using the
+  // sequence
   //
   //   dmb ishst
   //   stlrb
   //
-  // However, in the case of a volatile put if we can recognise this
-  // configuration and plant an stlr for the object write then we can
-  // omit the dmb and just plant an strb since visibility of the stlr
-  // is ordered before visibility of subsequent stores. StoreCM nodes
-  // also arise when using G1 or using CMS with conditional card
-  // marking. In these cases (as we shall see) we don't need to insert
-  // the dmb when translating StoreCM because there is already an
-  // intervening StoreLoad barrier between it and the StoreP/N.
-  //
-  // It is also possible to perform the card mark conditionally on it
-  // currently being unmarked in which case the volatile put graph
-  // will look slightly different
+  // This dmb cannot be omitted even when the associated StoreX or
+  // CompareAndSwapX is implemented using stlr. However, as described
+  // below there are circumstances where a specific GC configuration
+  // requires a stronger barrier in which case it can be omitted.
+  // 
+  // With the Serial or Parallel GC using +CondCardMark the card mark
+  // is performed conditionally on it currently being unmarked in
+  // which case the volatile put graph looks slightly different
   //
   //   MemBarRelease____________________________________________
   //         ||    \\               Ctl \     Ctl \     \\  Mem \
   //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
-  //         | \     /                              \            |
+  //         | \ Bot / oop                          \            |
   //         | MergeMem                            . . .      StoreB
   //         | /                                                /
   //         ||     /
   //   MemBarVolatile
   //
-  // It is worth noting at this stage that both the above
+  // It is worth noting at this stage that all the above
   // configurations can be uniquely identified by checking that the
   // memory flow includes the following subgraph:
   //
   //   MemBarRelease
   //  {MemBarCPUOrder}
-  //          |  \      . . .
-  //          |  StoreX[mo_release]  . . .
-  //          |   /
-  //         MergeMem
-  //          |
+  //      |  \      . . .
+  //      |  StoreX[mo_release]  . . .
+  //  Bot |   / oop
+  //     MergeMem
+  //      |
   //   MemBarVolatile
   //
-  // This is referred to as a *normal* subgraph. It can easily be
-  // detected starting from any candidate MemBarRelease,
-  // StoreX[mo_release] or MemBarVolatile.
-  //
-  // A simple variation on this normal case occurs for an unsafe CAS
-  // operation. The basic graph for a non-object CAS is
+  // This is referred to as a *normal* volatile store subgraph. It can
+  // easily be detected starting from any candidate MemBarRelease,
+  // StoreX[mo_release] or MemBarVolatile node.
+  //
+  // A small variation on this normal case occurs for an unsafe CAS
+  // operation. The basic memory flow subgraph for a non-object CAS is
+  // as follows
   //
   //   MemBarRelease
   //         ||
   //   MemBarCPUOrder
-  //         ||     \\   . . .
-  //         ||     CompareAndSwapX
-  //         ||       |
-  //         ||     SCMemProj
-  //         | \     /
-  //         | MergeMem
-  //         | /
+  //          |     \\   . . .
+  //          |     CompareAndSwapX
+  //          |       |
+  //      Bot |     SCMemProj
+  //           \     / Bot
+  //           MergeMem
+  //           /
   //   MemBarCPUOrder
   //         ||
   //   MemBarAcquire
   //
   // The same basic variations on this arrangement (mutatis mutandis)
-  // occur when a card mark is introduced. i.e. we se the same basic
-  // shape but the StoreP/N is replaced with CompareAndSawpP/N and the
-  // tail of the graph is a pair comprising a MemBarCPUOrder +
-  // MemBarAcquire.
-  //
-  // So, in the case of a CAS the normal graph has the variant form
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder
-  //          |   \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |    |
-  //          |   SCMemProj
-  //          |   /  . . .
-  //         MergeMem
-  //          |
-  //   MemBarCPUOrder
-  //   MemBarAcquire
-  //
-  // This graph can also easily be detected starting from any
-  // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
-  //
-  // the code below uses two helper predicates, leading_to_normal and
-  // normal_to_leading to identify these normal graphs, one validating
-  // the layout starting from the top membar and searching down and
-  // the other validating the layout starting from the lower membar
-  // and searching up.
-  //
-  // There are two special case GC configurations when a normal graph
-  // may not be generated: when using G1 (which always employs a
-  // conditional card mark); and when using CMS with conditional card
-  // marking configured. These GCs are both concurrent rather than
-  // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
-  // graph between the leading and trailing membar nodes, in
-  // particular enforcing stronger memory serialisation beween the
-  // object put and the corresponding conditional card mark. CMS
-  // employs a post-write GC barrier while G1 employs both a pre- and
-  // post-write GC barrier. Of course the extra nodes may be absent --
-  // they are only inserted for object puts. This significantly
-  // complicates the task of identifying whether a MemBarRelease,
-  // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
-  // when using these GC configurations (see below). It adds similar
-  // complexity to the task of identifying whether a MemBarRelease,
-  // CompareAndSwapX or MemBarAcquire forms part of a CAS.
-  //
-  // In both cases the post-write subtree includes an auxiliary
-  // MemBarVolatile (StoreLoad barrier) separating the object put and
-  // the read of the corresponding card. This poses two additional
-  // problems.
-  //
-  // Firstly, a card mark MemBarVolatile needs to be distinguished
-  // from a normal trailing MemBarVolatile. Resolving this first
-  // problem is straightforward: a card mark MemBarVolatile always
-  // projects a Mem feed to a StoreCM node and that is a unique marker
+  // occur when a card mark is introduced. i.e. the CPUOrder MemBar
+  // feeds the extra CastP2X, LoadB etc nodes but the above memory
+  // flow subgraph is still present.
+  // 
+  // This is referred to as a *normal* CAS subgraph. It can easily be
+  // detected starting from any candidate MemBarRelease,
+  // CompareAndSwapX or MemBarAcquire node.
+  //
+  // The code below uses two helper predicates, leading_to_trailing
+  // and trailing_to_leading to identify these normal graphs, one
+  // validating the layout starting from the top membar and searching
+  // down and the other validating the layout starting from the lower
+  // membar and searching up.
+  //
+  // There are two special case GC configurations when the simple
+  // normal graphs above may not be generated: when using G1 (which
+  // always employs a conditional card mark); and when using CMS with
+  // conditional card marking (+CondCardMark) configured. These GCs
+  // are both concurrent rather than stop-the-world GCs. So they
+  // introduce extra Ctl+Mem flow into the graph between the leading
+  // and trailing membar nodes, in particular enforcing stronger
+  // memory serialisation between the object put and the corresponding
+  // conditional card mark. CMS employs a post-write GC barrier while
+  // G1 employs both a pre- and post-write GC barrier.
+  //
+  // The post-write barrier subgraph for these configurations includes
+  // a MemBarVolatile node -- referred to as a card mark membar --
+  // which is needed to order the card write (StoreCM) operation in
+  // the barrier, the preceding StoreX (or CompareAndSwapX), and Store
+  // operations performed by GC threads, i.e. a card mark membar
+  // constitutes a StoreLoad barrier and hence must be translated to a
+  // dmb ish (whether or not it sits inside a volatile store sequence).
+  //
+  // Of course, the use of the dmb ish for the card mark membar also
+  // implies that the StoreCM which follows can omit the dmb ishst
+  // instruction. The necessary visibility ordering will already be
+  // guaranteed by the dmb ish. In sum, the dmb ishst instruction only
+  // needs to be generated as part of the StoreCM sequence with GC
+  // configuration +CMS -CondCardMark.
+  // 
+  // Of course all these extra barrier nodes may well be absent --
+  // they are only inserted for object puts. Their potential presence
+  // significantly complicates the task of identifying whether a
+  // MemBarRelease, StoreX[mo_release], MemBarVolatile or
+  // MemBarAcquire forms part of a volatile put or CAS when using
+  // these GC configurations (see below) and also complicates the
+  // decision as to how to translate a MemBarVolatile and StoreCM.
+  //
+  // So, this means that a card mark MemBarVolatile occurring in the
+  // post-barrier graph needs to be distinguished from a normal
+  // trailing MemBarVolatile. Resolving this is straightforward: a
+  // card mark MemBarVolatile always projects a Mem feed to a StoreCM
+  // node and that is a unique marker
   //
   //      MemBarVolatile (card mark)
   //       C |    \     . . .
   //         |   StoreCM   . . .
   //       . . .
   //
-  // The second problem is how the code generator is to translate the
-  // card mark barrier? It always needs to be translated to a "dmb
-  // ish" instruction whether or not it occurs as part of a volatile
-  // put. A StoreLoad barrier is needed after the object put to ensure
-  // i) visibility to GC threads of the object put and ii) visibility
-  // to the mutator thread of any card clearing write by a GC
-  // thread. Clearly a normal store (str) will not guarantee this
-  // ordering but neither will a releasing store (stlr). The latter
-  // guarantees that the object put is visible but does not guarantee
-  // that writes by other threads have also been observed.
-  //
-  // So, returning to the task of translating the object put and the
-  // leading/trailing membar nodes: what do the non-normal node graph
-  // look like for these 2 special cases? and how can we determine the
-  // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
-  // in both normal and non-normal cases?
+  // Returning to the task of translating the object put and the
+  // leading/trailing membar nodes: what do the node graphs look like
+  // for these 2 special cases? and how can we determine the status of
+  // a MemBarRelease, StoreX[mo_release] or MemBarVolatile in both
+  // normal and non-normal cases?
   //
   // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
   // which selects conditonal execution based on the value loaded
@@ -1608,91 +1601,117 @@
   // which looks like this
   //
   //   MemBarRelease
-  //   MemBarCPUOrder_(leading)__________________
-  //     C |    M \       \\                   C \
-  //       |       \    StoreN/P[mo_release]  CastP2X
-  //       |    Bot \    /
-  //       |       MergeMem
-  //       |         /
-  //      MemBarVolatile (card mark)
-  //     C |  ||    M |
-  //       | LoadB    |
-  //       |   |      |
-  //       | Cmp      |\
-  //       | /        | \
-  //       If         |  \
-  //       | \        |   \
-  // IfFalse  IfTrue  |    \
-  //       \     / \  |     \
-  //        \   / StoreCM    |
-  //         \ /      |      |
-  //        Region   . . .   |
-  //          | \           /
-  //          |  . . .  \  / Bot
+  //   MemBarCPUOrder_(leading)____________________
+  //     C |  | M \       \\               M |   C \
+  //       |  |    \    StoreN/P[mo_release] |  CastP2X
+  //       |  | Bot \    / oop      \        |
+  //       |  |    MergeMem          \      / 
+  //       |  |      /                |    /
+  //     MemBarVolatile (card mark)   |   /
+  //     C |  ||    M |               |  /
+  //       | LoadB    | Bot       oop | / Bot
+  //       |   |      |              / /
+  //       | Cmp      |\            / /
+  //       | /        | \          / /
+  //       If         |  \        / /
+  //       | \        |   \      / /
+  // IfFalse  IfTrue  |    \    / /
+  //       \     / \  |    |   / /
+  //        \   / StoreCM  |  / /
+  //         \ /      \   /  / /
+  //        Region     Phi  / /
+  //          | \   Raw |  / /
+  //          |  . . .  | / /
   //          |       MergeMem
-  //          |          |
+  //          |           |
   //        MemBarVolatile (trailing)
   //
-  // The first MergeMem merges the AliasIdxBot Mem slice from the
-  // leading membar and the oopptr Mem slice from the Store into the
-  // card mark membar. The trailing MergeMem merges the AliasIdxBot
-  // Mem slice from the card mark membar and the AliasIdxRaw slice
-  // from the StoreCM into the trailing membar (n.b. the latter
-  // proceeds via a Phi associated with the If region).
-  //
-  // The graph for a CAS varies slightly, the obvious difference being
-  // that the StoreN/P node is replaced by a CompareAndSwapP/N node
-  // and the trailing MemBarVolatile by a MemBarCPUOrder +
-  // MemBarAcquire pair. The other important difference is that the
-  // CompareAndSwap node's SCMemProj is not merged into the card mark
-  // membar - it still feeds the trailing MergeMem. This also means
-  // that the card mark membar receives its Mem feed directly from the
-  // leading membar rather than via a MergeMem.
+  // Notice that there are two MergeMem nodes below the leading
+  // membar. The first MergeMem merges the AliasIdxBot Mem slice from
+  // the leading membar and the oopptr Mem slice from the Store into
+  // the card mark membar. The trailing MergeMem merges the
+  // AliasIdxBot Mem slice from the leading membar, the AliasIdxRaw
+  // slice from the StoreCM and an oop slice from the StoreN/P node
+  // into the trailing membar (n.b. the raw slice proceeds via a Phi
+  // associated with the If region).
+  //
+  // So, in the case of CMS + CondCardMark the volatile object store
+  // graph still includes a normal volatile store subgraph from the
+  // leading membar to the trailing membar. However, it also contains
+  // the same shape memory flow to the card mark membar. The two flows
+  // can be distinguished by testing whether or not the downstream
+  // membar is a card mark membar.
+  //
+  // The graph for a CAS also varies with CMS + CondCardMark, in
+  // particular employing a control feed from the CompareAndSwapX node
+  // through a CmpI and If to the card mark membar and StoreCM which
+  // updates the associated card. This avoids executing the card mark
+  // if the CAS fails. However, it can be seen from the diagram below
+  // that the presence of the barrier does not alter the normal CAS
+  // memory subgraph where the leading membar feeds a CompareAndSwapX,
+  // an SCMemProj, a MergeMem then a final trailing MemBarCPUOrder and
+  // MemBarAcquire pair.
   //
   //   MemBarRelease
-  //   MemBarCPUOrder__(leading)_________________________
-  //       ||                       \\                 C \
-  //   MemBarVolatile (card mark)  CompareAndSwapN/P  CastP2X
-  //     C |  ||    M |              |
-  //       | LoadB    |       ______/|
-  //       |   |      |      /       |
-  //       | Cmp      |     /      SCMemProj
-  //       | /        |    /         |
-  //       If         |   /         /
-  //       | \        |  /         /
-  // IfFalse  IfTrue  | /         /
-  //       \     / \  |/ prec    /
-  //        \   / StoreCM       /
-  //         \ /      |        /
-  //        Region   . . .    /
-  //          | \            /
-  //          |  . . .  \   / Bot
-  //          |       MergeMem
-  //          |          |
-  //        MemBarCPUOrder
-  //        MemBarAcquire (trailing)
+  //   MemBarCPUOrder__(leading)_______________________
+  //   C /  M |                        \\            C \
+  //  . . .   | Bot                CompareAndSwapN/P   CastP2X
+  //          |                  C /  M |
+  //          |                 CmpI    |
+  //          |                  /      |
+  //          |               . . .     |
+  //          |              IfTrue     |
+  //          |              /          |
+  //       MemBarVolatile (card mark)   |
+  //        C |  ||    M |              |
+  //          | LoadB    | Bot   ______/|
+  //          |   |      |      /       |
+  //          | Cmp      |     /      SCMemProj
+  //          | /        |    /         |
+  //          If         |   /         /
+  //          | \        |  /         / Bot
+  //     IfFalse  IfTrue | /         /
+  //          |   / \   / / prec    /
+  //   . . .  |  /  StoreCM        /
+  //        \ | /      | raw      /
+  //        Region    . . .      /
+  //           | \              /
+  //           |   . . .   \    / Bot
+  //           |        MergeMem
+  //           |          /
+  //         MemBarCPUOrder
+  //         MemBarAcquire (trailing)
   //
   // This has a slightly different memory subgraph to the one seen
-  // previously but the core of it is the same as for the CAS normal
-  // sungraph
+  // previously but the core of it has a similar memory flow to the
+  // CAS normal subgraph:
   //
   //   MemBarRelease
   //   MemBarCPUOrder____
-  //      ||             \      . . .
-  //   MemBarVolatile  CompareAndSwapX  . . .
-  //      |  \            |
-  //        . . .   SCMemProj
-  //          |     /  . . .
-  //         MergeMem
-  //          |
+  //         |          \      . . .
+  //         |       CompareAndSwapX  . . .
+  //         |       C /  M |
+  //         |      CmpI    |
+  //         |       /      |
+  //         |      . .    /
+  //     Bot |   IfTrue   /
+  //         |   /       /
+  //    MemBarVolatile  /
+  //         | ...     /
+  //      StoreCM ... /
+  //         |       / 
+  //       . . .  SCMemProj
+  //      Raw \    / Bot
+  //        MergeMem
+  //           |
   //   MemBarCPUOrder
   //   MemBarAcquire
   //
-  //
-  // G1 is quite a lot more complicated. The nodes inserted on behalf
-  // of G1 may comprise: a pre-write graph which adds the old value to
-  // the SATB queue; the releasing store itself; and, finally, a
-  // post-write graph which performs a card mark.
+  // The G1 graph for a volatile object put is a lot more complicated.
+  // Nodes inserted on behalf of G1 may comprise: a pre-write graph
+  // which adds the old value to the SATB queue; the releasing store
+  // itself; and, finally, a post-write graph which performs a card
+  // mark.
   //
   // The pre-write graph may be omitted, but only when the put is
   // writing to a newly allocated (young gen) object and then only if
@@ -1730,25 +1749,60 @@
   //       | CastP2X | StoreN/P[mo_release] |
   //       |         |         |            |
   //     C |       M |       M |          M |
-  //        \        |         |           /
+  //        \        | Raw     | oop       / Bot
   //                  . . .
   //          (post write subtree elided)
   //                    . . .
   //             C \         M /
   //         MemBarVolatile (trailing)
   //
+  // Note that the three memory feeds into the post-write tree are an
+  // AliasIdxRaw slice associated with the writes in the pre-write
+  // tree, an oop type slice from the StoreX specific to the type of
+  // the volatile field and the AliasIdxBot slice emanating from the
+  // leading membar.
+  //
   // n.b. the LoadB in this subgraph is not the card read -- it's a
   // read of the SATB queue active flag.
   //
-  // Once again the CAS graph is a minor variant on the above with the
-  // expected substitutions of CompareAndSawpX for StoreN/P and
-  // MemBarCPUOrder + MemBarAcquire for trailing MemBarVolatile.
+  // The CAS graph is once again a variant of the above with a
+  // CompareAndSwapX node and SCMemProj in place of the StoreX.  The
+  // value from the CompareAndSwapX node is fed into the post-write
+  // graph along with the AliasIdxRaw feed from the pre-barrier and
+  // the AliasIdxBot feeds from the leading membar and the SCMemProj.
+  //
+  //  MemBarRelease (leading)____________
+  //     C |  ||  M \   M \    M \  M \ . . .
+  //       | LoadB   \  LoadL  LoadN   \
+  //       | /        \                 \
+  //       If         |\                 \
+  //       | \        | \                 \
+  //  IfFalse  IfTrue |  \                 \
+  //       |     |    |   \                 \
+  //       |     If   |    \                 |
+  //       |     |          \                |
+  //       |                 \               |
+  //       |    . . .         \              |
+  //       | /       | /       \             |
+  //      Region  Phi[M]        \            |
+  //       | \       |           \           |
+  //       |  \_____ |            |          |
+  //     C | C \     |            |          |
+  //       | CastP2X |     CompareAndSwapX   |
+  //       |         |   res |     |         |
+  //     C |       M |       |  SCMemProj  M |
+  //        \        | Raw   |     | Bot    / Bot
+  //                  . . .
+  //          (post write subtree elided)
+  //                    . . .
+  //             C \         M /
+  //         MemBarVolatile (trailing)
   //
   // The G1 post-write subtree is also optional, this time when the
   // new value being written is either null or can be identified as a
   // newly allocated (young gen) object with no intervening control
   // flow. The latter cannot happen but the former may, in which case
-  // the card mark membar is omitted and the memory feeds form the
+  // the card mark membar is omitted and the memory feeds from the
   // leading membar and the SToreN/P are merged direct into the
   // trailing membar as per the normal subgraph. So, the only special
   // case which arises is when the post-write subgraph is generated.
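  // (Editorial aside, not part of this changeset: the "volatile put"
  // and "volatile get" operations whose ideal-graph shapes are
  // classified above correspond, at the language level, to a store
  // with release ordering and a load with acquire ordering. A minimal
  // standalone C++ analogue using std::atomic, purely illustrative and
  // not HotSpot code, is shown below.)

#include <atomic>
#include <cstdio>

std::atomic<int> payload{0};

// Analogue of a Java volatile put: a releasing store. On AArch64 a
// compiler may emit an stlr instruction for this, which is the store
// form the predicates above try to select so that the surrounding
// dmb instructions can be elided.
void volatile_put(int v) { payload.store(v, std::memory_order_release); }

// Analogue of a Java volatile get: an acquiring load (typically ldar).
int volatile_get() { return payload.load(std::memory_order_acquire); }

int main() {
  volatile_put(42);
  std::printf("%d\n", volatile_get());
  return 0;
}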
@@ -1770,94 +1824,106 @@
   //
   //                (pre-write subtree elided)
   //        . . .                  . . .    . . .  . . .
-  //        C |                    M |     M |    M |
-  //       Region                  Phi[M] StoreN    |
-  //          |                     / \      |      |
-  //         / \_______            /   \     |      |
-  //      C / C \      . . .            \    |      |
-  //       If   CastP2X . . .            |   |      |
-  //       / \                           |   |      |
-  //      /   \                          |   |      |
-  // IfFalse IfTrue                      |   |      |
-  //   |       |                         |   |     /|
-  //   |       If                        |   |    / |
-  //   |      / \                        |   |   /  |
-  //   |     /   \                        \  |  /   |
-  //   | IfFalse IfTrue                   MergeMem  |
-  //   |  . . .    / \                       /      |
-  //   |          /   \                     /       |
-  //   |     IfFalse IfTrue                /        |
-  //   |      . . .    |                  /         |
-  //   |               If                /          |
-  //   |               / \              /           |
-  //   |              /   \            /            |
-  //   |         IfFalse IfTrue       /             |
-  //   |           . . .   |         /              |
-  //   |                    \       /               |
-  //   |                     \     /                |
-  //   |             MemBarVolatile__(card mark)    |
-  //   |                ||   C |  M \  M \          |
-  //   |               LoadB   If    |    |         |
-  //   |                      / \    |    |         |
-  //   |                     . . .   |    |         |
-  //   |                          \  |    |        /
-  //   |                        StoreCM   |       /
-  //   |                          . . .   |      /
-  //   |                        _________/      /
-  //   |                       /  _____________/
-  //   |   . . .       . . .  |  /            /
-  //   |    |                 | /   _________/
-  //   |    |               Phi[M] /        /
-  //   |    |                 |   /        /
-  //   |    |                 |  /        /
-  //   |  Region  . . .     Phi[M]  _____/
-  //   |    /                 |    /
-  //   |                      |   /
-  //   | . . .   . . .        |  /
-  //   | /                    | /
-  // Region           |  |  Phi[M]
-  //   |              |  |  / Bot
-  //    \            MergeMem
-  //     \            /
-  //     MemBarVolatile
-  //
-  // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
-  // from the leading membar and the oopptr Mem slice from the Store
-  // into the card mark membar i.e. the memory flow to the card mark
-  // membar still looks like a normal graph.
-  //
-  // The trailing MergeMem merges an AliasIdxBot Mem slice with other
-  // Mem slices (from the StoreCM and other card mark queue stores).
-  // However in this case the AliasIdxBot Mem slice does not come
-  // direct from the card mark membar. It is merged through a series
-  // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
-  // from the leading membar with the Mem feed from the card mark
-  // membar. Each Phi corresponds to one of the Ifs which may skip
-  // around the card mark membar. So when the If implementing the NULL
-  // value check has been elided the total number of Phis is 2
-  // otherwise it is 3.
-  //
-  // The CAS graph when using G1GC also includes a pre-write subgraph
-  // and an optional post-write subgraph. Teh sam evarioations are
-  // introduced as for CMS with conditional card marking i.e. the
-  // StoreP/N is swapped for a CompareAndSwapP/N, the tariling
-  // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the
-  // Mem feed from the CompareAndSwapP/N includes a precedence
-  // dependency feed to the StoreCM and a feed via an SCMemProj to the
-  // trailing membar. So, as before the configuration includes the
-  // normal CAS graph as a subgraph of the memory flow.
-  //
-  // So, the upshot is that in all cases the volatile put graph will
-  // include a *normal* memory subgraph betwen the leading membar and
-  // its child membar, either a volatile put graph (including a
-  // releasing StoreX) or a CAS graph (including a CompareAndSwapX).
-  // When that child is not a card mark membar then it marks the end
-  // of the volatile put or CAS subgraph. If the child is a card mark
-  // membar then the normal subgraph will form part of a volatile put
-  // subgraph if and only if the child feeds an AliasIdxBot Mem feed
-  // to a trailing barrier via a MergeMem. That feed is either direct
-  // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
-  // memory flow (for G1).
+  //        C |               M |    M |    M |
+  //       Region            Phi[M] StoreN    |
+  //          |            Raw  |  oop |  Bot |
+  //         / \_______         |\     |\     |\
+  //      C / C \      . . .    | \    | \    | \
+  //       If   CastP2X . . .   |  \   |  \   |  \
+  //       / \                  |   \  |   \  |   \
+  //      /   \                 |    \ |    \ |    \
+  // IfFalse IfTrue             |      |      |     \
+  //   |       |                 \     |     /       |
+  //   |       If                 \    | \  /   \    |
+  //   |      / \                  \   |   /     \   |
+  //   |     /   \                  \  |  / \     |  |
+  //   | IfFalse IfTrue           MergeMem   \    |  |
+  //   |  . . .    / \                 |      \   |  |
+  //   |          /   \                |       |  |  |
+  //   |     IfFalse IfTrue            |       |  |  |
+  //   |      . . .    |               |       |  |  |
+  //   |               If             /        |  |  |
+  //   |               / \           /         |  |  |
+  //   |              /   \         /          |  |  |
+  //   |         IfFalse IfTrue    /           |  |  |
+  //   |           . . .   |      /            |  |  |
+  //   |                    \    /             |  |  |
+  //   |                     \  /              |  |  |
+  //   |         MemBarVolatile__(card mark  ) |  |  |
+  //   |              ||   C |     \           |  |  |
+  //   |             LoadB   If     |         /   |  |
+  //   |                    / \ Raw |        /   /  /
+  //   |                   . . .    |       /   /  /
+  //   |                        \   |      /   /  /
+  //   |                        StoreCM   /   /  /
+  //   |                           |     /   /  /
+  //   |                            . . .   /  /
+  //   |                                   /  /
+  //   |   . . .                          /  /
+  //   |    |             | /            /  /
+  //   |    |           Phi[M] /        /  /
+  //   |    |             |   /        /  /
+  //   |    |             |  /        /  /
+  //   |  Region  . . .  Phi[M]      /  /
+  //   |    |             |         /  /
+  //    \   |             |        /  /
+  //     \  | . . .       |       /  /
+  //      \ |             |      /  /
+  //      Region         Phi[M] /  /
+  //        |               \  /  /
+  //         \             MergeMem
+  //          \            /
+  //          MemBarVolatile
+  //
+  // As with CMS + CondCardMark the first MergeMem merges the
+  // AliasIdxBot Mem slice from the leading membar and the oopptr Mem
+  // slice from the Store into the card mark membar. However, in this
+  // case it may also merge an AliasIdxRaw mem slice from the pre
+  // barrier write.
+  //
+  // The trailing MergeMem merges an AliasIdxBot Mem slice from the
+  // leading membar with an oop slice from the StoreN and an
+  // AliasIdxRaw slice from the post barrier writes. In this case the
+  // AliasIdxRaw Mem slice is merged through a series of Phi nodes
+  // which combine feeds from the If regions in the post barrier
+  // subgraph.
+  //
+  // So, for G1 the same characteristic subgraph arises as for CMS +
+  // CondCardMark. There is a normal subgraph feeding the card mark
+  // membar and a normal subgraph feeding the trailing membar.
+  //
+  // The CAS graph when using G1GC also includes an optional
+  // post-write subgraph. It is very similar to the above graph except
+  // for a few details.
+  // 
+  // - The control flow is gated by an additional If which tests the
+  // result from the CompareAndSwapX node.
+  // 
+  //  - The MergeMem which feeds the card mark membar only merges the
+  // AliasIdxBot slice from the leading membar and the AliasIdxRaw
+  // slice from the pre-barrier. It does not merge the SCMemProj
+  // AliasIdxBot slice. So, this subgraph does not look like the
+  // normal CAS subgraph.
+  //
+  // - The MergeMem which feeds the trailing membar merges the
+  // AliasIdxBot slice from the leading membar, the AliasIdxRaw slice
+  // from the post-barrier and the SCMemProj AliasIdxBot slice i.e. it
+  // has two AliasIdxBot input slices. However, this subgraph does
+  // still look like the normal CAS subgraph.
+  //
+  // So, the upshot is:
+  //
+  // In all cases a volatile put graph will include a *normal*
+  // volatile store subgraph between the leading membar and the
+  // trailing membar. It may also include a normal volatile store
+  // subgraph between the leading membar and the card mark membar.
+  //
+  // In all cases a CAS graph will contain a unique normal CAS graph
+  // feeding the trailing membar.
+  //
+  // In all cases where there is a card mark membar (either as part of
+  // a volatile object put or CAS) it will be fed by a MergeMem whose
+  // AliasIdxBot slice feed will be a leading membar.
   //
   // The predicates controlling generation of instructions for store
   // and barrier nodes employ a few simple helper functions (described
@@ -1878,24 +1944,24 @@
 	    opcode == Op_CompareAndSwapP);
   }
 
-  // leading_to_normal
+  // leading_to_trailing
   //
   //graph traversal helper which detects the normal case Mem feed from
   // a release membar (or, optionally, its cpuorder child) to a
   // dependent volatile membar i.e. it ensures that one or other of
   // the following Mem flow subgraph is present.
   //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  StoreN/P[mo_release]  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarVolatile {trailing or card mark}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
+  //   MemBarRelease {leading}
+  //   {MemBarCPUOrder} {optional}
+  //     Bot |  \      . . .
+  //         |  StoreN/P[mo_release]  . . .
+  //         |   /
+  //        MergeMem
+  //         |
+  //   MemBarVolatile {not card mark}
+  //
+  //   MemBarRelease {leading}
+  //   {MemBarCPUOrder} {optional}
   //      |       \      . . .
   //      |     CompareAndSwapX  . . .
   //               |
@@ -1906,6 +1972,23 @@
   //    MemBarCPUOrder
   //    MemBarAcquire {trailing}
   //
+  // the predicate needs to be capable of distinguishing the following
+  // volatile put graph which may arise when a GC post barrier
+  // inserts a card mark membar
+  //
+  //   MemBarRelease {leading}
+  //   {MemBarCPUOrder}__
+  //     Bot |   \       \
+  //         |   StoreN/P \
+  //         |    / \     |
+  //        MergeMem \    |
+  //         |        \   |
+  //   MemBarVolatile  \  |
+  //    {card mark}     \ |
+  //                  MergeMem
+  //                      |
+  // {not card mark} MemBarVolatile
+  //
   // if the correct configuration is present returns the trailing
   // membar otherwise NULL.
   //
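  // (Editorial aside, not part of this changeset: the CompareAndSwapX
  // node referred to above is the ideal-graph form of an atomic
  // compare-and-swap performed with volatile, i.e. release plus
  // acquire, ordering. A minimal standalone C++ analogue of that
  // operation, purely illustrative and not HotSpot code, follows.)

#include <atomic>

std::atomic<long> cell{0};

// Analogue of a CompareAndSwapX bracketed by leading/trailing membars:
// release ordering before the update, acquire ordering after it. On
// AArch64 this may compile to an ldaxr/stlxr loop (or a single casal
// with LSE), with no separate dmb required.
bool cas(long expected, long desired) {
  return cell.compare_exchange_strong(expected, desired,
                                      std::memory_order_acq_rel,
                                      std::memory_order_acquire);
}

int main() {
  return cas(0, 1) ? 0 : 1;
}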
@@ -1916,7 +1999,7 @@
   // the returned value may be a card mark or trailing membar
   //
 
-  MemBarNode *leading_to_normal(MemBarNode *leading)
+  MemBarNode *leading_to_trailing(MemBarNode *leading)
   {
     assert((leading->Opcode() == Op_MemBarRelease ||
 	    leading->Opcode() == Op_MemBarCPUOrder),
@@ -1933,15 +2016,21 @@
     StoreNode * st = NULL;
     LoadStoreNode *cas = NULL;
     MergeMemNode *mm = NULL;
+    MergeMemNode *mm2 = NULL;
 
     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
       x = mem->fast_out(i);
       if (x->is_MergeMem()) {
 	if (mm != NULL) {
-	  return NULL;
+	  if (mm2 != NULL) {
+	    // should not see more than 2 merge mems
+	    return NULL;
+	  } else {
+	    mm2 = x->as_MergeMem();
+	  }
+	} else {
+	  mm = x->as_MergeMem();
 	}
-	// two merge mems is one too many
-	mm = x->as_MergeMem();
       } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
 	// two releasing stores/CAS nodes is one too many
 	if (st != NULL || cas != NULL) {
@@ -1961,13 +2050,13 @@
       return NULL;
     }
 
-    // must have a merge if we also have st
+    // must have at least one merge if we also have st
     if (st && !mm) {
       return NULL;
     }
 
-    Node *y = NULL;
     if (cas) {
+      Node *y = NULL;
       // look for an SCMemProj
       for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
 	x = cas->fast_out(i);
@@ -1987,10 +2076,29 @@
 	  break;
 	}
       }
-      if (mm == NULL)
+      if (mm == NULL) {
 	return NULL;
+      }
+      MemBarNode *mbar = NULL;
+      // ensure the merge feeds a trailing membar cpuorder + acquire pair
+      for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
+	x = mm->fast_out(i);
+	if (x->is_MemBar()) {
+	  int opcode = x->Opcode();
+	  if (opcode == Op_MemBarCPUOrder) {
+	    MemBarNode *z =  x->as_MemBar();
+	    z = child_membar(z);
+	    if (z != NULL && z->Opcode() == Op_MemBarAcquire) {
+	      mbar = z;
+	    }
+	  }
+	  break;
+	}
+      }
+      return mbar;
     } else {
-      // ensure the store feeds the existing mergemem;
+      Node *y = NULL;
+      // ensure the store feeds the first mergemem;
       for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
 	if (st->fast_out(i) == mm) {
 	  y = st;
@@ -2000,55 +2108,89 @@
       if (y == NULL) {
 	return NULL;
       }
-    }
-
-    MemBarNode *mbar = NULL;
-    // ensure the merge feeds to the expected type of membar
-    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
-      x = mm->fast_out(i);
-      if (x->is_MemBar()) {
-	int opcode = x->Opcode();
-	if (opcode == Op_MemBarVolatile && st) {
-	  mbar = x->as_MemBar();
-	} else if (cas && opcode == Op_MemBarCPUOrder) {
-	  MemBarNode *y =  x->as_MemBar();
-	  y = child_membar(y);
-	  if (y != NULL && y->Opcode() == Op_MemBarAcquire) {
-	    mbar = y;
+      if (mm2 != NULL) {
+	// ensure the store feeds the second mergemem;
+	y = NULL;
+	for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
+	  if (st->fast_out(i) == mm2) {
+	    y = st;
 	  }
 	}
-	break;
+	if (y == NULL) {
+	  return NULL;
+	}
+      }
+
+      MemBarNode *mbar = NULL;
+      // ensure the first mergemem feeds a volatile membar
+      for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
+	x = mm->fast_out(i);
+	if (x->is_MemBar()) {
+	  int opcode = x->Opcode();
+	  if (opcode == Op_MemBarVolatile) {
+	    mbar = x->as_MemBar();
+	  }
+	  break;
+	}
+      }
+      if (mm2 == NULL) {
+	// this is our only option for a trailing membar
+	return mbar;
       }
-    }
-
-    return mbar;
-  }
-
-  // normal_to_leading
+      // ensure the second mergemem feeds a volatile membar
+      MemBarNode *mbar2 = NULL;
+      for (DUIterator_Fast imax, i = mm2->fast_outs(imax); i < imax; i++) {
+	x = mm2->fast_out(i);
+	if (x->is_MemBar()) {
+	  int opcode = x->Opcode();
+	  if (opcode == Op_MemBarVolatile) {
+	    mbar2 = x->as_MemBar();
+	  }
+	  break;
+	}
+      }
+      // if we have two merge mems we must have two volatile membars
+      if (mbar == NULL || mbar2 == NULL) {
+	return NULL;
+      }
+      // return the trailing membar
+      if (is_card_mark_membar(mbar2)) {
+	return mbar;
+      } else {
+	if (is_card_mark_membar(mbar)) {
+	  return mbar2;
+	} else {
+	  return NULL;
+	}
+      }
+    }
+  }
+
+  // trailing_to_leading
   //
   // graph traversal helper which detects the normal case Mem feed
-  // from either a card mark or a trailing membar to a preceding
-  // release membar (optionally its cpuorder child) i.e. it ensures
-  // that one or other of the following Mem flow subgraphs is present.
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  StoreN/P[mo_release]  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarVolatile {card mark or trailing}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
+  // from a trailing membar to a preceding release membar (optionally
+  // its cpuorder child) i.e. it ensures that one or other of the
+  // following Mem flow subgraphs is present.
+  //
+  //   MemBarRelease {leading}
+  //   MemBarCPUOrder {optional}
+  //    | Bot |  \      . . .
+  //    |     |  StoreN/P[mo_release]  . . .
+  //    |     |   /
+  //    |    MergeMem
+  //    |     |
+  //   MemBarVolatile {not card mark}
+  //
+  //   MemBarRelease {leading}
+  //   MemBarCPUOrder {optional}
   //      |       \      . . .
   //      |     CompareAndSwapX  . . .
   //               |
   //     . . .    SCMemProj
   //           \   |
   //      |    MergeMem
-  //      |        /
+  //      |       |
   //    MemBarCPUOrder
   //    MemBarAcquire {trailing}
   //
@@ -2058,15 +2200,20 @@
   // if the configuration is present returns the cpuorder member for
   // preference or when absent the release membar otherwise NULL.
   //
-  // n.b. the input membar is expected to be a MemBarVolatile but
-  // need not be a card mark membar.
-
-  MemBarNode *normal_to_leading(const MemBarNode *barrier)
+  // n.b. the input membar is expected to be a MemBarVolatile or
+  // MemBarAcquire. if it is a MemBarVolatile it must *not* be a card
+  // mark membar.
+
+  MemBarNode *trailing_to_leading(const MemBarNode *barrier)
   {
     // input must be a volatile membar
     assert((barrier->Opcode() == Op_MemBarVolatile ||
 	    barrier->Opcode() == Op_MemBarAcquire),
 	   "expecting a volatile or an acquire membar");
+
+    assert((barrier->Opcode() != Op_MemBarVolatile) ||
+	   !is_card_mark_membar(barrier),
+	   "not expecting a card mark membar");
     Node *x;
     bool is_cas = barrier->Opcode() == Op_MemBarAcquire;
 
@@ -2179,169 +2326,35 @@
     return NULL;
   }
 
-  // card_mark_to_trailing
-  //
-  // graph traversal helper which detects extra, non-normal Mem feed
-  // from a card mark volatile membar to a trailing membar i.e. it
-  // ensures that one of the following three GC post-write Mem flow
-  // subgraphs is present.
-  //
-  // 1)
-  //     . . .
-  //       |
-  //   MemBarVolatile (card mark)
-  //      |          |
-  //      |        StoreCM
-  //      |          |
-  //      |        . . .
-  //  Bot |  /
-  //   MergeMem
-  //      |
-  //      |
-  //    MemBarVolatile {trailing}
-  //
-  // 2)
-  //   MemBarRelease/CPUOrder (leading)
-  //    |
-  //    |
-  //    |\       . . .
-  //    | \        |
-  //    |  \  MemBarVolatile (card mark)
-  //    |   \   |     |
-  //     \   \  |   StoreCM    . . .
-  //      \   \ |
-  //       \  Phi
-  //        \ /
-  //        Phi  . . .
+  // card_mark_to_leading
+  //
+  // graph traversal helper which traverses from a card mark volatile
+  // membar to a leading membar i.e. it ensures that the following Mem
+  // flow subgraph is present.
+  //
+  //    MemBarRelease {leading}
+  //   {MemBarCPUOrder} {optional}
+  //         |   . . .
   //     Bot |   /
-  //       MergeMem
+  //      MergeMem
   //         |
-  //    MemBarVolatile {trailing}
-  //
-  //
-  // 3)
-  //   MemBarRelease/CPUOrder (leading)
-  //    |
-  //    |\
-  //    | \
-  //    |  \      . . .
-  //    |   \       |
-  //    |\   \  MemBarVolatile (card mark)
-  //    | \   \   |     |
-  //    |  \   \  |   StoreCM    . . .
-  //    |   \   \ |
-  //     \   \  Phi
-  //      \   \ /
-  //       \  Phi
-  //        \ /
-  //        Phi  . . .
-  //     Bot |   /
-  //       MergeMem
-  //         |
-  //         |
-  //    MemBarVolatile {trailing}
-  //
-  // configuration 1 is only valid if UseConcMarkSweepGC &&
-  // UseCondCardMark
-  //
-  // configurations 2 and 3 are only valid if UseG1GC.
-  //
-  // if a valid configuration is present returns the trailing membar
-  // otherwise NULL.
-  //
-  // n.b. the supplied membar is expected to be a card mark
-  // MemBarVolatile i.e. the caller must ensure the input node has the
-  // correct operand and feeds Mem to a StoreCM node
-
-  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
+  //     MemBarVolatile (card mark)
+  //        |     \
+  //      . . .   StoreCM
+  //
+  // if the configuration is present returns the cpuorder membar for
+  // preference or, when absent, the release membar; otherwise NULL.
+  //
+  // n.b. the input membar is expected to be a MemBarVolatile and must
+  // be a card mark membar.
+
+  MemBarNode *card_mark_to_leading(const MemBarNode *barrier)
   {
     // input must be a card mark volatile membar
     assert(is_card_mark_membar(barrier), "expecting a card mark membar");
 
-    Node *feed = barrier->proj_out(TypeFunc::Memory);
-    Node *x;
-    MergeMemNode *mm = NULL;
-
-    const int MAX_PHIS = 3;	// max phis we will search through
-    int phicount = 0; 		// current search count
-
-    bool retry_feed = true;
-    while (retry_feed) {
-      // see if we have a direct MergeMem feed
-      for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
-	x = feed->fast_out(i);
-	// the correct Phi will be merging a Bot memory slice
-	if (x->is_MergeMem()) {
-	  mm = x->as_MergeMem();
-	  break;
-	}
-      }
-      if (mm) {
-	retry_feed = false;
-      } else if (UseG1GC & phicount++ < MAX_PHIS) {
-	// the barrier may feed indirectly via one or two Phi nodes
-	PhiNode *phi = NULL;
-	for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
-	  x = feed->fast_out(i);
-	  // the correct Phi will be merging a Bot memory slice
-	  if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
-	    phi = x->as_Phi();
-	    break;
-	  }
-	}
-	if (!phi) {
-	  return NULL;
-	}
-	// look for another merge below this phi
-	feed = phi;
-      } else {
-	// couldn't find a merge
-	return NULL;
-      }
-    }
-
-    // sanity check this feed turns up as the expected slice
-    assert(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
-
-    MemBarNode *trailing = NULL;
-    // be sure we have a trailing membar the merge
-    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
-      x = mm->fast_out(i);
-      if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) {
-	trailing = x->as_MemBar();
-	break;
-      }
-    }
-
-    return trailing;
-  }
-
-  // trailing_to_card_mark
-  //
-  // graph traversal helper which detects extra, non-normal Mem feed
-  // from a trailing volatile membar to a preceding card mark volatile
-  // membar i.e. it identifies whether one of the three possible extra
-  // GC post-write Mem flow subgraphs is present
-  //
-  // this predicate checks for the same flow as the previous predicate
-  // but starting from the bottom rather than the top.
-  //
-  // if the configuration is present returns the card mark membar
-  // otherwise NULL
-  //
-  // n.b. the supplied membar is expected to be a trailing
-  // MemBarVolatile i.e. the caller must ensure the input node has the
-  // correct opcode
-
-  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
-  {
-    assert(trailing->Opcode() == Op_MemBarVolatile,
-	   "expecting a volatile membar");
-    assert(!is_card_mark_membar(trailing),
-	   "not expecting a card mark membar");
-
     // the Mem feed to the membar should be a merge
-    Node *x = trailing->in(TypeFunc::Memory);
+    Node *x = barrier->in(TypeFunc::Memory);
     if (!x->is_MergeMem()) {
       return NULL;
     }
@@ -2349,117 +2362,19 @@
     MergeMemNode *mm = x->as_MergeMem();
 
     x = mm->in(Compile::AliasIdxBot);
-    // with G1 we may possibly see a Phi or two before we see a Memory
-    // Proj from the card mark membar
-
-    const int MAX_PHIS = 3;	// max phis we will search through
-    int phicount = 0; 		// current search count
-
-    bool retry_feed = !x->is_Proj();
-
-    while (retry_feed) {
-      if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) {
-	PhiNode *phi = x->as_Phi();
-	ProjNode *proj = NULL;
-	PhiNode *nextphi = NULL;
-	bool found_leading = false;
-	for (uint i = 1; i < phi->req(); i++) {
-	  x = phi->in(i);
-	  if (x->is_Phi()) {
-	    nextphi = x->as_Phi();
-	  } else if (x->is_Proj()) {
-	    int opcode = x->in(0)->Opcode();
-	    if (opcode == Op_MemBarVolatile) {
-	      proj = x->as_Proj();
-	    } else if (opcode == Op_MemBarRelease ||
-		       opcode == Op_MemBarCPUOrder) {
-	      // probably a leading membar
-	      found_leading = true;
-	    }
-	  }
-	}
-	// if we found a correct looking proj then retry from there
-	// otherwise we must see a leading and a phi or this the
-	// wrong config
-	if (proj != NULL) {
-	  x = proj;
-	  retry_feed = false;
-	} else if (found_leading && nextphi != NULL) {
-	  // retry from this phi to check phi2
-	  x = nextphi;
-	} else {
-	  // not what we were looking for
-	  return NULL;
-	}
-      } else {
-	return NULL;
-      }
-    }
-    // the proj has to come from the card mark membar
-    x = x->in(0);
+
     if (!x->is_MemBar()) {
       return NULL;
     }
 
-    MemBarNode *card_mark_membar = x->as_MemBar();
-
-    if (!is_card_mark_membar(card_mark_membar)) {
-      return NULL;
-    }
-
-    return card_mark_membar;
-  }
-
-  // trailing_to_leading
-  //
-  // graph traversal helper which checks the Mem flow up the graph
-  // from a (non-card mark) trailing membar attempting to locate and
-  // return an associated leading membar. it first looks for a
-  // subgraph in the normal configuration (relying on helper
-  // normal_to_leading). failing that it then looks for one of the
-  // possible post-write card mark subgraphs linking the trailing node
-  // to a the card mark membar (relying on helper
-  // trailing_to_card_mark), and then checks that the card mark membar
-  // is fed by a leading membar (once again relying on auxiliary
-  // predicate normal_to_leading).
-  //
-  // if the configuration is valid returns the cpuorder member for
-  // preference or when absent the release membar otherwise NULL.
-  //
-  // n.b. the input membar is expected to be either a volatile or
-  // acquire membar but in the former case must *not* be a card mark
-  // membar.
-
-  MemBarNode *trailing_to_leading(const MemBarNode *trailing)
-  {
-    assert((trailing->Opcode() == Op_MemBarAcquire ||
-	    trailing->Opcode() == Op_MemBarVolatile),
-	   "expecting an acquire or volatile membar");
-    assert((trailing->Opcode() != Op_MemBarVolatile ||
-	    !is_card_mark_membar(trailing)),
-	   "not expecting a card mark membar");
-
-    MemBarNode *leading = normal_to_leading(trailing);
-
-    if (leading) {
+    MemBarNode *leading = x->as_MemBar();
+
+    if (leading_membar(leading)) {
       return leading;
     }
 
-    // nothing more to do if this is an acquire
-    if (trailing->Opcode() == Op_MemBarAcquire) {
-      return NULL;
-    }
-
-    MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
-
-    if (!card_mark_membar) {
-      return NULL;
-    }
-
-    return normal_to_leading(card_mark_membar);
-  }
-
-  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
+    return NULL;
+  }
 
 bool unnecessary_acquire(const Node *barrier)
 {
@@ -2675,19 +2590,8 @@
   }
 
   // must start with a normal feed
-  MemBarNode *child_barrier = leading_to_normal(barrier);
-
-  if (!child_barrier) {
-    return false;
-  }
-
-  if (!is_card_mark_membar(child_barrier)) {
-    // this is the trailing membar and we are done
-    return true;
-  }
-
-  // must be sure this card mark feeds a trailing membar
-  MemBarNode *trailing = card_mark_to_trailing(child_barrier);
+  MemBarNode *trailing = leading_to_trailing(barrier);
+
   return (trailing != NULL);
 }
 
@@ -2709,7 +2613,7 @@
   }
 
   // ok, if it's not a card mark then we still need to check if it is
-  // a trailing membar of a volatile put hgraph.
+  // a trailing membar of a volatile put graph.
 
   return (trailing_to_leading(mbvol) != NULL);
 }
@@ -2759,20 +2663,9 @@
   }
 
   // does this lead a normal subgraph?
-  MemBarNode *mbvol = leading_to_normal(barrier);
-
-  if (!mbvol) {
-    return false;
-  }
-
-  // all done unless this is a card mark
-  if (!is_card_mark_membar(mbvol)) {
-    return true;
-  }
-
-  // we found a card mark -- just make sure we have a trailing barrier
-
-  return (card_mark_to_trailing(mbvol) != NULL);
+  MemBarNode *trailing = leading_to_trailing(barrier);
+
+  return (trailing != NULL);
 }
 
 // predicate controlling translation of CAS
@@ -2814,7 +2707,7 @@
 	  "CAS not fed by cpuorder+release membar pair!");
 
   // does this lead a normal subgraph?
-  MemBarNode *mbar = leading_to_normal(barrier);
+  MemBarNode *mbar = leading_to_trailing(barrier);
 
   assert(mbar != NULL, "CAS not embedded in normal graph!");
 
@@ -2835,48 +2728,27 @@
 
   // we only ever need to generate a dmb ishst between an object put
   // and the associated card mark when we are using CMS without
-  // conditional card marking
+  // conditional card marking. Any other occurrence will happen when
+  // performing a card mark using CMS with conditional card marking or
+  // G1. In those cases the preceding MemBarVolatile will be
+  // translated to a dmb ish which guarantees visibility of the
+  // preceding StoreN/P before this StoreCM.
 
   if (!UseConcMarkSweepGC || UseCondCardMark) {
     return true;
   }
 
-  // if we are implementing volatile puts using barriers then the
-  // object put as an str so we must insert the dmb ishst
+  // if we are implementing volatile puts using barriers then we must
+  // insert the dmb ishst
 
   if (UseBarriersForVolatile) {
     return false;
   }
 
-  // we can omit the dmb ishst if this StoreCM is part of a volatile
-  // put because in thta case the put will be implemented by stlr
-  //
-  // we need to check for a normal subgraph feeding this StoreCM.
-  // that means the StoreCM must be fed Memory from a leading membar,
-  // either a MemBarRelease or its dependent MemBarCPUOrder, and the
-  // leading membar must be part of a normal subgraph
-
-  Node *x = storecm->in(StoreNode::Memory);
-
-  if (!x->is_Proj()) {
-    return false;
-  }
-
-  x = x->in(0);
-
-  if (!x->is_MemBar()) {
-    return false;
-  }
-
-  MemBarNode *leading = x->as_MemBar();
-
-  // reject invalid candidates
-  if (!leading_membar(leading)) {
-    return false;
-  }
-
-  // we can omit the StoreStore if it is the head of a normal subgraph
-  return (leading_to_normal(leading) != NULL);
+  // we must be using CMS without conditional card marking so we have
+  // to generate the StoreStore
+
+  return false;
 }
 
 
@@ -13409,7 +13281,7 @@
     __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
   %}
 
-  ins_pipe(pipe_class_memory);
+  ins_pipe(fp_f2i);
 
 %}
 
@@ -13427,7 +13299,7 @@
     __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
   %}
 
-  ins_pipe(pipe_class_memory);
+  ins_pipe(fp_i2f);
 
 %}
 
@@ -13445,7 +13317,7 @@
     __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
   %}
 
-  ins_pipe(pipe_class_memory);
+  ins_pipe(fp_d2l);
 
 %}
 
@@ -13463,7 +13335,7 @@
     __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
   %}
 
-  ins_pipe(pipe_class_memory);
+  ins_pipe(fp_l2d);
 
 %}
 
@@ -14319,6 +14191,25 @@
   ins_pipe(pipe_cmp_branch);
 %}
 
+instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
+  match(If cmp (CmpN op1 op2));
+  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
+	    || n->in(1)->as_Bool()->_test._test == BoolTest::eq);
+  effect(USE labl);
+
+  ins_cost(BRANCH_COST);
+  format %{ "cbw$cmp   $op1, $labl" %}
+  ins_encode %{
+    Label* L = $labl$$label;
+    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
+    if (cond == Assembler::EQ)
+      __ cbzw($op1$$Register, *L);
+    else
+      __ cbnzw($op1$$Register, *L);
+  %}
+  ins_pipe(pipe_cmp_branch);
+%}
+
 instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
   match(If cmp (CmpP (DecodeN oop) zero));
   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
@@ -14911,19 +14802,19 @@
 %}
 
 instruct string_equals(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
-                        iRegI_R0 result, iRegP_R10 tmp, rFlagsReg cr)
+                        iRegI_R0 result, rFlagsReg cr)
 %{
   predicate(!CompactStrings);
   match(Set result (StrEquals (Binary str1 str2) cnt));
-  effect(KILL tmp, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
-
-  format %{ "String Equals $str1,$str2,$cnt -> $result    // KILL $tmp" %}
+  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
+
+  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
   ins_encode %{
     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
     __ asrw($cnt$$Register, $cnt$$Register, 1);
-    __ string_equals($str1$$Register, $str2$$Register,
-                      $cnt$$Register, $result$$Register,
-                      $tmp$$Register);
+    __ arrays_equals($str1$$Register, $str2$$Register,
+                     $result$$Register, $cnt$$Register,
+                     2, /*is_string*/true);
   %}
   ins_pipe(pipe_class_memory);
 %}
@@ -14937,9 +14828,10 @@
 
   format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
   ins_encode %{
-    __ byte_arrays_equals($ary1$$Register, $ary2$$Register,
-                          $result$$Register, $tmp$$Register);
-  %}
+    __ arrays_equals($ary1$$Register, $ary2$$Register,
+                     $result$$Register, $tmp$$Register,
+                     1, /*is_string*/false);
+  %}
   ins_pipe(pipe_class_memory);
 %}
 
@@ -14952,12 +14844,14 @@
 
   format %{ "Array Equals $ary1,ary2 -> $result    // KILL $tmp" %}
   ins_encode %{
-    __ char_arrays_equals($ary1$$Register, $ary2$$Register,
-                          $result$$Register, $tmp$$Register);
+    __ arrays_equals($ary1$$Register, $ary2$$Register,
+                     $result$$Register, $tmp$$Register,
+                     2, /*is_string*/false);
   %}
   ins_pipe(pipe_class_memory);
 %}
 
+
 // encode char[] to byte[] in ISO_8859_1
 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                           vRegD_V0 Vtmp1, vRegD_V1 Vtmp2,
@@ -16608,7 +16502,7 @@
             as_FloatRegister($src$$reg),
             as_FloatRegister($shift$$reg));
   %}
-  ins_pipe(vshift64_imm);
+  ins_pipe(vshift64);
 %}
 
 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
@@ -16622,7 +16516,7 @@
             as_FloatRegister($src$$reg),
             as_FloatRegister($shift$$reg));
   %}
-  ins_pipe(vshift128_imm);
+  ins_pipe(vshift128);
 %}
 
 instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
@@ -16635,7 +16529,7 @@
             as_FloatRegister($src$$reg),
             as_FloatRegister($shift$$reg));
   %}
-  ins_pipe(vshift64_imm);
+  ins_pipe(vshift64);
 %}
 
 instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
@@ -16648,7 +16542,7 @@
             as_FloatRegister($src$$reg),
             as_FloatRegister($shift$$reg));
   %}
-  ins_pipe(vshift128_imm);
+  ins_pipe(vshift128);
 %}
 
 instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
@@ -16766,7 +16660,7 @@
            as_FloatRegister($src$$reg),
            (int)$shift$$constant & 63);
   %}
-  ins_pipe(vshift128);
+  ins_pipe(vshift128_imm);
 %}
 
 instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
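(Editor's aside, not part of the changeset.) The StoreCM predicate rewritten above now reduces to a plain flag check: the dmb ishst is only ever emitted for CMS without conditional card marking, because in the other card-marking configurations the preceding card mark MemBarVolatile is translated to a dmb ish and already orders the StoreN/P before the StoreCM. A minimal sketch of the decision it encodes, using the HotSpot flags the comments refer to:

    // Editor's sketch of the simplified decision encoded by the predicate above;
    // UseConcMarkSweepGC and UseCondCardMark are the existing HotSpot flags
    // referenced in the patch.
    bool storecm_needs_dmb_ishst_sketch() {
      if (!UseConcMarkSweepGC || UseCondCardMark) {
        return false;   // no dmb ishst required here (see the predicate's comment above)
      }
      // CMS without conditional card marking: always emit the dmb ishst,
      // whether volatile puts use explicit barriers or stlr.
      return true;
    }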
--- a/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -74,7 +74,7 @@
 void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
   address pc = _instructions->start() + pc_offset;
   NativeInstruction* inst = nativeInstruction_at(pc);
-  if (inst->is_adr_aligned()) {
+  if (inst->is_adr_aligned() || inst->is_ldr_literal()) {
     address dest = _constants->start() + data_offset;
     _instructions->relocate(pc, section_word_Relocation::spec((address) dest, CodeBuffer::SECT_CONSTS));
     TRACE_jvmci_3("relocating at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset);
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -4481,224 +4481,125 @@
   BLOCK_COMMENT("} string_compare");
 }
 
-
-void MacroAssembler::string_equals(Register str1, Register str2,
-                                   Register cnt, Register result,
-                                   Register tmp1) {
-  Label SAME_CHARS, DONE, SHORT_LOOP, SHORT_STRING,
-    NEXT_WORD;
-
-  const Register tmp2 = rscratch1;
-  assert_different_registers(str1, str2, cnt, result, tmp1, tmp2, rscratch2);
-
-  BLOCK_COMMENT("string_equals {");
-
-  // Start by assuming that the strings are not equal.
-  mov(result, zr);
-
-  // A very short string
-  cmpw(cnt, 4);
-  br(Assembler::LT, SHORT_STRING);
-
-  // Check if the strings start at the same location.
-  cmp(str1, str2);
-  br(Assembler::EQ, SAME_CHARS);
-
-  // Compare longwords
-  {
-    subw(cnt, cnt, 4); // The last longword is a special case
-
-    // Move both string pointers to the last longword of their
-    // strings, negate the remaining count, and convert it to bytes.
-    lea(str1, Address(str1, cnt, Address::uxtw(1)));
-    lea(str2, Address(str2, cnt, Address::uxtw(1)));
-    sub(cnt, zr, cnt, LSL, 1);
-
-    // Loop, loading longwords and comparing them into rscratch2.
-    bind(NEXT_WORD);
-    ldr(tmp1, Address(str1, cnt));
-    ldr(tmp2, Address(str2, cnt));
-    adds(cnt, cnt, wordSize);
-    eor(rscratch2, tmp1, tmp2);
-    cbnz(rscratch2, DONE);
-    br(Assembler::LT, NEXT_WORD);
-
-    // Last longword.  In the case where length == 4 we compare the
-    // same longword twice, but that's still faster than another
-    // conditional branch.
-
-    ldr(tmp1, Address(str1));
-    ldr(tmp2, Address(str2));
-    eor(rscratch2, tmp1, tmp2);
-    cbz(rscratch2, SAME_CHARS);
-    b(DONE);
-  }
-
-  bind(SHORT_STRING);
-  // Is the length zero?
-  cbz(cnt, SAME_CHARS);
-
-  bind(SHORT_LOOP);
-  load_unsigned_short(tmp1, Address(post(str1, 2)));
-  load_unsigned_short(tmp2, Address(post(str2, 2)));
-  subw(tmp1, tmp1, tmp2);
-  cbnz(tmp1, DONE);
-  sub(cnt, cnt, 1);
-  cbnz(cnt, SHORT_LOOP);
-
-  // Strings are equal.
-  bind(SAME_CHARS);
-  mov(result, true);
-
-  // That's it
-  bind(DONE);
-
-  BLOCK_COMMENT("} string_equals");
-}
-
-
-void MacroAssembler::byte_arrays_equals(Register ary1, Register ary2,
-                                        Register result, Register tmp1)
+// Compare Strings or char/byte arrays.
+
+// is_string is true iff this is a string comparison.
+
+// For Strings we're passed the address of the first characters in a1
+// and a2 and the length in cnt1.
+
+// For byte and char arrays we're passed the arrays themselves and we
+// have to extract length fields and do null checks here.
+
+// elem_size is the element size in bytes: either 1 or 2.
+
+// There are two implementations.  For arrays >= 8 bytes, all
+// comparisons (including the final one, which may overlap) are
+// performed 8 bytes at a time.  For arrays < 8 bytes, we compare a
+// halfword, then a short, and then a byte.
+
+void MacroAssembler::arrays_equals(Register a1, Register a2,
+                                   Register result, Register cnt1,
+                                   int elem_size, bool is_string)
 {
-  Register cnt1 = rscratch1;
-  Register cnt2 = rscratch2;
+  Label SAME, DONE, SHORT, NEXT_WORD, ONE;
+  Register tmp1 = rscratch1;
   Register tmp2 = rscratch2;
-
-  Label SAME, DIFFER, NEXT, TAIL07, TAIL03, TAIL01;
-
-  int length_offset  = arrayOopDesc::length_offset_in_bytes();
-  int base_offset    = arrayOopDesc::base_offset_in_bytes(T_BYTE);
-
-  BLOCK_COMMENT("byte_arrays_equals  {");
-
-    // different until proven equal
-    mov(result, false);
-
-    // same array?
-    cmp(ary1, ary2);
-    br(Assembler::EQ, SAME);
-
-    // ne if either null
-    cbz(ary1, DIFFER);
-    cbz(ary2, DIFFER);
-
-    // lengths ne?
-    ldrw(cnt1, Address(ary1, length_offset));
-    ldrw(cnt2, Address(ary2, length_offset));
-    cmp(cnt1, cnt2);
-    br(Assembler::NE, DIFFER);
-
-    lea(ary1, Address(ary1, base_offset));
-    lea(ary2, Address(ary2, base_offset));
-
-    subs(cnt1, cnt1, 8);
-    br(LT, TAIL07);
-
-  BIND(NEXT);
-    ldr(tmp1, Address(post(ary1, 8)));
-    ldr(tmp2, Address(post(ary2, 8)));
-    subs(cnt1, cnt1, 8);
+  Register cnt2 = tmp2;  // cnt2 only used in array length compare
+  int elem_per_word = wordSize/elem_size;
+  int log_elem_size = exact_log2(elem_size);
+  int length_offset = arrayOopDesc::length_offset_in_bytes();
+  int base_offset
+    = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
+
+  assert(elem_size == 1 || elem_size == 2, "must be char or byte");
+  assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2);
+
+  BLOCK_COMMENT(is_string ? "string_equals {" : "array_equals {");
+
+  mov(result, false);
+
+  if (!is_string) {
+    // if (a1==a2)
+    //     return true;
+    eor(rscratch1, a1, a2);
+    cbz(rscratch1, SAME);
+    // if (a1==null || a2==null)
+    //     return false;
+    cbz(a1, DONE);
+    cbz(a2, DONE);
+    // if (a1.length != a2.length)
+    //      return false;
+    ldrw(cnt1, Address(a1, length_offset));
+    ldrw(cnt2, Address(a2, length_offset));
+    eorw(tmp1, cnt1, cnt2);
+    cbnzw(tmp1, DONE);
+
+    lea(a1, Address(a1, base_offset));
+    lea(a2, Address(a2, base_offset));
+  }
+
+  // Check for short strings, i.e. smaller than wordSize.
+  subs(cnt1, cnt1, elem_per_word);
+  br(Assembler::LT, SHORT);
+  // Main 8 byte comparison loop.
+  bind(NEXT_WORD); {
+    ldr(tmp1, Address(post(a1, wordSize)));
+    ldr(tmp2, Address(post(a2, wordSize)));
+    subs(cnt1, cnt1, elem_per_word);
     eor(tmp1, tmp1, tmp2);
-    cbnz(tmp1, DIFFER);
-    br(GE, NEXT);
-
-  BIND(TAIL07);  // 0-7 bytes left, cnt1 = #bytes left - 4
-    tst(cnt1, 0b100);
-    br(EQ, TAIL03);
-    ldrw(tmp1, Address(post(ary1, 4)));
-    ldrw(tmp2, Address(post(ary2, 4)));
-    cmp(tmp1, tmp2);
-    br(NE, DIFFER);
-
-  BIND(TAIL03);  // 0-3 bytes left, cnt1 = #bytes left - 4
-    tst(cnt1, 0b10);
-    br(EQ, TAIL01);
-    ldrh(tmp1, Address(post(ary1, 2)));
-    ldrh(tmp2, Address(post(ary2, 2)));
-    cmp(tmp1, tmp2);
-    br(NE, DIFFER);
-  BIND(TAIL01);  // 0-1 byte left
-    tst(cnt1, 0b01);
-    br(EQ, SAME);
-    ldrb(tmp1, ary1);
-    ldrb(tmp2, ary2);
-    cmp(tmp1, tmp2);
-    br(NE, DIFFER);
-
-  BIND(SAME);
-    mov(result, true);
-  BIND(DIFFER); // result already set
-
-  BLOCK_COMMENT("} byte_arrays_equals");
+    cbnz(tmp1, DONE);
+  } br(GT, NEXT_WORD);
+  // Last longword.  In the case where length == 4 we compare the
+  // same longword twice, but that's still faster than another
+  // conditional branch.
+  // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
+  // length == 4.
+  if (log_elem_size > 0)
+    lsl(cnt1, cnt1, log_elem_size);
+  ldr(tmp1, Address(a1, cnt1));
+  ldr(tmp2, Address(a2, cnt1));
+  eor(tmp1, tmp1, tmp2);
+  cbnz(tmp1, DONE);
+  b(SAME);
+
+  bind(SHORT);
+  Label TAIL03, TAIL01;
+
+  tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left.
+  {
+    ldrw(tmp1, Address(post(a1, 4)));
+    ldrw(tmp2, Address(post(a2, 4)));
+    eorw(tmp1, tmp1, tmp2);
+    cbnzw(tmp1, DONE);
+  }
+  bind(TAIL03);
+  tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left.
+  {
+    ldrh(tmp1, Address(post(a1, 2)));
+    ldrh(tmp2, Address(post(a2, 2)));
+    eorw(tmp1, tmp1, tmp2);
+    cbnzw(tmp1, DONE);
+  }
+  bind(TAIL01);
+  if (elem_size == 1) { // Only needed when comparing byte arrays.
+    tbz(cnt1, 0, SAME); // 0-1 bytes left.
+    {
+      ldrb(tmp1, a1);
+      ldrb(tmp2, a2);
+      eorw(tmp1, tmp1, tmp2);
+      cbnzw(tmp1, DONE);
+    }
+  }
+  // Arrays are equal.
+  bind(SAME);
+  mov(result, true);
+
+  // That's it.
+  bind(DONE);
+  BLOCK_COMMENT(is_string ? "} string_equals" : "} array_equals");
 }
 
-// Compare char[] arrays aligned to 4 bytes
-void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
-                                        Register result, Register tmp1)
-{
-  Register cnt1 = rscratch1;
-  Register cnt2 = rscratch2;
-  Register tmp2 = rscratch2;
-
-  Label SAME, DIFFER, NEXT, TAIL03, TAIL01;
-
-  int length_offset  = arrayOopDesc::length_offset_in_bytes();
-  int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);
-
-  BLOCK_COMMENT("char_arrays_equals  {");
-
-    // different until proven equal
-    mov(result, false);
-
-    // same array?
-    cmp(ary1, ary2);
-    br(Assembler::EQ, SAME);
-
-    // ne if either null
-    cbz(ary1, DIFFER);
-    cbz(ary2, DIFFER);
-
-    // lengths ne?
-    ldrw(cnt1, Address(ary1, length_offset));
-    ldrw(cnt2, Address(ary2, length_offset));
-    cmp(cnt1, cnt2);
-    br(Assembler::NE, DIFFER);
-
-    lea(ary1, Address(ary1, base_offset));
-    lea(ary2, Address(ary2, base_offset));
-
-    subs(cnt1, cnt1, 4);
-    br(LT, TAIL03);
-
-  BIND(NEXT);
-    ldr(tmp1, Address(post(ary1, 8)));
-    ldr(tmp2, Address(post(ary2, 8)));
-    subs(cnt1, cnt1, 4);
-    eor(tmp1, tmp1, tmp2);
-    cbnz(tmp1, DIFFER);
-    br(GE, NEXT);
-
-  BIND(TAIL03);  // 0-3 chars left, cnt1 = #chars left - 4
-    tst(cnt1, 0b10);
-    br(EQ, TAIL01);
-    ldrw(tmp1, Address(post(ary1, 4)));
-    ldrw(tmp2, Address(post(ary2, 4)));
-    cmp(tmp1, tmp2);
-    br(NE, DIFFER);
-  BIND(TAIL01);  // 0-1 chars left
-    tst(cnt1, 0b01);
-    br(EQ, SAME);
-    ldrh(tmp1, ary1);
-    ldrh(tmp2, ary2);
-    cmp(tmp1, tmp2);
-    br(NE, DIFFER);
-
-  BIND(SAME);
-    mov(result, true);
-  BIND(DIFFER); // result already set
-
-  BLOCK_COMMENT("} char_arrays_equals");
-}
 
 // encode char[] to byte[] in ISO_8859_1
 void MacroAssembler::encode_iso_array(Register src, Register dst,
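(Editor's cross-reference, not part of the changeset.) The single arrays_equals routine above replaces the three former entry points; the aarch64.ad rules earlier in this patch call it with the following shapes, shown side by side for readability (the register names are the operands used in those rules):

    // Call shapes taken from the aarch64.ad rules shown earlier in this patch:
    __ arrays_equals(str1, str2, result, cnt, 2, /*is_string*/true);   // string_equals (char data, count pre-loaded)
    __ arrays_equals(ary1, ary2, result, tmp, 1, /*is_string*/false);  // byte[] equals (null and length checks done inside)
    __ arrays_equals(ary1, ary2, result, tmp, 2, /*is_string*/false);  // char[] equals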
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1186,13 +1186,11 @@
   void string_compare(Register str1, Register str2,
                       Register cnt1, Register cnt2, Register result,
                       Register tmp1);
-  void string_equals(Register str1, Register str2,
-                     Register cnt, Register result,
-                     Register tmp1);
-  void char_arrays_equals(Register ary1, Register ary2,
-                          Register result, Register tmp1);
-  void byte_arrays_equals(Register ary1, Register ary2,
-                          Register result, Register tmp1);
+
+  void arrays_equals(Register a1, Register a2,
+                     Register result, Register cnt1,
+                     int elem_size, bool is_string);
+
   void encode_iso_array(Register src, Register dst,
                         Register len, Register result,
                         FloatRegister Vtmp1, FloatRegister Vtmp2,
--- a/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -105,13 +105,20 @@
   inline friend NativeInstruction* nativeInstruction_at(address address);
 
   static bool is_adrp_at(address instr);
+
   static bool is_ldr_literal_at(address instr);
+
+  bool is_ldr_literal() {
+    return is_ldr_literal_at(addr_at(0));
+  }
+
   static bool is_ldrw_to_zr(address instr);
 
   static bool is_call_at(address instr) {
     const uint32_t insn = (*(uint32_t*)instr);
     return (insn >> 26) == 0b100101;
   }
+
   bool is_call() {
     return is_call_at(addr_at(0));
   }
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -163,30 +163,20 @@
     sp_after_call_off = -26,
 
     d15_off            = -26,
-    d14_off            = -25,
     d13_off            = -24,
-    d12_off            = -23,
     d11_off            = -22,
-    d10_off            = -21,
     d9_off             = -20,
-    d8_off             = -19,
 
     r28_off            = -18,
-    r27_off            = -17,
     r26_off            = -16,
-    r25_off            = -15,
     r24_off            = -14,
-    r23_off            = -13,
     r22_off            = -12,
-    r21_off            = -11,
     r20_off            = -10,
-    r19_off            =  -9,
     call_wrapper_off   =  -8,
     result_off         =  -7,
     result_type_off    =  -6,
     method_off         =  -5,
     entry_point_off    =  -4,
-    parameters_off     =  -3,
     parameter_size_off =  -2,
     thread_off         =  -1,
     fp_f               =   0,
@@ -208,30 +198,20 @@
     const Address result_type   (rfp, result_type_off    * wordSize);
     const Address method        (rfp, method_off         * wordSize);
     const Address entry_point   (rfp, entry_point_off    * wordSize);
-    const Address parameters    (rfp, parameters_off     * wordSize);
     const Address parameter_size(rfp, parameter_size_off * wordSize);
 
     const Address thread        (rfp, thread_off         * wordSize);
 
     const Address d15_save      (rfp, d15_off * wordSize);
-    const Address d14_save      (rfp, d14_off * wordSize);
     const Address d13_save      (rfp, d13_off * wordSize);
-    const Address d12_save      (rfp, d12_off * wordSize);
     const Address d11_save      (rfp, d11_off * wordSize);
-    const Address d10_save      (rfp, d10_off * wordSize);
     const Address d9_save       (rfp, d9_off * wordSize);
-    const Address d8_save       (rfp, d8_off * wordSize);
 
     const Address r28_save      (rfp, r28_off * wordSize);
-    const Address r27_save      (rfp, r27_off * wordSize);
     const Address r26_save      (rfp, r26_off * wordSize);
-    const Address r25_save      (rfp, r25_off * wordSize);
     const Address r24_save      (rfp, r24_off * wordSize);
-    const Address r23_save      (rfp, r23_off * wordSize);
     const Address r22_save      (rfp, r22_off * wordSize);
-    const Address r21_save      (rfp, r21_off * wordSize);
     const Address r20_save      (rfp, r20_off * wordSize);
-    const Address r19_save      (rfp, r19_off * wordSize);
 
     // stub code
 
@@ -254,31 +234,20 @@
     // rthread because we want to sanity check rthread later
     __ str(c_rarg7,  thread);
     __ strw(c_rarg6, parameter_size);
-    __ str(c_rarg5,  parameters);
-    __ str(c_rarg4,  entry_point);
-    __ str(c_rarg3,  method);
-    __ str(c_rarg2,  result_type);
-    __ str(c_rarg1,  result);
-    __ str(c_rarg0,  call_wrapper);
-    __ str(r19,      r19_save);
-    __ str(r20,      r20_save);
-    __ str(r21,      r21_save);
-    __ str(r22,      r22_save);
-    __ str(r23,      r23_save);
-    __ str(r24,      r24_save);
-    __ str(r25,      r25_save);
-    __ str(r26,      r26_save);
-    __ str(r27,      r27_save);
-    __ str(r28,      r28_save);
-
-    __ strd(v8,      d8_save);
-    __ strd(v9,      d9_save);
-    __ strd(v10,     d10_save);
-    __ strd(v11,     d11_save);
-    __ strd(v12,     d12_save);
-    __ strd(v13,     d13_save);
-    __ strd(v14,     d14_save);
-    __ strd(v15,     d15_save);
+    __ stp(c_rarg4, c_rarg5,  entry_point);
+    __ stp(c_rarg2, c_rarg3,  result_type);
+    __ stp(c_rarg0, c_rarg1,  call_wrapper);
+
+    __ stp(r20, r19,   r20_save);
+    __ stp(r22, r21,   r22_save);
+    __ stp(r24, r23,   r24_save);
+    __ stp(r26, r25,   r26_save);
+    __ stp(r28, r27,   r28_save);
+
+    __ stpd(v9,  v8,   d9_save);
+    __ stpd(v11, v10,  d11_save);
+    __ stpd(v13, v12,  d13_save);
+    __ stpd(v15, v14,  d15_save);
 
     // install Java thread in global register now we have saved
     // whatever value it held
@@ -385,33 +354,22 @@
 #endif
 
     // restore callee-save registers
-    __ ldrd(v15,      d15_save);
-    __ ldrd(v14,      d14_save);
-    __ ldrd(v13,      d13_save);
-    __ ldrd(v12,      d12_save);
-    __ ldrd(v11,      d11_save);
-    __ ldrd(v10,      d10_save);
-    __ ldrd(v9,       d9_save);
-    __ ldrd(v8,       d8_save);
-
-    __ ldr(r28,      r28_save);
-    __ ldr(r27,      r27_save);
-    __ ldr(r26,      r26_save);
-    __ ldr(r25,      r25_save);
-    __ ldr(r24,      r24_save);
-    __ ldr(r23,      r23_save);
-    __ ldr(r22,      r22_save);
-    __ ldr(r21,      r21_save);
-    __ ldr(r20,      r20_save);
-    __ ldr(r19,      r19_save);
-    __ ldr(c_rarg0,  call_wrapper);
-    __ ldr(c_rarg1,  result);
+    __ ldpd(v15, v14,  d15_save);
+    __ ldpd(v13, v12,  d13_save);
+    __ ldpd(v11, v10,  d11_save);
+    __ ldpd(v9,  v8,   d9_save);
+
+    __ ldp(r28, r27,   r28_save);
+    __ ldp(r26, r25,   r26_save);
+    __ ldp(r24, r23,   r24_save);
+    __ ldp(r22, r21,   r22_save);
+    __ ldp(r20, r19,   r20_save);
+
+    __ ldp(c_rarg0, c_rarg1,  call_wrapper);
     __ ldrw(c_rarg2, result_type);
     __ ldr(c_rarg3,  method);
-    __ ldr(c_rarg4,  entry_point);
-    __ ldr(c_rarg5,  parameters);
-    __ ldr(c_rarg6,  parameter_size);
-    __ ldr(c_rarg7,  thread);
+    __ ldp(c_rarg4, c_rarg5,  entry_point);
+    __ ldp(c_rarg6, c_rarg7,  parameter_size);
 
 #ifndef PRODUCT
     // tell the simulator we are about to end Java execution
@@ -666,7 +624,7 @@
   //     count   -  element count
   //     tmp     - scratch register
   //
-  //     Destroy no registers!
+  //     Destroy no registers except rscratch1 and rscratch2
   //
   void  gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
     BarrierSet* bs = Universe::heap()->barrier_set();
@@ -674,12 +632,13 @@
     case BarrierSet::G1SATBCTLogging:
       // With G1, don't generate the call if we statically know that the target in uninitialized
       if (!dest_uninitialized) {
-        __ push(RegSet::range(r0, r29), sp);         // integer registers except lr & sp
+        __ push_call_clobbered_registers();
         if (count == c_rarg0) {
           if (addr == c_rarg1) {
             // exactly backwards!!
-            __ stp(c_rarg0, c_rarg1, __ pre(sp, -2 * wordSize));
-            __ ldp(c_rarg1, c_rarg0, __ post(sp, -2 * wordSize));
+            __ mov(rscratch1, c_rarg0);
+            __ mov(c_rarg0, c_rarg1);
+            __ mov(c_rarg1, rscratch1);
           } else {
             __ mov(c_rarg1, count);
             __ mov(c_rarg0, addr);
@@ -689,7 +648,7 @@
           __ mov(c_rarg1, count);
         }
         __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
-        __ pop(RegSet::range(r0, r29), sp);         // integer registers except lr & sp        }
+        __ pop_call_clobbered_registers();
         break;
       case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
@@ -719,7 +678,7 @@
       case BarrierSet::G1SATBCTLogging:
 
         {
-          __ push(RegSet::range(r0, r29), sp);         // integer registers except lr & sp
+          __ push_call_clobbered_registers();
           // must compute element count unless barrier set interface is changed (other platforms supply count)
           assert_different_registers(start, end, scratch);
           __ lea(scratch, Address(end, BytesPerHeapOop));
@@ -728,7 +687,7 @@
           __ mov(c_rarg0, start);
           __ mov(c_rarg1, scratch);
           __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
-          __ pop(RegSet::range(r0, r29), sp);         // integer registers except lr & sp        }
+          __ pop_call_clobbered_registers();
         }
         break;
       case BarrierSet::CardTableForRS:
@@ -1394,10 +1353,10 @@
   //   no-overlap entry point used by generate_conjoint_long_oop_copy().
   //
   address generate_disjoint_oop_copy(bool aligned, address *entry,
-                                     const char *name, bool dest_uninitialized = false) {
+                                     const char *name, bool dest_uninitialized) {
     const bool is_oop = true;
     const size_t size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
-    return generate_disjoint_copy(size, aligned, is_oop, entry, name);
+    return generate_disjoint_copy(size, aligned, is_oop, entry, name, dest_uninitialized);
   }
 
   // Arguments:
@@ -1412,10 +1371,11 @@
   //
   address generate_conjoint_oop_copy(bool aligned,
                                      address nooverlap_target, address *entry,
-                                     const char *name, bool dest_uninitialized = false) {
+                                     const char *name, bool dest_uninitialized) {
     const bool is_oop = true;
     const size_t size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
-    return generate_conjoint_copy(size, aligned, is_oop, nooverlap_target, entry, name);
+    return generate_conjoint_copy(size, aligned, is_oop, nooverlap_target, entry,
+                                  name, dest_uninitialized);
   }
 
 
@@ -1522,6 +1482,8 @@
     }
 #endif //ASSERT
 
+    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
+
     // save the original count
     __ mov(count_save, count);
 
@@ -1988,9 +1950,11 @@
       bool aligned = !UseCompressedOops;
 
       StubRoutines::_arrayof_oop_disjoint_arraycopy
-        = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy");
+        = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy",
+                                     /*dest_uninitialized*/false);
       StubRoutines::_arrayof_oop_arraycopy
-        = generate_conjoint_oop_copy(aligned, entry, &entry_oop_arraycopy, "arrayof_oop_arraycopy");
+        = generate_conjoint_oop_copy(aligned, entry, &entry_oop_arraycopy, "arrayof_oop_arraycopy",
+                                     /*dest_uninitialized*/false);
       // Aligned versions without pre-barriers
       StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit
         = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy_uninit",
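(Editor's note on the call_stub frame changes above, not part of the changeset.) Folding the saves into stp/stpd and ldp/ldpd pairs is why every other *_off enumerator and *_save Address could be deleted: the second register of each pair simply lands in the next frame slot. For example, under the offsets kept by the patch:

    // Sketch of what one paired store covers (wordSize == 8 on aarch64):
    //   r20_off = -10, so the old r19_off (-9) slot is r20_off + 1
    __ stp(r20, r19, r20_save);   // r20 -> [rfp - 10*wordSize], r19 -> [rfp - 9*wordSize]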
--- a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,8 +74,7 @@
 
 define_pd_global(uintx, TypeProfileLevel, 111);
 
-// No performance work done here yet.
-define_pd_global(bool, CompactStrings, false);
+define_pd_global(bool, CompactStrings, true);
 
 // Platform dependent flag handling: flags only defined on this platform.
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint)  \
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -45,6 +45,9 @@
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #endif // INCLUDE_ALL_GCS
+#ifdef COMPILER2
+#include "opto/intrinsicnode.hpp"
+#endif
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) // nothing
@@ -3168,6 +3171,553 @@
 
 /////////////////////////////////////////// String intrinsics ////////////////////////////////////////////
 
+#ifdef COMPILER2
+// Intrinsics for CompactStrings
+
+// Compress char[] to byte[] by compressing 16 bytes at once.
+void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt,
+                                        Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5,
+                                        Label& Lfailure) {
+
+  const Register tmp0 = R0;
+  assert_different_registers(src, dst, cnt, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
+  Label Lloop, Lslow;
+
+  // Check if cnt >= 8 (= 16 bytes)
+  lis(tmp1, 0xFF);                // tmp1 = 0x00FF00FF00FF00FF
+  srwi_(tmp2, cnt, 3);
+  beq(CCR0, Lslow);
+  ori(tmp1, tmp1, 0xFF);
+  rldimi(tmp1, tmp1, 32, 0);
+  mtctr(tmp2);
+
+  // 2x unrolled loop
+  bind(Lloop);
+  ld(tmp2, 0, src);               // _0_1_2_3 (Big Endian)
+  ld(tmp4, 8, src);               // _4_5_6_7
+
+  orr(tmp0, tmp2, tmp4);
+  rldicl(tmp3, tmp2, 6*8, 64-24); // _____1_2
+  rldimi(tmp2, tmp2, 2*8, 2*8);   // _0_2_3_3
+  rldicl(tmp5, tmp4, 6*8, 64-24); // _____5_6
+  rldimi(tmp4, tmp4, 2*8, 2*8);   // _4_6_7_7
+
+  andc_(tmp0, tmp0, tmp1);
+  bne(CCR0, Lfailure);            // Not latin1.
+  addi(src, src, 16);
+
+  rlwimi(tmp3, tmp2, 0*8, 24, 31);// _____1_3
+  srdi(tmp2, tmp2, 3*8);          // ____0_2_
+  rlwimi(tmp5, tmp4, 0*8, 24, 31);// _____5_7
+  srdi(tmp4, tmp4, 3*8);          // ____4_6_
+
+  orr(tmp2, tmp2, tmp3);          // ____0123
+  orr(tmp4, tmp4, tmp5);          // ____4567
+
+  stw(tmp2, 0, dst);
+  stw(tmp4, 4, dst);
+  addi(dst, dst, 8);
+  bdnz(Lloop);
+
+  bind(Lslow);                    // Fallback to slow version
+}
+
+// Compress char[] to byte[]. cnt must be positive int.
+void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register tmp, Label& Lfailure) {
+  Label Lloop;
+  mtctr(cnt);
+
+  bind(Lloop);
+  lhz(tmp, 0, src);
+  cmplwi(CCR0, tmp, 0xff);
+  bgt(CCR0, Lfailure);            // Not latin1.
+  addi(src, src, 2);
+  stb(tmp, 0, dst);
+  addi(dst, dst, 1);
+  bdnz(Lloop);
+}
+
+// Inflate byte[] to char[] by inflating 16 bytes at once.
+void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt,
+                                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
+  const Register tmp0 = R0;
+  assert_different_registers(src, dst, cnt, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
+  Label Lloop, Lslow;
+
+  // Check if cnt >= 8
+  srwi_(tmp2, cnt, 3);
+  beq(CCR0, Lslow);
+  lis(tmp1, 0xFF);                // tmp1 = 0x00FF00FF
+  ori(tmp1, tmp1, 0xFF);
+  mtctr(tmp2);
+
+  // 2x unrolled loop
+  bind(Lloop);
+  lwz(tmp2, 0, src);              // ____0123 (Big Endian)
+  lwz(tmp4, 4, src);              // ____4567
+  addi(src, src, 8);
+
+  rldicl(tmp3, tmp2, 7*8, 64-8);  // _______2
+  rlwimi(tmp2, tmp2, 3*8, 16, 23);// ____0113
+  rldicl(tmp5, tmp4, 7*8, 64-8);  // _______6
+  rlwimi(tmp4, tmp4, 3*8, 16, 23);// ____4557
+
+  andc(tmp0, tmp2, tmp1);         // ____0_1_
+  rlwimi(tmp2, tmp3, 2*8, 0, 23); // _____2_3
+  andc(tmp3, tmp4, tmp1);         // ____4_5_
+  rlwimi(tmp4, tmp5, 2*8, 0, 23); // _____6_7
+
+  rldimi(tmp2, tmp0, 3*8, 0*8);   // _0_1_2_3
+  rldimi(tmp4, tmp3, 3*8, 0*8);   // _4_5_6_7
+
+  std(tmp2, 0, dst);
+  std(tmp4, 8, dst);
+  addi(dst, dst, 16);
+  bdnz(Lloop);
+
+  bind(Lslow);                    // Fallback to slow version
+}
+
+// Inflate byte[] to char[]. cnt must be positive int.
+void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
+  Label Lloop;
+  mtctr(cnt);
+
+  bind(Lloop);
+  lbz(tmp, 0, src);
+  addi(src, src, 1);
+  sth(tmp, 0, dst);
+  addi(dst, dst, 2);
+  bdnz(Lloop);
+}
+
+void MacroAssembler::string_compare(Register str1, Register str2,
+                                    Register cnt1, Register cnt2,
+                                    Register tmp1, Register result, int ae) {
+  const Register tmp0 = R0,
+                 diff = tmp1;
+
+  assert_different_registers(str1, str2, cnt1, cnt2, tmp0, tmp1, result);
+  Label Ldone, Lslow, Lloop, Lreturn_diff;
+
+  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
+  // we interchange str1 and str2 in the UL case and negate the result.
+  // Like this, str1 is always latin1 encoded, except for the UU case.
+  // In addition, we need 0 (or sign which is 0) extend.
+
+  if (ae == StrIntrinsicNode::UU) {
+    srwi(cnt1, cnt1, 1);
+  } else {
+    clrldi(cnt1, cnt1, 32);
+  }
+
+  if (ae != StrIntrinsicNode::LL) {
+    srwi(cnt2, cnt2, 1);
+  } else {
+    clrldi(cnt2, cnt2, 32);
+  }
+
+  // See if the lengths are different, and calculate min in cnt1.
+  // Save diff in case we need it for a tie-breaker.
+  subf_(diff, cnt2, cnt1); // diff = cnt1 - cnt2
+  // if (diff > 0) { cnt1 = cnt2; }
+  if (VM_Version::has_isel()) {
+    isel(cnt1, CCR0, Assembler::greater, /*invert*/ false, cnt2);
+  } else {
+    Label Lskip;
+    blt(CCR0, Lskip);
+    mr(cnt1, cnt2);
+    bind(Lskip);
+  }
+
+  // Rename registers
+  Register chr1 = result;
+  Register chr2 = tmp0;
+
+  // Compare multiple characters in fast loop (only implemented for same encoding).
+  int stride1 = 8, stride2 = 8;
+  if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+    int log2_chars_per_iter = (ae == StrIntrinsicNode::LL) ? 3 : 2;
+    Label Lfastloop, Lskipfast;
+
+    srwi_(tmp0, cnt1, log2_chars_per_iter);
+    beq(CCR0, Lskipfast);
+    rldicl(cnt2, cnt1, 0, 64 - log2_chars_per_iter); // Remaining characters.
+    li(cnt1, 1 << log2_chars_per_iter); // Initialize for failure case: Rescan characters from current iteration.
+    mtctr(tmp0);
+
+    bind(Lfastloop);
+    ld(chr1, 0, str1);
+    ld(chr2, 0, str2);
+    cmpd(CCR0, chr1, chr2);
+    bne(CCR0, Lslow);
+    addi(str1, str1, stride1);
+    addi(str2, str2, stride2);
+    bdnz(Lfastloop);
+    mr(cnt1, cnt2); // Remaining characters.
+    bind(Lskipfast);
+  }
+
+  // Loop which searches the first difference character by character.
+  cmpwi(CCR0, cnt1, 0);
+  beq(CCR0, Lreturn_diff);
+  bind(Lslow);
+  mtctr(cnt1);
+
+  switch (ae) {
+    case StrIntrinsicNode::LL: stride1 = 1; stride2 = 1; break;
+    case StrIntrinsicNode::UL: // fallthru (see comment above)
+    case StrIntrinsicNode::LU: stride1 = 1; stride2 = 2; break;
+    case StrIntrinsicNode::UU: stride1 = 2; stride2 = 2; break;
+    default: ShouldNotReachHere(); break;
+  }
+
+  bind(Lloop);
+  if (stride1 == 1) { lbz(chr1, 0, str1); } else { lhz(chr1, 0, str1); }
+  if (stride2 == 1) { lbz(chr2, 0, str2); } else { lhz(chr2, 0, str2); }
+  subf_(result, chr2, chr1); // result = chr1 - chr2
+  bne(CCR0, Ldone);
+  addi(str1, str1, stride1);
+  addi(str2, str2, stride2);
+  bdnz(Lloop);
+
+  // If strings are equal up to min length, return the length difference.
+  bind(Lreturn_diff);
+  mr(result, diff);
+
+  // Otherwise, return the difference between the first mismatched chars.
+  bind(Ldone);
+  if (ae == StrIntrinsicNode::UL) {
+    neg(result, result); // Negate result (see note above).
+  }
+}
+
+void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2,
+                                  Register limit, Register tmp1, Register result, bool is_byte) {
+  const Register tmp0 = R0;
+  assert_different_registers(ary1, ary2, limit, tmp0, tmp1, result);
+  Label Ldone, Lskiploop, Lloop, Lfastloop, Lskipfast;
+  bool limit_needs_shift = false;
+
+  if (is_array_equ) {
+    const int length_offset = arrayOopDesc::length_offset_in_bytes();
+    const int base_offset   = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
+
+    // Return true if the same array.
+    cmpd(CCR0, ary1, ary2);
+    beq(CCR0, Lskiploop);
+
+    // Return false if one of them is NULL.
+    cmpdi(CCR0, ary1, 0);
+    cmpdi(CCR1, ary2, 0);
+    li(result, 0);
+    cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
+    beq(CCR0, Ldone);
+
+    // Load the lengths of arrays.
+    lwz(limit, length_offset, ary1);
+    lwz(tmp0, length_offset, ary2);
+
+    // Return false if the two arrays are not equal length.
+    cmpw(CCR0, limit, tmp0);
+    bne(CCR0, Ldone);
+
+    // Load array addresses.
+    addi(ary1, ary1, base_offset);
+    addi(ary2, ary2, base_offset);
+  } else {
+    limit_needs_shift = !is_byte;
+    li(result, 0); // Assume not equal.
+  }
+
+  // Rename registers
+  Register chr1 = tmp0;
+  Register chr2 = tmp1;
+
+  // Compare 8 bytes per iteration in fast loop.
+  const int log2_chars_per_iter = is_byte ? 3 : 2;
+
+  srwi_(tmp0, limit, log2_chars_per_iter + (limit_needs_shift ? 1 : 0));
+  beq(CCR0, Lskipfast);
+  mtctr(tmp0);
+
+  bind(Lfastloop);
+  ld(chr1, 0, ary1);
+  ld(chr2, 0, ary2);
+  addi(ary1, ary1, 8);
+  addi(ary2, ary2, 8);
+  cmpd(CCR0, chr1, chr2);
+  bne(CCR0, Ldone);
+  bdnz(Lfastloop);
+
+  bind(Lskipfast);
+  rldicl_(limit, limit, limit_needs_shift ? 64 - 1 : 0, 64 - log2_chars_per_iter); // Remaining characters.
+  beq(CCR0, Lskiploop);
+  mtctr(limit);
+
+  // Character by character.
+  bind(Lloop);
+  if (is_byte) {
+    lbz(chr1, 0, ary1);
+    lbz(chr2, 0, ary2);
+    addi(ary1, ary1, 1);
+    addi(ary2, ary2, 1);
+  } else {
+    lhz(chr1, 0, ary1);
+    lhz(chr2, 0, ary2);
+    addi(ary1, ary1, 2);
+    addi(ary2, ary2, 2);
+  }
+  cmpw(CCR0, chr1, chr2);
+  bne(CCR0, Ldone);
+  bdnz(Lloop);
+
+  bind(Lskiploop);
+  li(result, 1); // All characters are equal.
+  bind(Ldone);
+}
+
+void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
+                                    Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
+                                    Register tmp1, Register tmp2, Register tmp3, Register tmp4, int ae) {
+
+  // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
+  Label L_TooShort, L_Found, L_NotFound, L_End;
+  Register last_addr = haycnt, // Kill haycnt at the beginning.
+  addr      = tmp1,
+  n_start   = tmp2,
+  ch1       = tmp3,
+  ch2       = R0;
+
+  assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
+  const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2;
+  const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1;
+
+  // **************************************************************************************************
+  // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
+  // **************************************************************************************************
+
+  // Compute last haystack addr to use if no match gets found.
+  clrldi(haycnt, haycnt, 32);         // Ensure positive int is valid as 64 bit value.
+  addi(addr, haystack, -h_csize);     // Accesses use pre-increment.
+  if (needlecntval == 0) { // variable needlecnt
+   cmpwi(CCR6, needlecnt, 2);
+   clrldi(needlecnt, needlecnt, 32);  // Ensure positive int is valid as 64 bit value.
+   blt(CCR6, L_TooShort);             // Variable needlecnt: handle short needle separately.
+  }
+
+  if (n_csize == 2) { lwz(n_start, 0, needle); } else { lhz(n_start, 0, needle); } // Load first 2 characters of needle.
+
+  if (needlecntval == 0) { // variable needlecnt
+   subf(ch1, needlecnt, haycnt);      // Last character index to compare is haycnt-needlecnt.
+   addi(needlecnt, needlecnt, -2);    // Rest of needle.
+  } else { // constant needlecnt
+  guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
+  assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
+   addi(ch1, haycnt, -needlecntval);  // Last character index to compare is haycnt-needlecnt.
+   if (needlecntval > 3) { li(needlecnt, needlecntval - 2); } // Rest of needle.
+  }
+
+  if (h_csize == 2) { slwi(ch1, ch1, 1); } // Scale to number of bytes.
+
+  if (ae == StrIntrinsicNode::UL) {
+   srwi(tmp4, n_start, 1*8);          // ___0
+   rlwimi(n_start, tmp4, 2*8, 0, 23); // _0_1
+  }
+
+  add(last_addr, haystack, ch1);      // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
+
+  // Main Loop (now we have at least 2 characters).
+  Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2;
+  bind(L_OuterLoop); // Search for 1st 2 characters.
+  Register addr_diff = tmp4;
+   subf(addr_diff, addr, last_addr);  // Difference between already checked address and last address to check.
+   addi(addr, addr, h_csize);         // This is the new address we want to use for comparing.
+   srdi_(ch2, addr_diff, h_csize);
+   beq(CCR0, L_FinalCheck);           // 2 characters left?
+   mtctr(ch2);                        // num of characters / 2
+  bind(L_InnerLoop);                  // Main work horse (2x unrolled search loop)
+   if (h_csize == 2) {                // Load 2 characters of haystack (ignore alignment).
+    lwz(ch1, 0, addr);
+    lwz(ch2, 2, addr);
+   } else {
+    lhz(ch1, 0, addr);
+    lhz(ch2, 1, addr);
+   }
+   cmpw(CCR0, ch1, n_start);          // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
+   cmpw(CCR1, ch2, n_start);
+   beq(CCR0, L_Comp1);                // Did we find the needle start?
+   beq(CCR1, L_Comp2);
+   addi(addr, addr, 2 * h_csize);
+   bdnz(L_InnerLoop);
+  bind(L_FinalCheck);
+   andi_(addr_diff, addr_diff, h_csize); // Remaining characters not covered by InnerLoop: (num of characters) & 1.
+   beq(CCR0, L_NotFound);
+   if (h_csize == 2) { lwz(ch1, 0, addr); } else { lhz(ch1, 0, addr); } // One position left at which we have to compare.
+   cmpw(CCR1, ch1, n_start);
+   beq(CCR1, L_Comp1);
+  bind(L_NotFound);
+   li(result, -1);                    // not found
+   b(L_End);
+
+   // **************************************************************************************************
+   // Special Case: unfortunately, the variable needle case can be called with needlecnt<2
+   // **************************************************************************************************
+  if (needlecntval == 0) {           // We have to handle these cases separately.
+  Label L_OneCharLoop;
+  bind(L_TooShort);
+   mtctr(haycnt);
+   if (n_csize == 2) { lhz(n_start, 0, needle); } else { lbz(n_start, 0, needle); } // First character of needle
+  bind(L_OneCharLoop);
+   if (h_csize == 2) { lhzu(ch1, 2, addr); } else { lbzu(ch1, 1, addr); }
+   cmpw(CCR1, ch1, n_start);
+   beq(CCR1, L_Found);               // Did we find the one character needle?
+   bdnz(L_OneCharLoop);
+   li(result, -1);                   // Not found.
+   b(L_End);
+  }
+
+  // **************************************************************************************************
+  // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
+  // **************************************************************************************************
+
+  // Compare the rest
+  bind(L_Comp2);
+   addi(addr, addr, h_csize);        // First comparison has failed, 2nd one hit.
+  bind(L_Comp1);                     // Addr points to possible needle start.
+  if (needlecntval != 2) {           // Const needlecnt==2?
+   if (needlecntval != 3) {
+    if (needlecntval == 0) { beq(CCR6, L_Found); } // Variable needlecnt==2?
+    Register n_ind = tmp4,
+             h_ind = n_ind;
+    li(n_ind, 2 * n_csize);          // First 2 characters are already compared, use index 2.
+    mtctr(needlecnt);                // Decremented by 2, still > 0.
+   Label L_CompLoop;
+   bind(L_CompLoop);
+    if (ae == StrIntrinsicNode::UL) {
+      h_ind = ch1;
+      sldi(h_ind, n_ind, 1);
+    }
+    if (n_csize == 2) { lhzx(ch2, needle, n_ind); } else { lbzx(ch2, needle, n_ind); }
+    if (h_csize == 2) { lhzx(ch1, addr, h_ind); } else { lbzx(ch1, addr, h_ind); }
+    cmpw(CCR1, ch1, ch2);
+    bne(CCR1, L_OuterLoop);
+    addi(n_ind, n_ind, n_csize);
+    bdnz(L_CompLoop);
+   } else { // No loop required if there's only one needle character left.
+    if (n_csize == 2) { lhz(ch2, 2 * 2, needle); } else { lbz(ch2, 2 * 1, needle); }
+    if (h_csize == 2) { lhz(ch1, 2 * 2, addr); } else { lbz(ch1, 2 * 1, addr); }
+    cmpw(CCR1, ch1, ch2);
+    bne(CCR1, L_OuterLoop);
+   }
+  }
+  // Return index ...
+  bind(L_Found);
+   subf(result, haystack, addr);     // relative to haystack, ...
+   if (h_csize == 2) { srdi(result, result, 1); } // in characters.
+  bind(L_End);
+} // string_indexof
+
+void MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt,
+                                         Register needle, jchar needleChar, Register tmp1, Register tmp2, bool is_byte) {
+  assert_different_registers(haystack, haycnt, needle, tmp1, tmp2);
+
+  Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_NotFound, L_End;
+  Register addr = tmp1,
+           ch1 = tmp2,
+           ch2 = R0;
+
+  const int h_csize = is_byte ? 1 : 2;
+
+//4:
+   srwi_(tmp2, haycnt, 1);   // Shift right by exact_log2(UNROLL_FACTOR).
+   mr(addr, haystack);
+   beq(CCR0, L_FinalCheck);
+   mtctr(tmp2);              // Move to count register.
+//8:
+  bind(L_InnerLoop);         // Main work horse (2x unrolled search loop).
+   if (!is_byte) {
+    lhz(ch1, 0, addr);
+    lhz(ch2, 2, addr);
+   } else {
+    lbz(ch1, 0, addr);
+    lbz(ch2, 1, addr);
+   }
+   (needle != R0) ? cmpw(CCR0, ch1, needle) : cmplwi(CCR0, ch1, (unsigned int)needleChar);
+   (needle != R0) ? cmpw(CCR1, ch2, needle) : cmplwi(CCR1, ch2, (unsigned int)needleChar);
+   beq(CCR0, L_Found1);      // Did we find the needle?
+   beq(CCR1, L_Found2);
+   addi(addr, addr, 2 * h_csize);
+   bdnz(L_InnerLoop);
+//16:
+  bind(L_FinalCheck);
+   andi_(R0, haycnt, 1);
+   beq(CCR0, L_NotFound);
+   if (!is_byte) { lhz(ch1, 0, addr); } else { lbz(ch1, 0, addr); } // One position left at which we have to compare.
+   (needle != R0) ? cmpw(CCR1, ch1, needle) : cmplwi(CCR1, ch1, (unsigned int)needleChar);
+   beq(CCR1, L_Found1);
+//21:
+  bind(L_NotFound);
+   li(result, -1);           // Not found.
+   b(L_End);
+
+  bind(L_Found2);
+   addi(addr, addr, h_csize);
+//24:
+  bind(L_Found1);            // Return index ...
+   subf(result, haystack, addr); // relative to haystack, ...
+   if (!is_byte) { srdi(result, result, 1); } // in characters.
+  bind(L_End);
+} // string_indexof_char
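+
+// The single-character variant is the same idea without the inner compare loop;
+// roughly (illustrative scalar model, not the emitter itself):
+//
+//   #include <cstddef>
+//
+//   template <typename CharT>
+//   long index_of_char(const CharT* haystack, size_t haycnt, CharT ch) {
+//     size_t i = 0;
+//     for (size_t pairs = haycnt >> 1; pairs != 0; pairs--) {  // L_InnerLoop, 2x unrolled
+//       if (haystack[i]     == ch) return (long)i;             // L_Found1
+//       if (haystack[i + 1] == ch) return (long)(i + 1);       // L_Found2
+//       i += 2;
+//     }
+//     if ((haycnt & 1) && haystack[i] == ch) return (long)i;   // L_FinalCheck
+//     return -1;                                               // L_NotFound
+//   }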
+
+
+void MacroAssembler::has_negatives(Register src, Register cnt, Register result,
+                                   Register tmp1, Register tmp2) {
+  const Register tmp0 = R0;
+  assert_different_registers(src, result, cnt, tmp0, tmp1, tmp2);
+  Label Lfastloop, Lslow, Lloop, Lnoneg, Ldone;
+
+  // Check if cnt >= 8 (= 16 bytes)
+  lis(tmp1, (int)(short)0x8080);  // tmp1 = 0x8080808080808080
+  srwi_(tmp2, cnt, 4);
+  li(result, 1);                  // Assume there's a negative byte.
+  beq(CCR0, Lslow);
+  ori(tmp1, tmp1, 0x8080);
+  rldimi(tmp1, tmp1, 32, 0);
+  mtctr(tmp2);
+
+  // 2x unrolled loop
+  bind(Lfastloop);
+  ld(tmp2, 0, src);
+  ld(tmp0, 8, src);
+
+  orr(tmp0, tmp2, tmp0);
+
+  and_(tmp0, tmp0, tmp1);
+  bne(CCR0, Ldone);               // Found negative byte.
+  addi(src, src, 16);
+
+  bdnz(Lfastloop);
+
+  bind(Lslow);                    // Fallback to slow version
+  rldicl_(tmp0, cnt, 0, 64-4);
+  beq(CCR0, Lnoneg);
+  mtctr(tmp0);
+  bind(Lloop);
+  lbz(tmp0, 0, src);
+  addi(src, src, 1);
+  andi_(tmp0, tmp0, 0x80);
+  bne(CCR0, Ldone);               // Found negative byte.
+  bdnz(Lloop);
+  bind(Lnoneg);
+  li(result, 0);
+
+  bind(Ldone);
+}
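+
+// has_negatives is a SWAR check: OR two 8-byte words together and AND with
+// 0x8080808080808080, so a set sign bit is detected 16 bytes per iteration,
+// with a byte-wise tail for the remaining cnt & 15 bytes. A rough C++ model
+// (hypothetical helper, for illustration only):
+//
+//   #include <cstdint>
+//   #include <cstring>
+//   #include <cstddef>
+//
+//   bool has_negatives_model(const int8_t* src, size_t cnt) {
+//     const uint64_t mask = 0x8080808080808080ULL;
+//     size_t i = 0;
+//     for (; i + 16 <= cnt; i += 16) {          // Lfastloop: 16 bytes per iteration
+//       uint64_t a, b;
+//       std::memcpy(&a, src + i, 8);
+//       std::memcpy(&b, src + i + 8, 8);
+//       if ((a | b) & mask) return true;        // some byte has its sign bit set
+//     }
+//     for (; i < cnt; i++)                      // Lslow/Lloop: byte-wise tail
+//       if (src[i] & 0x80) return true;
+//     return false;                             // Lnoneg
+//   }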
+
+
+// Intrinsics for non-CompactStrings
+
 // Search for a single jchar in a jchar[].
 //
 // Assumes that result differs from all other registers.
@@ -3613,6 +4163,8 @@
   bind(Ldone_false);
 }
 
+#endif // Compiler2
+
 // Helpers for Intrinsic Emitters
 //
 // Revert the byte order of a 32bit value in a register
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -679,6 +679,39 @@
 
   void clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp = R0);
 
+#ifdef COMPILER2
+  // Intrinsics for CompactStrings
+  // Compress char[] to byte[] by compressing 16 bytes at once.
+  void string_compress_16(Register src, Register dst, Register cnt,
+                          Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5,
+                          Label& Lfailure);
+
+  // Compress char[] to byte[]. cnt must be positive int.
+  void string_compress(Register src, Register dst, Register cnt, Register tmp, Label& Lfailure);
+
+  // Inflate byte[] to char[] by inflating 16 bytes at once.
+  void string_inflate_16(Register src, Register dst, Register cnt,
+                         Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
+
+  // Inflate byte[] to char[]. cnt must be positive int.
+  void string_inflate(Register src, Register dst, Register cnt, Register tmp);
+
+  void string_compare(Register str1, Register str2, Register cnt1, Register cnt2,
+                      Register tmp1, Register result, int ae);
+
+  void array_equals(bool is_array_equ, Register ary1, Register ary2,
+                    Register limit, Register tmp1, Register result, bool is_byte);
+
+  void string_indexof(Register result, Register haystack, Register haycnt,
+                      Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
+                      Register tmp1, Register tmp2, Register tmp3, Register tmp4, int ae);
+
+  void string_indexof_char(Register result, Register haystack, Register haycnt,
+                           Register needle, jchar needleChar, Register tmp1, Register tmp2, bool is_byte);
+
+  void has_negatives(Register src, Register cnt, Register result, Register tmp1, Register tmp2);
+
+  // Intrinsics for non-CompactStrings
   // Needle of length 1.
   void string_indexof_1(Register result, Register haystack, Register haycnt,
                         Register needle, jchar needleChar,
@@ -694,6 +727,7 @@
                           Register tmp5_reg);
   void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
                              Register tmp1_reg, Register tmp2_reg);
+#endif
 
   // Emitters for BigInteger.multiplyToLen intrinsic.
   inline void multiply64(Register dest_hi, Register dest_lo,
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Sat Mar 05 20:46:42 2016 -0800
@@ -1,6 +1,6 @@
 //
 // Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+// Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -2024,13 +2024,13 @@
     return (UsePopCountInstruction && VM_Version::has_popcntw());
 
   case Op_StrComp:
-    return SpecialStringCompareTo && !CompactStrings;
+    return SpecialStringCompareTo;
   case Op_StrEquals:
-    return SpecialStringEquals && !CompactStrings;
+    return SpecialStringEquals;
   case Op_StrIndexOf:
-    return SpecialStringIndexOf && !CompactStrings;
+    return SpecialStringIndexOf;
   case Op_StrIndexOfChar:
-    return SpecialStringIndexOf && !CompactStrings;
+    return SpecialStringIndexOf;
   }
 
   return true;  // Per default match rules are supported.
@@ -11022,6 +11022,584 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct string_compareL(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
+                         iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
+  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
+  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
+  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
+  ins_cost(300);
+  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_compare($str1$$Register, $str2$$Register,
+                      $cnt1$$Register, $cnt2$$Register,
+                      $tmp$$Register,
+                      $result$$Register, StrIntrinsicNode::LL);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct string_compareU(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
+                         iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
+  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
+  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
+  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
+  ins_cost(300);
+  format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_compare($str1$$Register, $str2$$Register,
+                      $cnt1$$Register, $cnt2$$Register,
+                      $tmp$$Register,
+                      $result$$Register, StrIntrinsicNode::UU);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct string_compareLU(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
+                          iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
+  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
+  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
+  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
+  ins_cost(300);
+  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_compare($str1$$Register, $str2$$Register,
+                      $cnt1$$Register, $cnt2$$Register,
+                      $tmp$$Register,
+                      $result$$Register, StrIntrinsicNode::LU);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct string_compareUL(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
+                          iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
+  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
+  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
+  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
+  ins_cost(300);
+  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_compare($str2$$Register, $str1$$Register,
+                      $cnt2$$Register, $cnt1$$Register,
+                      $tmp$$Register,
+                      $result$$Register, StrIntrinsicNode::UL);
+  %}
+  ins_pipe(pipe_class_default);
+%}
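+
+// The four StrComp rules differ only in the encoding pair (LL, UU, LU, UL); the UL rule
+// reuses the same emitter with the operand order swapped. Semantically they compute a
+// lexicographic difference; a hedged scalar model of the mixed-encoding case
+// (illustrative only, the real sign handling lives in the string_compare emitter):
+//
+//   #include <cstdint>
+//   #include <cstddef>
+//
+//   // Latin-1 (byte[]) vs. UTF-16 (char[]) compare: result is the difference of the
+//   // first mismatching characters, or the length difference on a prefix match.
+//   int compare_LU(const uint8_t* s1, size_t cnt1, const uint16_t* s2, size_t cnt2) {
+//     size_t n = cnt1 < cnt2 ? cnt1 : cnt2;
+//     for (size_t i = 0; i < n; i++) {
+//       int diff = (int)(uint16_t)s1[i] - (int)s2[i];   // bytes are zero-extended
+//       if (diff != 0) return diff;
+//     }
+//     return (int)cnt1 - (int)cnt2;
+//   }
+//
+//   // The UL case is the mirror image: compare_UL(a, na, b, nb) == -compare_LU(b, nb, a, na),
+//   // which presumably is why string_compareUL above passes str2/cnt2 before str1/cnt1.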
+
+instruct string_equalsL(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt, iRegIdst result,
+                        iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
+  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
+  match(Set result (StrEquals (Binary str1 str2) cnt));
+  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP tmp, KILL ctr, KILL cr0);
+  ins_cost(300);
+  format %{ "String Equals byte[] $str1,$str2,$cnt -> $result \t// KILL $tmp" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ array_equals(false, $str1$$Register, $str2$$Register,
+                    $cnt$$Register, $tmp$$Register,
+                    $result$$Register, true /* byte */);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct string_equalsU(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt, iRegIdst result,
+                        iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
+  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
+  match(Set result (StrEquals (Binary str1 str2) cnt));
+  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP tmp, KILL ctr, KILL cr0);
+  ins_cost(300);
+  format %{ "String Equals char[]  $str1,$str2,$cnt -> $result \t// KILL $tmp" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ array_equals(false, $str1$$Register, $str2$$Register,
+                    $cnt$$Register, $tmp$$Register,
+                    $result$$Register, false /* byte */);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct array_equalsB(rarg1RegP ary1, rarg2RegP ary2, iRegIdst result,
+                       iRegIdst tmp1, iRegIdst tmp2, regCTR ctr, flagsRegCR0 cr0, flagsRegCR0 cr1) %{
+  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
+  match(Set result (AryEq ary1 ary2));
+  effect(TEMP_DEF result, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, KILL ctr, KILL cr0, KILL cr1);
+  ins_cost(300);
+  format %{ "Array Equals $ary1,$ary2 -> $result \t// KILL $tmp1,$tmp2" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ array_equals(true, $ary1$$Register, $ary2$$Register,
+                    $tmp1$$Register, $tmp2$$Register,
+                    $result$$Register, true /* byte */);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct array_equalsC(rarg1RegP ary1, rarg2RegP ary2, iRegIdst result,
+                       iRegIdst tmp1, iRegIdst tmp2, regCTR ctr, flagsRegCR0 cr0, flagsRegCR0 cr1) %{
+  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
+  match(Set result (AryEq ary1 ary2));
+  effect(TEMP_DEF result, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, KILL ctr, KILL cr0, KILL cr1);
+  ins_cost(300);
+  format %{ "Array Equals $ary1,$ary2 -> $result \t// KILL $tmp1,$tmp2" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ array_equals(true, $ary1$$Register, $ary2$$Register,
+                    $tmp1$$Register, $tmp2$$Register,
+                    $result$$Register, false /* byte */);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct indexOf_imm1_char_U(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
+                             immP needleImm, immL offsetImm, immI_1 needlecntImm,
+                             iRegIdst tmp1, iRegIdst tmp2,
+                             flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
+  effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
+  // Required for EA: check if it is still a type_array.
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
+  ins_cost(150);
+
+  format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
+            "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
+
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    immPOper *needleOper = (immPOper *)$needleImm;
+    const TypeOopPtr *t = needleOper->type()->isa_oopptr();
+    ciTypeArray* needle_values = t->const_oop()->as_type_array();  // Pointer to live char *
+    jchar chr;
+#ifdef VM_LITTLE_ENDIAN
+    chr = (((jchar)(unsigned char)needle_values->element_value(1).as_byte()) << 8) |
+           ((jchar)(unsigned char)needle_values->element_value(0).as_byte());
+#else
+    chr = (((jchar)(unsigned char)needle_values->element_value(0).as_byte()) << 8) |
+           ((jchar)(unsigned char)needle_values->element_value(1).as_byte());
+#endif
+    __ string_indexof_char($result$$Register,
+                           $haystack$$Register, $haycnt$$Register,
+                           R0, chr,
+                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
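+
+// The constant-needle rules fold the single needle character into an immediate. For the
+// UTF-16 variants that immediate is assembled from two bytes of the byte[]-backed needle,
+// and its byte order has to match what the 16-bit haystack load produces; in plain C++
+// terms (illustrative sketch only):
+//
+//   #include <cstdint>
+//
+//   typedef uint16_t jchar_t;   // stand-in for jchar in this sketch
+//
+//   jchar_t compose_needle_char(uint8_t b0, uint8_t b1, bool vm_little_endian) {
+//     return vm_little_endian ? (jchar_t)((b1 << 8) | b0)   // low byte stored first
+//                             : (jchar_t)((b0 << 8) | b1);
+//   }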
+
+instruct indexOf_imm1_char_L(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
+                             immP needleImm, immL offsetImm, immI_1 needlecntImm,
+                             iRegIdst tmp1, iRegIdst tmp2,
+                             flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
+  effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
+  // Required for EA: check if it is still a type_array.
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
+  ins_cost(150);
+
+  format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
+            "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
+
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    immPOper *needleOper = (immPOper *)$needleImm;
+    const TypeOopPtr *t = needleOper->type()->isa_oopptr();
+    ciTypeArray* needle_values = t->const_oop()->as_type_array();  // Pointer to live char *
+    jchar chr = (jchar)needle_values->element_value(0).as_byte();
+    __ string_indexof_char($result$$Register,
+                           $haystack$$Register, $haycnt$$Register,
+                           R0, chr,
+                           $tmp1$$Register, $tmp2$$Register, true /*is_byte*/);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_imm1_char_UL(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
+                              immP needleImm, immL offsetImm, immI_1 needlecntImm,
+                              iRegIdst tmp1, iRegIdst tmp2,
+                              flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
+  effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
+  // Required for EA: check if it is still a type_array.
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
+  ins_cost(150);
+
+  format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
+            "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
+
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    immPOper *needleOper = (immPOper *)$needleImm;
+    const TypeOopPtr *t = needleOper->type()->isa_oopptr();
+    ciTypeArray* needle_values = t->const_oop()->as_type_array();  // Pointer to live char *
+    jchar chr = (jchar)needle_values->element_value(0).as_byte();
+    __ string_indexof_char($result$$Register,
+                           $haystack$$Register, $haycnt$$Register,
+                           R0, chr,
+                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_imm1_U(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
+                        rscratch2RegP needle, immI_1 needlecntImm,
+                        iRegIdst tmp1, iRegIdst tmp2,
+                        flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
+  effect(USE_KILL needle, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
+  // Required for EA: check if it is still a type_array.
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
+  ins_cost(180);
+
+  format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
+            " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Node *ndl = in(operand_index($needle));  // The node that defines needle.
+    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
+    guarantee(needle_values, "sanity");
+    jchar chr;
+#ifdef VM_LITTLE_ENDIAN
+    chr = (((jchar)(unsigned char)needle_values->element_value(1).as_byte()) << 8) |
+           ((jchar)(unsigned char)needle_values->element_value(0).as_byte());
+#else
+    chr = (((jchar)(unsigned char)needle_values->element_value(0).as_byte()) << 8) |
+           ((jchar)(unsigned char)needle_values->element_value(1).as_byte());
+#endif
+    __ string_indexof_char($result$$Register,
+                           $haystack$$Register, $haycnt$$Register,
+                           R0, chr,
+                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_imm1_L(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
+                        rscratch2RegP needle, immI_1 needlecntImm,
+                        iRegIdst tmp1, iRegIdst tmp2,
+                        flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
+  effect(USE_KILL needle, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
+  // Required for EA: check if it is still a type_array.
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
+  ins_cost(180);
+
+  format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
+            " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Node *ndl = in(operand_index($needle));  // The node that defines needle.
+    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
+    guarantee(needle_values, "sanity");
+    jchar chr = (jchar)needle_values->element_value(0).as_byte();
+    __ string_indexof_char($result$$Register,
+                           $haystack$$Register, $haycnt$$Register,
+                           R0, chr,
+                           $tmp1$$Register, $tmp2$$Register, true /*is_byte*/);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_imm1_UL(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
+                         rscratch2RegP needle, immI_1 needlecntImm,
+                         iRegIdst tmp1, iRegIdst tmp2,
+                         flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
+  effect(USE_KILL needle, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
+  // Required for EA: check if it is still a type_array.
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
+  ins_cost(180);
+
+  format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
+            " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Node *ndl = in(operand_index($needle));  // The node that defines needle.
+    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
+    guarantee(needle_values, "sanity");
+    jchar chr = (jchar)needle_values->element_value(0).as_byte();
+    __ string_indexof_char($result$$Register,
+                           $haystack$$Register, $haycnt$$Register,
+                           R0, chr,
+                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOfChar_U(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
+                       iRegIsrc ch, iRegIdst tmp1, iRegIdst tmp2,
+                       flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
+  match(Set result (StrIndexOfChar (Binary haystack haycnt) ch));
+  effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
+  predicate(CompactStrings);
+  ins_cost(180);
+
+  format %{ "String IndexOfChar $haystack[0..$haycnt], $ch"
+            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_indexof_char($result$$Register,
+                           $haystack$$Register, $haycnt$$Register,
+                           $ch$$Register, 0 /* this is not used if the character is already in a register */,
+                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_imm_U(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
+                       iRegPsrc needle, uimmI15 needlecntImm,
+                       iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
+                       flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
+  effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
+         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
+  // Required for EA: check if it is still a type_array.
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
+  ins_cost(250);
+
+  format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
+            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Node *ndl = in(operand_index($needle));  // The node that defines needle.
+    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
+
+    __ string_indexof($result$$Register,
+                      $haystack$$Register, $haycnt$$Register,
+                      $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
+                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UU);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_imm_L(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
+                       iRegPsrc needle, uimmI15 needlecntImm,
+                       iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
+                       flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
+  effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
+         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
+  // Required for EA: check if it is still a type_array.
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
+  ins_cost(250);
+
+  format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
+            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Node *ndl = in(operand_index($needle));  // The node that defines needle.
+    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
+
+    __ string_indexof($result$$Register,
+                      $haystack$$Register, $haycnt$$Register,
+                      $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
+                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::LL);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_imm_UL(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
+                        iRegPsrc needle, uimmI15 needlecntImm,
+                        iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
+                        flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
+  effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
+         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
+  // Required for EA: check if it is still a type_array.
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
+  ins_cost(250);
+
+  format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
+            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Node *ndl = in(operand_index($needle));  // The node that defines needle.
+    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
+
+    __ string_indexof($result$$Register,
+                      $haystack$$Register, $haycnt$$Register,
+                      $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
+                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UL);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_U(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
+                   iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
+                   flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
+  effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
+         TEMP_DEF result,
+         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
+  ins_cost(300);
+
+  format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
+             " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_indexof($result$$Register,
+                      $haystack$$Register, $haycnt$$Register,
+                      $needle$$Register, NULL, $needlecnt$$Register, 0,  // needlecnt not constant.
+                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UU);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_L(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
+                   iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
+                   flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
+  effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
+         TEMP_DEF result,
+         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
+  ins_cost(300);
+
+  format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
+             " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_indexof($result$$Register,
+                      $haystack$$Register, $haycnt$$Register,
+                      $needle$$Register, NULL, $needlecnt$$Register, 0,  // needlecnt not constant.
+                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::LL);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct indexOf_UL(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
+                    iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
+                    flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
+  effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
+         TEMP_DEF result,
+         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
+  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
+  ins_cost(300);
+
+  format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
+             " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_indexof($result$$Register,
+                      $haystack$$Register, $haycnt$$Register,
+                      $needle$$Register, NULL, $needlecnt$$Register, 0,  // needlecnt not constant.
+                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UL);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// char[] to byte[] compression
+instruct string_compress(rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegIdst result, iRegLdst tmp1,
+                         iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
+  match(Set result (StrCompressedCopy src (Binary dst len)));
+  effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
+         USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
+  ins_cost(300);
+  format %{ "String Compress $src,$dst,$len -> $result \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Label Lskip, Ldone;
+    __ li($result$$Register, 0);
+    __ string_compress_16($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register,
+                          $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, $tmp5$$Register, Ldone);
+    __ rldicl_($tmp1$$Register, $len$$Register, 0, 64-3); // Remaining characters.
+    __ beq(CCR0, Lskip);
+    __ string_compress($src$$Register, $dst$$Register, $tmp1$$Register, $tmp2$$Register, Ldone);
+    __ bind(Lskip);
+    __ mr($result$$Register, $len$$Register);
+    __ bind(Ldone);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// byte[] to char[] inflation
+instruct string_inflate(Universe dummy, rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegLdst tmp1,
+                        iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
+  match(Set dummy (StrInflatedCopy src (Binary dst len)));
+  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
+  ins_cost(300);
+  format %{ "String Inflate $src,$dst,$len \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Label Ldone;
+    __ string_inflate_16($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register,
+                         $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, $tmp5$$Register);
+    __ rldicl_($tmp1$$Register, $len$$Register, 0, 64-3); // Remaining characters.
+    __ beq(CCR0, Ldone);
+    __ string_inflate($src$$Register, $dst$$Register, $tmp1$$Register, $tmp2$$Register);
+    __ bind(Ldone);
+  %}
+  ins_pipe(pipe_class_default);
+%}
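+
+// Both rules follow the same shape: the *_16 helper handles blocks of 8 chars (16 bytes of
+// char[] data), then the scalar helper finishes the remaining len & 7 elements. A rough
+// scalar model of what the pair computes (hypothetical helpers; the compress result
+// protocol mirrors the rule above, which reports len on success and 0 if some char does
+// not fit into a byte):
+//
+//   #include <cstdint>
+//   #include <cstddef>
+//
+//   size_t compress_model(const uint16_t* src, uint8_t* dst, size_t len) {
+//     for (size_t i = 0; i < len; i++) {
+//       if (src[i] > 0xFF) return 0;          // Lfailure: not Latin-1 encodable
+//       dst[i] = (uint8_t)src[i];
+//     }
+//     return len;
+//   }
+//
+//   void inflate_model(const uint8_t* src, uint16_t* dst, size_t len) {
+//     for (size_t i = 0; i < len; i++)
+//       dst[i] = src[i];                      // zero-extend each byte to a char
+//   }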
+
+// StringCoding.java intrinsics
+instruct has_negatives(rarg1RegP ary1, iRegIsrc len, iRegIdst result, iRegLdst tmp1, iRegLdst tmp2,
+                       regCTR ctr, flagsRegCR0 cr0)
+%{
+  match(Set result (HasNegatives ary1 len));
+  effect(TEMP_DEF result, USE_KILL ary1, TEMP tmp1, TEMP tmp2, KILL ctr, KILL cr0);
+  ins_cost(300);
+  format %{ "has negatives byte[] $ary1,$len -> $result \t// KILL $tmp1, $tmp2" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ has_negatives($ary1$$Register, $len$$Register, $result$$Register,
+                     $tmp1$$Register, $tmp2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// encode char[] to byte[] in ISO_8859_1
+instruct encode_iso_array(rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegIdst result, iRegLdst tmp1,
+                          iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
+  match(Set result (EncodeISOArray src (Binary dst len)));
+  effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
+         USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
+  ins_cost(300);
+  format %{ "Encode array $src,$dst,$len -> $result \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Label Lslow, Lfailure1, Lfailure2, Ldone;
+    __ string_compress_16($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register,
+                          $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, $tmp5$$Register, Lfailure1);
+    __ rldicl_($result$$Register, $len$$Register, 0, 64-3); // Remaining characters.
+    __ beq(CCR0, Ldone);
+    __ bind(Lslow);
+    __ string_compress($src$$Register, $dst$$Register, $result$$Register, $tmp2$$Register, Lfailure2);
+    __ li($result$$Register, 0);
+    __ b(Ldone);
+
+    __ bind(Lfailure1);
+    __ mr($result$$Register, $len$$Register);
+    __ mfctr($tmp1$$Register);
+    __ rldimi_($result$$Register, $tmp1$$Register, 3, 0); // Remaining characters.
+    __ beq(CCR0, Ldone);
+    __ b(Lslow);
+
+    __ bind(Lfailure2);
+    __ mfctr($result$$Register); // Remaining characters.
+
+    __ bind(Ldone);
+    __ subf($result$$Register, $result$$Register, $len$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
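+
+// The failure paths recover the loop counter (mfctr) to work out how many characters were
+// left, so the final subf yields the number of leading characters that were successfully
+// encoded. As a scalar model (illustrative only):
+//
+//   #include <cstdint>
+//   #include <cstddef>
+//
+//   size_t encode_iso_model(const uint16_t* src, uint8_t* dst, size_t len) {
+//     size_t i = 0;
+//     for (; i < len; i++) {
+//       if (src[i] > 0xFF) break;             // stop at the first non-Latin-1 char
+//       dst[i] = (uint8_t)src[i];
+//     }
+//     return i;                               // == len - remaining characters
+//   }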
+
+
 // String_IndexOf for needle of length 1.
 //
 // Match needle into immediate operands: no loadConP node needed. Saves one
@@ -11060,11 +11638,11 @@
     if (java_lang_String::has_coder_field()) {
       // New compact strings byte array strings
 #ifdef VM_LITTLE_ENDIAN
-      chr = (((jchar)needle_values->element_value(1).as_byte()) << 8) |
-              (jchar)needle_values->element_value(0).as_byte();
+    chr = (((jchar)(unsigned char)needle_values->element_value(1).as_byte()) << 8) |
+           ((jchar)(unsigned char)needle_values->element_value(0).as_byte());
 #else
-      chr = (((jchar)needle_values->element_value(0).as_byte()) << 8) |
-              (jchar)needle_values->element_value(1).as_byte();
+    chr = (((jchar)(unsigned char)needle_values->element_value(0).as_byte()) << 8) |
+           ((jchar)(unsigned char)needle_values->element_value(1).as_byte());
 #endif
     } else {
       // Old char array strings
@@ -11115,11 +11693,11 @@
     if (java_lang_String::has_coder_field()) {
       // New compact strings byte array strings
 #ifdef VM_LITTLE_ENDIAN
-      chr = (((jchar)needle_values->element_value(1).as_byte()) << 8) |
-              (jchar)needle_values->element_value(0).as_byte();
+    chr = (((jchar)(unsigned char)needle_values->element_value(1).as_byte()) << 8) |
+           ((jchar)(unsigned char)needle_values->element_value(0).as_byte());
 #else
-      chr = (((jchar)needle_values->element_value(0).as_byte()) << 8) |
-              (jchar)needle_values->element_value(1).as_byte();
+    chr = (((jchar)(unsigned char)needle_values->element_value(0).as_byte()) << 8) |
+           ((jchar)(unsigned char)needle_values->element_value(1).as_byte());
 #endif
     } else {
       // Old char array strings
@@ -11321,6 +11899,20 @@
   %}
 %}
 
+instruct minI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
+  match(Set dst (MinI src1 src2));
+  effect(KILL cr0);
+  predicate(VM_Version::has_isel());
+  ins_cost(DEFAULT_COST*2);
+
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ cmpw(CCR0, $src1$$Register, $src2$$Register);
+    __ isel($dst$$Register, CCR0, Assembler::less, /*invert*/false, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct maxI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
   match(Set dst (MaxI src1 src2));
   ins_cost(DEFAULT_COST*6);
@@ -11341,6 +11933,20 @@
   %}
 %}
 
+instruct maxI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
+  match(Set dst (MaxI src1 src2));
+  effect(KILL cr0);
+  predicate(VM_Version::has_isel());
+  ins_cost(DEFAULT_COST*2);
+
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ cmpw(CCR0, $src1$$Register, $src2$$Register);
+    __ isel($dst$$Register, CCR0, Assembler::greater, /*invert*/false, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
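+
+// The isel variants replace the branch-based min/max expansions with a compare followed by
+// a conditional select, so no branch is taken on the data. In C++ terms they compute
+// nothing more than (illustrative):
+//
+//   // cmpw sets the condition register, isel picks one of the two source registers.
+//   static inline int min_isel(int a, int b) { return (a < b) ? a : b; }
+//   static inline int max_isel(int a, int b) { return (a > b) ? a : b; }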
+
 //---------- Population Count Instructions ------------------------------------
 
 // Popcnt for Power7.
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -2609,9 +2609,7 @@
    *   R5_ARG3    - int   length (of buffer)
    *
    * scratch:
-   *   R6_ARG4    - crc table address
-   *   R7_ARG5    - tmp1
-   *   R8_ARG6    - tmp2
+   *   R2, R6-R12
    *
   * Output:
    *   R3_RET     - int   crc result
@@ -2623,22 +2621,25 @@
     address start = __ function_entry();  // Remember stub start address (is rtn value).
 
     // arguments to kernel_crc32:
-    Register       crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
-    Register       data    = R4_ARG2;  // source byte array
-    Register       dataLen = R5_ARG3;  // #bytes to process
-    Register       table   = R6_ARG4;  // crc table address
-
-    Register       t0      = R9;       // work reg for kernel* emitters
-    Register       t1      = R10;      // work reg for kernel* emitters
-    Register       t2      = R11;      // work reg for kernel* emitters
-    Register       t3      = R12;      // work reg for kernel* emitters
+    const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
+    const Register data    = R4_ARG2;  // source byte array
+    const Register dataLen = R5_ARG3;  // #bytes to process
+    const Register table   = R6_ARG4;  // crc table address
+
+    const Register t0      = R2;
+    const Register t1      = R7;
+    const Register t2      = R8;
+    const Register t3      = R9;
+    const Register tc0     = R10;
+    const Register tc1     = R11;
+    const Register tc2     = R12;
 
     BLOCK_COMMENT("Stub body {");
     assert_different_registers(crc, data, dataLen, table);
 
     StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
 
-    __ kernel_crc32_1byte(crc, data, dataLen, table, t0, t1, t2, t3);
+    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table);
 
     BLOCK_COMMENT("return");
     __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
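
    // The stub now feeds kernel_crc32_1word, which consumes a word per step instead of a
    // byte, at the cost of more scratch registers for the extra table columns. The exact
    // kernels and table layout live in macroAssembler_ppc and are not part of this hunk;
    // as a rough model of the difference, a reflected table-driven CRC32 looks like this
    // (illustrative only, the usual pre/post bit inversion is handled by the caller):
    //
    //   #include <cstdint>
    //   #include <cstddef>
    //
    //   // One byte per iteration, single 256-entry table.
    //   uint32_t crc32_1byte(uint32_t crc, const uint8_t* p, size_t len, const uint32_t t0[256]) {
    //     while (len--) crc = (crc >> 8) ^ t0[(crc ^ *p++) & 0xFF];
    //     return crc;
    //   }
    //
    //   // Four bytes per iteration ("slicing"), four derived 256-entry tables.
    //   uint32_t crc32_1word(uint32_t crc, const uint8_t* p, size_t len,
    //                        const uint32_t t0[256], const uint32_t t1[256],
    //                        const uint32_t t2[256], const uint32_t t3[256]) {
    //     while (len >= 4) {
    //       crc ^= (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
    //              ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    //       crc = t3[crc & 0xFF] ^ t2[(crc >> 8) & 0xFF] ^
    //             t1[(crc >> 16) & 0xFF] ^ t0[crc >> 24];
    //       p += 4; len -= 4;
    //     }
    //     while (len--) crc = (crc >> 8) ^ t0[(crc ^ *p++) & 0xFF];
    //     return crc;
    //   }
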
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
 
   // If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
   if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
-    if (VM_Version::has_tcheck() && VM_Version::has_lqarx()) {
+    if (VM_Version::has_lqarx()) {
       FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
     } else if (VM_Version::has_popcntw()) {
       FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
@@ -68,8 +68,7 @@
 
   bool PowerArchitecturePPC64_ok = false;
   switch (PowerArchitecturePPC64) {
-    case 8: if (!VM_Version::has_tcheck() ) break;
-            if (!VM_Version::has_lqarx()  ) break;
+    case 8: if (!VM_Version::has_lqarx()  ) break;
     case 7: if (!VM_Version::has_popcntw()) break;
     case 6: if (!VM_Version::has_cmpb()   ) break;
     case 5: if (!VM_Version::has_popcntb()) break;
@@ -80,7 +79,7 @@
             UINTX_FORMAT " on this machine", PowerArchitecturePPC64);
 
   // Power 8: Configure Data Stream Control Register.
-  if (PowerArchitecturePPC64 >= 8) {
+  if (has_mfdscr()) {
     config_dscr();
   }
 
@@ -112,7 +111,7 @@
   // Create and print feature-string.
   char buf[(num_features+1) * 16]; // Max 16 chars per feature.
   jio_snprintf(buf, sizeof(buf),
-               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s",
+               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s",
                (has_fsqrt()   ? " fsqrt"   : ""),
                (has_isel()    ? " isel"    : ""),
                (has_lxarxeh() ? " lxarxeh" : ""),
@@ -125,7 +124,8 @@
                (has_lqarx()   ? " lqarx"   : ""),
                (has_vcipher() ? " vcipher" : ""),
                (has_vpmsumb() ? " vpmsumb" : ""),
-               (has_tcheck()  ? " tcheck"  : "")
+               (has_tcheck()  ? " tcheck"  : ""),
+               (has_mfdscr()  ? " mfdscr"  : "")
                // Make sure number of %s matches num_features!
               );
   _features_string = os::strdup(buf);
@@ -610,6 +610,7 @@
   a->vcipher(VR0, VR1, VR2);                   // code[10] -> vcipher
   a->vpmsumb(VR0, VR1, VR2);                   // code[11] -> vpmsumb
   a->tcheck(0);                                // code[12] -> tcheck
+  a->mfdscr(R0);                               // code[13] -> mfdscr
   a->blr();
 
   // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@@ -657,6 +658,7 @@
   if (code[feature_cntr++]) features |= vcipher_m;
   if (code[feature_cntr++]) features |= vpmsumb_m;
   if (code[feature_cntr++]) features |= tcheck_m;
+  if (code[feature_cntr++]) features |= mfdscr_m;
 
   // Print the detection code.
   if (PrintAssembly) {
@@ -670,8 +672,6 @@
 
 // Power 8: Configure Data Stream Control Register.
 void VM_Version::config_dscr() {
-  assert(has_tcheck(), "Only execute on Power 8 or later!");
-
   // 7 InstWords for each call (function descriptor + blr instruction).
   const int code_size = (2+2*7)*BytesPerInstWord;
 
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
     vcipher,
     vpmsumb,
     tcheck,
+    mfdscr,
     num_features // last entry to count features
   };
   enum Feature_Flag_Set {
@@ -62,6 +63,7 @@
     vcipher_m             = (1 << vcipher),
     vpmsumb_m             = (1 << vpmsumb),
     tcheck_m              = (1 << tcheck ),
+    mfdscr_m              = (1 << mfdscr ),
     all_features_m        = (unsigned long)-1
   };
 
@@ -94,6 +96,7 @@
   static bool has_vcipher() { return (_features & vcipher_m) != 0; }
   static bool has_vpmsumb() { return (_features & vpmsumb_m) != 0; }
   static bool has_tcheck()  { return (_features & tcheck_m) != 0; }
+  static bool has_mfdscr()  { return (_features & mfdscr_m) != 0; }
 
   // Assembler testing
   static void allow_all();
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1349,9 +1349,12 @@
     }
   } else if (dst.first()->is_stack()) {
     // reg to stack
-    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
+    // Some compilers (gcc) expect a clean 32 bit value on function entry
+    __ signx(src.first()->as_Register(), L5);
+    __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
   } else {
-    __ mov(src.first()->as_Register(), dst.first()->as_Register());
+    // Some compilers (gcc) expect a clean 32 bit value on function entry
+    __ signx(src.first()->as_Register(), dst.first()->as_Register());
   }
 }
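
  // The sparc fix replaces plain moves/stores of 32-bit int arguments with signx, i.e. the
  // value is sign-extended to the full register width before it is handed to native code.
  // In C terms the invariant being restored is simply (illustrative sketch):
  //
  //   #include <cstdint>
  //
  //   // A 32-bit int passed in a 64-bit slot must be in canonical, sign-extended form:
  //   // bits 32..63 replicate bit 31.
  //   int64_t canonical_int_arg(int32_t v) {
  //     return (int64_t)v;      // e.g. (int32_t)-1 -> 0xFFFFFFFFFFFFFFFF
  //   }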
 
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Sat Mar 05 20:46:42 2016 -0800
@@ -948,28 +948,28 @@
   }
 #endif
 
-  uint instr;
-  instr = (Assembler::ldst_op << 30)
-        | (dst_enc        << 25)
-        | (primary        << 19)
-        | (src1_enc       << 14);
+  uint instr = (Assembler::ldst_op << 30)
+             | (dst_enc        << 25)
+             | (primary        << 19)
+             | (src1_enc       << 14);
 
   uint index = src2_enc;
   int disp = disp32;
 
   if (src1_enc == R_SP_enc || src1_enc == R_FP_enc) {
     disp += STACK_BIAS;
-    // Quick fix for JDK-8029668: check that stack offset fits, bailout if not
+    // Check that stack offset fits, load into O7 if not
     if (!Assembler::is_simm13(disp)) {
-      ra->C->record_method_not_compilable("unable to handle large constant offsets");
-      return;
+      MacroAssembler _masm(&cbuf);
+      __ set(disp, O7);
+      if (index != R_G0_enc) {
+        __ add(O7, reg_to_register_object(index), O7);
+      }
+      index = R_O7_enc;
+      disp = 0;
     }
   }
 
-  // We should have a compiler bailout here rather than a guarantee.
-  // Better yet would be some mechanism to handle variable-size matches correctly.
-  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );
-
   if( disp == 0 ) {
     // use reg-reg form
     // bit 13 is already zero
@@ -983,7 +983,7 @@
   cbuf.insts()->emit_int32(instr);
 
 #ifdef ASSERT
-  {
+  if (VerifyOops) {
     MacroAssembler _masm(&cbuf);
     if (is_verified_oop_base) {
       __ verify_oop(reg_to_register_object(src1_enc));
@@ -1342,7 +1342,7 @@
 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
 enum RC { rc_bad, rc_int, rc_float, rc_stack };
 static enum RC rc_class( OptoReg::Name reg ) {
-  if( !OptoReg::is_valid(reg)  ) return rc_bad;
+  if (!OptoReg::is_valid(reg)) return rc_bad;
   if (OptoReg::is_stack(reg)) return rc_stack;
   VMReg r = OptoReg::as_VMReg(reg);
   if (r->is_Register()) return rc_int;
@@ -1350,66 +1350,79 @@
   return rc_float;
 }
 
-static int impl_helper(const MachNode* mach, CodeBuffer* cbuf, PhaseRegAlloc* ra, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
+#ifndef PRODUCT
+ATTRIBUTE_PRINTF(2, 3)
+static void print_helper(outputStream* st, const char* format, ...) {
+  if (st->position() > 0) {
+    st->cr();
+    st->sp();
+  }
+  va_list ap;
+  va_start(ap, format);
+  st->vprint(format, ap);
+  va_end(ap);
+}
+#endif // !PRODUCT
+
+static void impl_helper(const MachNode* mach, CodeBuffer* cbuf, PhaseRegAlloc* ra, bool is_load, int offset, int reg, int opcode, const char *op_str, outputStream* st) {
   if (cbuf) {
     emit_form3_mem_reg(*cbuf, ra, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
   }
 #ifndef PRODUCT
-  else if (!do_size) {
-    if (size != 0) st->print("\n\t");
-    if (is_load) st->print("%s   [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
-    else         st->print("%s   R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
+  else {
+    if (is_load) {
+      print_helper(st, "%s   [R_SP + #%d],R_%s\t! spill", op_str, offset, OptoReg::regname(reg));
+    } else {
+      print_helper(st, "%s   R_%s,[R_SP + #%d]\t! spill", op_str, OptoReg::regname(reg), offset);
+    }
   }
 #endif
-  return size+4;
 }
 
-static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) {
-  if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] );
+static void impl_mov_helper(CodeBuffer *cbuf, int src, int dst, int op1, int op2, const char *op_str, outputStream* st) {
+  if (cbuf) {
+    emit3(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src]);
+  }
 #ifndef PRODUCT
-  else if( !do_size ) {
-    if( size != 0 ) st->print("\n\t");
-    st->print("%s  R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst));
+  else {
+    print_helper(st, "%s  R_%s,R_%s\t! spill", op_str, OptoReg::regname(src), OptoReg::regname(dst));
   }
 #endif
-  return size+4;
 }
 
-uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
-                                        PhaseRegAlloc *ra_,
-                                        bool do_size,
-                                        outputStream* st ) const {
+static void mach_spill_copy_implementation_helper(const MachNode* mach,
+                                                  CodeBuffer *cbuf,
+                                                  PhaseRegAlloc *ra_,
+                                                  outputStream* st) {
   // Get registers to move
-  OptoReg::Name src_second = ra_->get_reg_second(in(1));
-  OptoReg::Name src_first = ra_->get_reg_first(in(1));
-  OptoReg::Name dst_second = ra_->get_reg_second(this );
-  OptoReg::Name dst_first = ra_->get_reg_first(this );
+  OptoReg::Name src_second = ra_->get_reg_second(mach->in(1));
+  OptoReg::Name src_first  = ra_->get_reg_first(mach->in(1));
+  OptoReg::Name dst_second = ra_->get_reg_second(mach);
+  OptoReg::Name dst_first  = ra_->get_reg_first(mach);
 
   enum RC src_second_rc = rc_class(src_second);
-  enum RC src_first_rc = rc_class(src_first);
+  enum RC src_first_rc  = rc_class(src_first);
   enum RC dst_second_rc = rc_class(dst_second);
-  enum RC dst_first_rc = rc_class(dst_first);
-
-  assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
-
-  // Generate spill code!
-  int size = 0;
-
-  if( src_first == dst_first && src_second == dst_second )
-    return size;            // Self copy, no move
+  enum RC dst_first_rc  = rc_class(dst_first);
+
+  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register");
+
+  if (src_first == dst_first && src_second == dst_second) {
+    return; // Self copy, no move
+  }
 
   // --------------------------------------
   // Check for mem-mem move.  Load into unused float registers and fall into
   // the float-store case.
-  if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
+  if (src_first_rc == rc_stack && dst_first_rc == rc_stack) {
     int offset = ra_->reg2offset(src_first);
     // Further check for aligned-adjacent pair, so we can use a double load
-    if( (src_first&1)==0 && src_first+1 == src_second ) {
+    if ((src_first&1) == 0 && src_first+1 == src_second) {
       src_second    = OptoReg::Name(R_F31_num);
       src_second_rc = rc_float;
-      size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st);
+      impl_helper(mach, cbuf, ra_, true, offset, R_F30_num, Assembler::lddf_op3, "LDDF", st);
     } else {
-      size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st);
+      impl_helper(mach, cbuf, ra_, true, offset, R_F30_num, Assembler::ldf_op3, "LDF ", st);
     }
     src_first    = OptoReg::Name(R_F30_num);
     src_first_rc = rc_float;
@@ -1417,7 +1430,7 @@
 
   if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) {
     int offset = ra_->reg2offset(src_second);
-    size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st);
+    impl_helper(mach, cbuf, ra_, true, offset, R_F31_num, Assembler::ldf_op3, "LDF ", st);
     src_second    = OptoReg::Name(R_F31_num);
     src_second_rc = rc_float;
   }
@@ -1427,36 +1440,38 @@
   if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
     int offset = frame::register_save_words*wordSize;
     if (cbuf) {
-      emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 );
-      impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
-      impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
-      emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 );
+      emit3_simm13(*cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16);
+      impl_helper(mach, cbuf, ra_, false, offset, src_first,  Assembler::stf_op3, "STF ", st);
+      impl_helper(mach, cbuf, ra_,  true, offset, dst_first, Assembler::lduw_op3, "LDUW", st);
+      emit3_simm13(*cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16);
     }
 #ifndef PRODUCT
-    else if (!do_size) {
-      if (size != 0) st->print("\n\t");
-      st->print(  "SUB    R_SP,16,R_SP\n");
-      impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
-      impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
-      st->print("\tADD    R_SP,16,R_SP\n");
+    else {
+      print_helper(st, "SUB    R_SP,16,R_SP");
+      impl_helper(mach, cbuf, ra_, false, offset, src_first,  Assembler::stf_op3, "STF ", st);
+      impl_helper(mach, cbuf, ra_,  true, offset, dst_first, Assembler::lduw_op3, "LDUW", st);
+      print_helper(st, "ADD    R_SP,16,R_SP");
     }
 #endif
-    size += 16;
   }
 
   // Check for float->int copy on T4
   if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
     // Further check for aligned-adjacent pair, so we can use a double move
-    if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
-      return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st);
-    size  =  impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st);
+    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
+      impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mdtox_opf, "MOVDTOX", st);
+      return;
+    }
+    impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mstouw_opf, "MOVSTOUW", st);
   }
   // Check for int->float copy on T4
   if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
     // Further check for aligned-adjacent pair, so we can use a double move
-    if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
-      return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st);
-    size  =  impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st);
+    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
+      impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mxtod_opf, "MOVXTOD", st);
+      return;
+    }
+    impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mwtos_opf, "MOVWTOS", st);
   }
 
   // --------------------------------------
@@ -1466,10 +1481,10 @@
   // there.  Misaligned sources only come from native-long-returns (handled
   // special below).
 #ifndef _LP64
-  if( src_first_rc == rc_int &&     // source is already big-endian
+  if (src_first_rc == rc_int &&     // source is already big-endian
       src_second_rc != rc_bad &&    // 64-bit move
-      ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst
-    assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" );
+      ((dst_first & 1) != 0 || dst_second != dst_first + 1)) { // misaligned dst
+    assert((src_first & 1) == 0 && src_second == src_first + 1, "source must be aligned");
     // Do the big-endian flop.
     OptoReg::Name tmp    = dst_first   ; dst_first    = dst_second   ; dst_second    = tmp   ;
     enum RC       tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
@@ -1478,30 +1493,28 @@
 
   // --------------------------------------
   // Check for integer reg-reg copy
-  if( src_first_rc == rc_int && dst_first_rc == rc_int ) {
+  if (src_first_rc == rc_int && dst_first_rc == rc_int) {
 #ifndef _LP64
-    if( src_first == R_O0_num && src_second == R_O1_num ) {  // Check for the evil O0/O1 native long-return case
+    if (src_first == R_O0_num && src_second == R_O1_num) {  // Check for the evil O0/O1 native long-return case
       // Note: The _first and _second suffixes refer to the addresses of the 2 halves of the 64-bit value
       //       as stored in memory.  On a big-endian machine like SPARC, this means that the _second
       //       operand contains the least significant word of the 64-bit value and vice versa.
       OptoReg::Name tmp = OptoReg::Name(R_O7_num);
-      assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
+      assert((dst_first & 1) == 0 && dst_second == dst_first + 1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg");
       // Shift O0 left in-place, zero-extend O1, then OR them into the dst
-      if( cbuf ) {
-        emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 );
-        emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 );
-        emit3       ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] );
+      if (cbuf) {
+        emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020);
+        emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000);
+        emit3       (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second]);
 #ifndef PRODUCT
-      } else if( !do_size ) {
-        if( size != 0 ) st->print("\n\t");
-        st->print("SLLX   R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
-        st->print("SRL    R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
-        st->print("OR     R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
+      } else {
+        print_helper(st, "SLLX   R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
+        print_helper(st, "SRL    R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
+        print_helper(st, "OR     R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
 #endif
       }
-      return size+12;
-    }
-    else if( dst_first == R_I0_num && dst_second == R_I1_num ) {
+      return;
+    } else if (dst_first == R_I0_num && dst_second == R_I1_num) {
       // returning a long value in I0/I1
       // a SpillCopy must be able to target a return instruction's reg_class
       // Note: The _first and _second suffixes refer to the addresses of the 2 halves of the 64-bit value
@@ -1511,27 +1524,25 @@
 
       if (src_first == dst_first) {
         tdest = OptoReg::Name(R_O7_num);
-        size += 4;
       }
 
-      if( cbuf ) {
-        assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
+      if (cbuf) {
+        assert((src_first & 1) == 0 && (src_first + 1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
         // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
         // ShrL_reg_imm6
-        emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 );
+        emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000);
         // ShrR_reg_imm6  src, 0, dst
-        emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 );
+        emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000);
         if (tdest != dst_first) {
-          emit3     ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] );
+          emit3     (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest]);
         }
       }
 #ifndef PRODUCT
-      else if( !do_size ) {
-        if( size != 0 ) st->print("\n\t");  // %%%%% !!!!!
-        st->print("SRLX   R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
-        st->print("SRL    R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
+      else {
+        print_helper(st, "SRLX   R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
+        print_helper(st, "SRL    R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
         if (tdest != dst_first) {
-          st->print("MOV    R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
+          print_helper(st, "MOV    R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
         }
       }
 #endif // PRODUCT
@@ -1539,65 +1550,77 @@
     }
 #endif // !_LP64
     // Else normal reg-reg copy
-    assert( src_second != dst_first, "smashed second before evacuating it" );
-    size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV  ",size, st);
-    assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" );
+    assert(src_second != dst_first, "smashed second before evacuating it");
+    impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV  ", st);
+    assert((src_first & 1) == 0 && (dst_first & 1) == 0, "never move second-halves of int registers");
     // This moves an aligned adjacent pair.
     // See if we are done.
-    if( src_first+1 == src_second && dst_first+1 == dst_second )
-      return size;
+    if (src_first + 1 == src_second && dst_first + 1 == dst_second) {
+      return;
+    }
   }
 
   // Check for integer store
-  if( src_first_rc == rc_int && dst_first_rc == rc_stack ) {
+  if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
     int offset = ra_->reg2offset(dst_first);
     // Further check for aligned-adjacent pair, so we can use a double store
-    if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
-      return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st);
-    size  =  impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st);
+    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
+      impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stx_op3, "STX ", st);
+      return;
+    }
+    impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stw_op3, "STW ", st);
   }
 
   // Check for integer load
-  if( dst_first_rc == rc_int && src_first_rc == rc_stack ) {
+  if (dst_first_rc == rc_int && src_first_rc == rc_stack) {
     int offset = ra_->reg2offset(src_first);
     // Further check for aligned-adjacent pair, so we can use a double load
-    if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
-      return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st);
-    size  =  impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
+    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
+      impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::ldx_op3, "LDX ", st);
+      return;
+    }
+    impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lduw_op3, "LDUW", st);
   }
 
   // Check for float reg-reg copy
-  if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
+  if (src_first_rc == rc_float && dst_first_rc == rc_float) {
     // Further check for aligned-adjacent pair, so we can use a double move
-    if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
-      return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st);
-    size  =  impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st);
+    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
+      impl_mov_helper(cbuf, src_first, dst_first, Assembler::fpop1_op3, Assembler::fmovd_opf, "FMOVD", st);
+      return;
+    }
+    impl_mov_helper(cbuf, src_first, dst_first, Assembler::fpop1_op3, Assembler::fmovs_opf, "FMOVS", st);
   }
 
   // Check for float store
-  if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
+  if (src_first_rc == rc_float && dst_first_rc == rc_stack) {
     int offset = ra_->reg2offset(dst_first);
     // Further check for aligned-adjacent pair, so we can use a double store
-    if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
-      return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st);
-    size  =  impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
+    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
+      impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stdf_op3, "STDF", st);
+      return;
+    }
+    impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stf_op3, "STF ", st);
   }
 
   // Check for float load
-  if( dst_first_rc == rc_float && src_first_rc == rc_stack ) {
+  if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
     int offset = ra_->reg2offset(src_first);
     // Further check for aligned-adjacent pair, so we can use a double load
-    if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
-      return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st);
-    size  =  impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st);
+    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
+      impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lddf_op3, "LDDF", st);
+      return;
+    }
+    impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::ldf_op3, "LDF ", st);
   }
 
   // --------------------------------------------------------------------
   // Check for hi bits still needing moving.  Only happens for misaligned
   // arguments to native calls.
-  if( src_second == dst_second )
-    return size;               // Self copy; no move
-  assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
+  if (src_second == dst_second) {
+    return; // Self copy; no move
+  }
+  assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad");
 
 #ifndef _LP64
   // In the LP64 build, all registers can be moved as aligned/adjacent
@@ -1609,52 +1632,57 @@
   // 32-bits of a 64-bit register, but are needed in low bits of another
   // register (else it's a hi-bits-to-hi-bits copy which should have
   // happened already as part of a 64-bit move)
-  if( src_second_rc == rc_int && dst_second_rc == rc_int ) {
-    assert( (src_second&1)==1, "its the evil O0/O1 native return case" );
-    assert( (dst_second&1)==0, "should have moved with 1 64-bit move" );
+  if (src_second_rc == rc_int && dst_second_rc == rc_int) {
+    assert((src_second & 1) == 1, "it's the evil O0/O1 native return case");
+    assert((dst_second & 1) == 0, "should have moved with 1 64-bit move");
     // Shift src_second down to dst_second's low bits.
-    if( cbuf ) {
-      emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
+    if (cbuf) {
+      emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
 #ifndef PRODUCT
-    } else if( !do_size ) {
-      if( size != 0 ) st->print("\n\t");
-      st->print("SRLX   R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second));
+    } else {
+      print_helper(st, "SRLX   R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second - 1), OptoReg::regname(dst_second));
 #endif
     }
-    return size+4;
+    return;
   }
 
   // Check for high word integer store.  Must down-shift the hi bits
   // into a temp register, then fall into the case of storing int bits.
-  if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) {
+  if (src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second & 1) == 1) {
     // Shift src_second down to dst_second's low bits.
-    if( cbuf ) {
-      emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
+    if (cbuf) {
+      emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
 #ifndef PRODUCT
-    } else if( !do_size ) {
-      if( size != 0 ) st->print("\n\t");
-      st->print("SRLX   R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num));
+    } else {
+      print_helper(st, "SRLX   R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second-1), OptoReg::regname(R_O7_num));
 #endif
     }
-    size+=4;
     src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
   }
 
   // Check for high word integer load
-  if( dst_second_rc == rc_int && src_second_rc == rc_stack )
-    return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st);
+  if (dst_second_rc == rc_int && src_second_rc == rc_stack) {
+    impl_helper(mach, cbuf, ra_, true, ra_->reg2offset(src_second), dst_second, Assembler::lduw_op3, "LDUW", st);
+    return;
+  }
 
   // Check for high word integer store
-  if( src_second_rc == rc_int && dst_second_rc == rc_stack )
-    return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st);
+  if (src_second_rc == rc_int && dst_second_rc == rc_stack) {
+    impl_helper(mach, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stw_op3, "STW ", st);
+    return;
+  }
 
   // Check for high word float store
-  if( src_second_rc == rc_float && dst_second_rc == rc_stack )
-    return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st);
+  if (src_second_rc == rc_float && dst_second_rc == rc_stack) {
+    impl_helper(mach, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stf_op3, "STF ", st);
+    return;
+  }
 
 #endif // !_LP64
 
   Unimplemented();
+}
+
+uint MachSpillCopyNode::implementation(CodeBuffer *cbuf,
+                                       PhaseRegAlloc *ra_,
+                                       bool do_size,
+                                       outputStream* st) const {
+  assert(!do_size, "not supported");
+  mach_spill_copy_implementation_helper(this, cbuf, ra_, st);
   return 0;
 }
 
@@ -1669,19 +1697,19 @@
 }
 
 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
-  return implementation( NULL, ra_, true, NULL );
+  return MachNode::size(ra_);
 }
 
 //=============================================================================
 #ifndef PRODUCT
-void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
+void MachNopNode::format(PhaseRegAlloc *, outputStream *st) const {
   st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
 }
 #endif
 
-void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
+void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
   MacroAssembler _masm(&cbuf);
-  for(int i = 0; i < _count; i += 1) {
+  for (int i = 0; i < _count; i += 1) {
     __ nop();
   }
 }
@@ -5197,7 +5225,6 @@
   // No match rule to avoid chain rule match.
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "LDF    $src,$dst\t! stkI to regF" %}
   opcode(Assembler::ldf_op3);
   ins_encode(simple_form3_mem_reg(src, dst));
@@ -5208,7 +5235,6 @@
   // No match rule to avoid chain rule match.
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "LDDF   $src,$dst\t! stkL to regD" %}
   opcode(Assembler::lddf_op3);
   ins_encode(simple_form3_mem_reg(src, dst));
@@ -5219,7 +5245,6 @@
   // No match rule to avoid chain rule match.
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "STF    $src,$dst\t! regF to stkI" %}
   opcode(Assembler::stf_op3);
   ins_encode(simple_form3_mem_reg(dst, src));
@@ -5230,7 +5255,6 @@
   // No match rule to avoid chain rule match.
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "STDF   $src,$dst\t! regD to stkL" %}
   opcode(Assembler::stdf_op3);
   ins_encode(simple_form3_mem_reg(dst, src));
@@ -5240,7 +5264,6 @@
 instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST*2);
-  size(8);
   format %{ "STW    $src,$dst.hi\t! long\n\t"
             "STW    R_G0,$dst.lo" %}
   opcode(Assembler::stw_op3);
@@ -5252,7 +5275,6 @@
   // No match rule to avoid chain rule match.
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "STX    $src,$dst\t! regL to stkD" %}
   opcode(Assembler::stx_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
@@ -5266,7 +5288,6 @@
   match(Set dst src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "LDUW   $src,$dst\t!stk" %}
   opcode(Assembler::lduw_op3);
   ins_encode(simple_form3_mem_reg( src, dst ) );
@@ -5278,7 +5299,6 @@
   match(Set dst src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STW    $src,$dst\t!stk" %}
   opcode(Assembler::stw_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
@@ -5290,7 +5310,6 @@
   match(Set dst src);
 
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "LDX    $src,$dst\t! long" %}
   opcode(Assembler::ldx_op3);
   ins_encode(simple_form3_mem_reg( src, dst ) );
@@ -5302,7 +5321,6 @@
   match(Set dst src);
 
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "STX    $src,$dst\t! long" %}
   opcode(Assembler::stx_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
@@ -5314,7 +5332,6 @@
 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
   match(Set dst src);
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "LDX    $src,$dst\t!ptr" %}
   opcode(Assembler::ldx_op3);
   ins_encode(simple_form3_mem_reg( src, dst ) );
@@ -5325,7 +5342,6 @@
 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
   match(Set dst src);
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "STX    $src,$dst\t!ptr" %}
   opcode(Assembler::stx_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
@@ -5771,7 +5787,6 @@
   match(Set dst (LoadL_unaligned mem));
   effect(KILL tmp);
   ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
-  size(16);
   format %{ "LDUW   $mem+4,R_O7\t! misaligned long\n"
           "\tLDUW   $mem  ,$dst\n"
           "\tSLLX   #32, $dst, $dst\n"
@@ -5786,7 +5801,6 @@
   match(Set dst (LoadRange mem));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "LDUW   $mem,$dst\t! range" %}
   opcode(Assembler::lduw_op3);
   ins_encode(simple_form3_mem_reg( mem, dst ) );
@@ -5797,7 +5811,6 @@
 instruct loadI_freg(regF dst, memory mem) %{
   match(Set dst (LoadI mem));
   ins_cost(MEMORY_REF_COST);
-  size(4);
 
   format %{ "LDF    $mem,$dst\t! for fitos/fitod" %}
   opcode(Assembler::ldf_op3);
@@ -5876,7 +5889,6 @@
   match(Set dst (LoadD mem));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "LDDF   $mem,$dst" %}
   opcode(Assembler::lddf_op3);
   ins_encode(simple_form3_mem_reg( mem, dst ) );
@@ -5887,7 +5899,6 @@
 instruct loadD_unaligned(regD_low dst, memory mem ) %{
   match(Set dst (LoadD_unaligned mem));
   ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
-  size(8);
   format %{ "LDF    $mem  ,$dst.hi\t! misaligned double\n"
           "\tLDF    $mem+4,$dst.lo\t!" %}
   opcode(Assembler::ldf_op3);
@@ -5900,7 +5911,6 @@
   match(Set dst (LoadF mem));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "LDF    $mem,$dst" %}
   opcode(Assembler::ldf_op3);
   ins_encode(simple_form3_mem_reg( mem, dst ) );
@@ -6119,7 +6129,6 @@
   predicate(AllocatePrefetchInstr == 0);
   match( PrefetchAllocation mem );
   ins_cost(MEMORY_REF_COST);
-  size(4);
 
   format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
   opcode(Assembler::prefetch_op3);
@@ -6175,7 +6184,6 @@
   match(Set mem (StoreB mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STB    $src,$mem\t! byte" %}
   opcode(Assembler::stb_op3);
   ins_encode(simple_form3_mem_reg( mem, src ) );
@@ -6186,7 +6194,6 @@
   match(Set mem (StoreB mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STB    $src,$mem\t! byte" %}
   opcode(Assembler::stb_op3);
   ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
@@ -6197,7 +6204,6 @@
   match(Set mem (StoreCM mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STB    $src,$mem\t! CMS card-mark byte 0" %}
   opcode(Assembler::stb_op3);
   ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
@@ -6209,7 +6215,6 @@
   match(Set mem (StoreC mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STH    $src,$mem\t! short" %}
   opcode(Assembler::sth_op3);
   ins_encode(simple_form3_mem_reg( mem, src ) );
@@ -6220,7 +6225,6 @@
   match(Set mem (StoreC mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STH    $src,$mem\t! short" %}
   opcode(Assembler::sth_op3);
   ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
@@ -6232,7 +6236,6 @@
   match(Set mem (StoreI mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STW    $src,$mem" %}
   opcode(Assembler::stw_op3);
   ins_encode(simple_form3_mem_reg( mem, src ) );
@@ -6243,7 +6246,6 @@
 instruct storeL(memory mem, iRegL src) %{
   match(Set mem (StoreL mem src));
   ins_cost(MEMORY_REF_COST);
-  size(4);
   format %{ "STX    $src,$mem\t! long" %}
   opcode(Assembler::stx_op3);
   ins_encode(simple_form3_mem_reg( mem, src ) );
@@ -6254,7 +6256,6 @@
   match(Set mem (StoreI mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STW    $src,$mem" %}
   opcode(Assembler::stw_op3);
   ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
@@ -6265,7 +6266,6 @@
   match(Set mem (StoreL mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STX    $src,$mem" %}
   opcode(Assembler::stx_op3);
   ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
@@ -6277,7 +6277,6 @@
   match(Set mem (StoreI mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STF    $src,$mem\t! after fstoi/fdtoi" %}
   opcode(Assembler::stf_op3);
   ins_encode(simple_form3_mem_reg( mem, src ) );
@@ -6288,7 +6287,6 @@
 instruct storeP(memory dst, sp_ptr_RegP src) %{
   match(Set dst (StoreP dst src));
   ins_cost(MEMORY_REF_COST);
-  size(4);
 
 #ifndef _LP64
   format %{ "STW    $src,$dst\t! ptr" %}
@@ -6304,7 +6302,6 @@
 instruct storeP0(memory dst, immP0 src) %{
   match(Set dst (StoreP dst src));
   ins_cost(MEMORY_REF_COST);
-  size(4);
 
 #ifndef _LP64
   format %{ "STW    $src,$dst\t! ptr" %}
@@ -6379,7 +6376,6 @@
   match(Set mem (StoreD mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STDF   $src,$mem" %}
   opcode(Assembler::stdf_op3);
   ins_encode(simple_form3_mem_reg( mem, src ) );
@@ -6390,7 +6386,6 @@
   match(Set mem (StoreD mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STX    $src,$mem" %}
   opcode(Assembler::stx_op3);
   ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
@@ -6402,7 +6397,6 @@
   match(Set mem (StoreF mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STF    $src,$mem" %}
   opcode(Assembler::stf_op3);
   ins_encode(simple_form3_mem_reg( mem, src ) );
@@ -6413,7 +6407,6 @@
   match(Set mem (StoreF mem src));
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STW    $src,$mem\t! storeF0" %}
   opcode(Assembler::stw_op3);
   ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
@@ -7068,7 +7061,6 @@
   ins_cost(MEMORY_REF_COST);
 
 #ifndef _LP64
-  size(4);
   format %{ "LDUW   $mem,$dst\t! ptr" %}
   opcode(Assembler::lduw_op3, 0, REGP_OP);
 #else
@@ -8138,7 +8130,6 @@
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "LDUW   $src,$dst\t! MoveF2I" %}
   opcode(Assembler::lduw_op3);
   ins_encode(simple_form3_mem_reg( src, dst ) );
@@ -8150,7 +8141,6 @@
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "LDF    $src,$dst\t! MoveI2F" %}
   opcode(Assembler::ldf_op3);
   ins_encode(simple_form3_mem_reg(src, dst));
@@ -8162,7 +8152,6 @@
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "LDX    $src,$dst\t! MoveD2L" %}
   opcode(Assembler::ldx_op3);
   ins_encode(simple_form3_mem_reg( src, dst ) );
@@ -8174,7 +8163,6 @@
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "LDDF   $src,$dst\t! MoveL2D" %}
   opcode(Assembler::lddf_op3);
   ins_encode(simple_form3_mem_reg(src, dst));
@@ -8186,7 +8174,6 @@
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STF   $src,$dst\t! MoveF2I" %}
   opcode(Assembler::stf_op3);
   ins_encode(simple_form3_mem_reg(dst, src));
@@ -8198,7 +8185,6 @@
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STW    $src,$dst\t! MoveI2F" %}
   opcode(Assembler::stw_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
@@ -8210,7 +8196,6 @@
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STDF   $src,$dst\t! MoveD2L" %}
   opcode(Assembler::stdf_op3);
   ins_encode(simple_form3_mem_reg(dst, src));
@@ -8222,7 +8207,6 @@
   effect(DEF dst, USE src);
   ins_cost(MEMORY_REF_COST);
 
-  size(4);
   format %{ "STX    $src,$dst\t! MoveL2D" %}
   opcode(Assembler::stx_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
@@ -8427,7 +8411,6 @@
 instruct convI2D_mem(regD_low dst, memory mem) %{
   match(Set dst (ConvI2D (LoadI mem)));
   ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
   format %{ "LDF    $mem,$dst\n\t"
             "FITOD  $dst,$dst" %}
   opcode(Assembler::ldf_op3, Assembler::fitod_opf);
@@ -8468,7 +8451,6 @@
 instruct convI2F_mem( regF dst, memory mem ) %{
   match(Set dst (ConvI2F (LoadI mem)));
   ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
   format %{ "LDF    $mem,$dst\n\t"
             "FITOS  $dst,$dst" %}
   opcode(Assembler::ldf_op3, Assembler::fitos_opf);
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -463,3 +463,37 @@
   }
   return result;
 }
+
+
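+// Parse the CPU implementation string (e.g. "SPARC64", "SPARC-M", "SPARC-T1") into
+// feature mask bits; unrecognized implementations fall back to generic SPARC.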
+int VM_Version::parse_features(const char* implementation) {
+  int features = unknown_m;
+  // Convert to UPPER case before compare.
+  char* impl = os::strdup_check_oom(implementation);
+
+  for (int i = 0; impl[i] != 0; i++)
+    impl[i] = (char)toupper((uint)impl[i]);
+
+  if (strstr(impl, "SPARC64") != NULL) {
+    features |= sparc64_family_m;
+  } else if (strstr(impl, "SPARC-M") != NULL) {
+    // M-series SPARC is based on T-series.
+    features |= (M_family_m | T_family_m);
+  } else if (strstr(impl, "SPARC-T") != NULL) {
+    features |= T_family_m;
+    if (strstr(impl, "SPARC-T1") != NULL) {
+      features |= T1_model_m;
+    }
+  } else {
+    if (strstr(impl, "SPARC") == NULL) {
+#ifndef PRODUCT
+      // kstat on Solaris 8 virtual machines (branded zones)
+      // returns "(unsupported)" implementation. Solaris 8 is not
+      // supported anymore, but include this check to be on the
+      // safe side.
+      warning("Can't parse CPU implementation = '%s', assume generic SPARC", impl);
+#endif
+    }
+  }
+  os::free((void*)impl);
+  return features;
+}
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -121,7 +121,7 @@
   static bool is_T1_model(int features) { return is_T_family(features) && ((features & T1_model_m) != 0); }
 
   static int maximum_niagara1_processor_count() { return 32; }
-
+  static int parse_features(const char* implementation);
 public:
   // Initialization
   static void initialize();
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -161,13 +161,7 @@
                                      create_klass_exception),
                rarg, rarg2);
   } else {
-    // kind of lame ExternalAddress can't take NULL because
-    // external_word_Relocation will assert.
-    if (message != NULL) {
-      __ lea(rarg2, ExternalAddress((address)message));
-    } else {
-      __ movptr(rarg2, NULL_WORD);
-    }
+    __ lea(rarg2, ExternalAddress((address)message));
     __ call_VM(rax,
                CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
                rarg, rarg2);
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Sat Mar 05 20:46:42 2016 -0800
@@ -7236,6 +7236,7 @@
 instruct compareAndSwapL( rRegI res, eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
   predicate(VM_Version::supports_cx8());
   match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
   format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EDX:EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
             "MOV    $res,0\n\t"
@@ -7249,6 +7250,7 @@
 
 instruct compareAndSwapP( rRegI res,  pRegP mem_ptr, eAXRegP oldval, eCXRegP newval, eFlagsReg cr) %{
   match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
   format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
             "MOV    $res,0\n\t"
@@ -7261,6 +7263,7 @@
 
 instruct compareAndSwapI( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
   match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
   format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
             "MOV    $res,0\n\t"
@@ -7271,6 +7274,31 @@
   ins_pipe( pipe_cmpxchg );
 %}
 
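+// CompareAndExchange variants: same lock cmpxchg encoding as the CompareAndSwap instructs
+// above, but the value observed in memory is left in the oldval register(s) instead of a
+// boolean flag result.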
+instruct compareAndExchangeL( eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
+  predicate(VM_Version::supports_cx8());
+  match(Set oldval (CompareAndExchangeL mem_ptr (Binary oldval newval)));
+  effect(KILL cr);
+  format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EDX:EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
+  ins_encode( enc_cmpxchg8(mem_ptr) );
+  ins_pipe( pipe_cmpxchg );
+%}
+
+instruct compareAndExchangeP( pRegP mem_ptr, eAXRegP oldval, eCXRegP newval, eFlagsReg cr) %{
+  match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
+  effect(KILL cr);
+  format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
+  ins_encode( enc_cmpxchg(mem_ptr) );
+  ins_pipe( pipe_cmpxchg );
+%}
+
+instruct compareAndExchangeI( pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
+  match(Set oldval (CompareAndExchangeI mem_ptr (Binary oldval newval)));
+  effect(KILL cr);
+  format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
+  ins_encode( enc_cmpxchg(mem_ptr) );
+  ins_pipe( pipe_cmpxchg );
+%}
+
 instruct xaddI_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
   predicate(n->as_LoadStore()->result_not_used());
   match(Set dummy (GetAndAddI mem add));
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Sat Mar 05 20:46:42 2016 -0800
@@ -7281,6 +7281,7 @@
 %{
   predicate(VM_Version::supports_cx8());
   match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
 
   format %{ "cmpxchgq $mem_ptr,$newval\t# "
@@ -7305,6 +7306,7 @@
 %{
   predicate(VM_Version::supports_cx8());
   match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
 
   format %{ "cmpxchgq $mem_ptr,$newval\t# "
@@ -7328,6 +7330,7 @@
                          rFlagsReg cr)
 %{
   match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
 
   format %{ "cmpxchgl $mem_ptr,$newval\t# "
@@ -7351,6 +7354,7 @@
                           rax_RegN oldval, rRegN newval,
                           rFlagsReg cr) %{
   match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
+  match(Set res (WeakCompareAndSwapN mem_ptr (Binary oldval newval)));
   effect(KILL cr, KILL oldval);
 
   format %{ "cmpxchgl $mem_ptr,$newval\t# "
@@ -7368,6 +7372,83 @@
   ins_pipe( pipe_cmpxchg );
 %}
 
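+// CompareAndExchange variants: like the CompareAndSwap instructs above, but the value
+// observed in memory is returned in rax (oldval) rather than as a boolean result.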
+instruct compareAndExchangeI(
+                         memory mem_ptr,
+                         rax_RegI oldval, rRegI newval,
+                         rFlagsReg cr)
+%{
+  match(Set oldval (CompareAndExchangeI mem_ptr (Binary oldval newval)));
+  effect(KILL cr);
+
+  format %{ "cmpxchgl $mem_ptr,$newval\t# "
+            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"  %}
+  opcode(0x0F, 0xB1);
+  ins_encode(lock_prefix,
+             REX_reg_mem(newval, mem_ptr),
+             OpcP, OpcS,
+             reg_mem(newval, mem_ptr) // lock cmpxchg
+             );
+  ins_pipe( pipe_cmpxchg );
+%}
+
+instruct compareAndExchangeL(
+                         memory mem_ptr,
+                         rax_RegL oldval, rRegL newval,
+                         rFlagsReg cr)
+%{
+  predicate(VM_Version::supports_cx8());
+  match(Set oldval (CompareAndExchangeL mem_ptr (Binary oldval newval)));
+  effect(KILL cr);
+
+  format %{ "cmpxchgq $mem_ptr,$newval\t# "
+            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"  %}
+  opcode(0x0F, 0xB1);
+  ins_encode(lock_prefix,
+             REX_reg_mem_wide(newval, mem_ptr),
+             OpcP, OpcS,
+             reg_mem(newval, mem_ptr)  // lock cmpxchg
+            );
+  ins_pipe( pipe_cmpxchg );
+%}
+
+instruct compareAndExchangeN(
+                          memory mem_ptr,
+                          rax_RegN oldval, rRegN newval,
+                          rFlagsReg cr) %{
+  match(Set oldval (CompareAndExchangeN mem_ptr (Binary oldval newval)));
+  effect(KILL cr);
+
+  format %{ "cmpxchgl $mem_ptr,$newval\t# "
+            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
+  opcode(0x0F, 0xB1);
+  ins_encode(lock_prefix,
+             REX_reg_mem(newval, mem_ptr),
+             OpcP, OpcS,
+             reg_mem(newval, mem_ptr)  // lock cmpxchg
+          );
+  ins_pipe( pipe_cmpxchg );
+%}
+
+instruct compareAndExchangeP(
+                         memory mem_ptr,
+                         rax_RegP oldval, rRegP newval,
+                         rFlagsReg cr)
+%{
+  predicate(VM_Version::supports_cx8());
+  match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
+  effect(KILL cr);
+
+  format %{ "cmpxchgq $mem_ptr,$newval\t# "
+            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
+  opcode(0x0F, 0xB1);
+  ins_encode(lock_prefix,
+             REX_reg_mem_wide(newval, mem_ptr),
+             OpcP, OpcS,
+             reg_mem(newval, mem_ptr)  // lock cmpxchg
+          );
+  ins_pipe( pipe_cmpxchg );
+%}
+
 instruct xaddI_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{
   predicate(n->as_LoadStore()->result_not_used());
   match(Set dummy (GetAndAddI mem add));
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/development/Server16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/development/Server24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/About16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/About24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Delete16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Delete24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Find16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Help16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Help24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/History16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/History24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Information16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Information24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/New16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/New24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Open16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Open24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Save24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/SaveAs16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/SaveAs24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/Zoom16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/ZoomIn16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/general/ZoomIn24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/navigation/Down16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/navigation/Up16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignCenter16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignCenter24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignLeft16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignLeft24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignRight16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/images/toolbarButtonGraphics/text/AlignRight24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/development/Server16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/development/Server24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/About16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/About24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Delete16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Delete24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Find16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Help16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Help24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/History16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/History24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Information16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Information24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/New16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/New24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Open16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Open24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Save24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/SaveAs16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/SaveAs24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/Zoom16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/ZoomIn16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/general/ZoomIn24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/navigation/Down16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/navigation/Up16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/text/AlignCenter16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/text/AlignCenter24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/text/AlignLeft16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/text/AlignLeft24.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/text/AlignRight16.gif has changed
Binary file hotspot/src/jdk.hotspot.agent/share/classes/toolbarButtonGraphics/text/AlignRight24.gif has changed
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.amd64/src/jdk/vm/ci/amd64/AMD64.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.amd64/src/jdk/vm/ci/amd64/AMD64.java	Sat Mar 05 20:46:42 2016 -0800
@@ -22,6 +22,7 @@
  */
 package jdk.vm.ci.amd64;
 
+import static jdk.vm.ci.code.MemoryBarriers.LOAD_LOAD;
 import static jdk.vm.ci.code.MemoryBarriers.LOAD_STORE;
 import static jdk.vm.ci.code.MemoryBarriers.STORE_STORE;
 import static jdk.vm.ci.code.Register.SPECIAL;
@@ -220,7 +221,7 @@
     private final AMD64Kind largestKind;
 
     public AMD64(EnumSet<CPUFeature> features, EnumSet<Flag> flags) {
-        super("AMD64", AMD64Kind.QWORD, ByteOrder.LITTLE_ENDIAN, true, allRegisters, LOAD_STORE | STORE_STORE, 1, 8);
+        super("AMD64", AMD64Kind.QWORD, ByteOrder.LITTLE_ENDIAN, true, allRegisters, LOAD_LOAD | LOAD_STORE | STORE_STORE, 1, 8);
         this.features = features;
         this.flags = flags;
         assert features.contains(CPUFeature.SSE2) : "minimum config for x64";
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1141,7 +1141,7 @@
 
     @HotSpotVMField(name = "JavaFrameAnchor::_last_Java_sp", type = "intptr_t*", get = HotSpotVMField.Type.OFFSET) @Stable private int javaFrameAnchorLastJavaSpOffset;
     @HotSpotVMField(name = "JavaFrameAnchor::_last_Java_pc", type = "address", get = HotSpotVMField.Type.OFFSET) @Stable private int javaFrameAnchorLastJavaPcOffset;
-    @HotSpotVMField(name = "JavaFrameAnchor::_last_Java_fp", type = "intptr_t*", get = HotSpotVMField.Type.OFFSET, archs = {"amd64"}) @Stable private int javaFrameAnchorLastJavaFpOffset;
+    @HotSpotVMField(name = "JavaFrameAnchor::_last_Java_fp", type = "intptr_t*", get = HotSpotVMField.Type.OFFSET, archs = {"aarch64, amd64"}) @Stable private int javaFrameAnchorLastJavaFpOffset;
     @HotSpotVMField(name = "JavaFrameAnchor::_flags", type = "int", get = HotSpotVMField.Type.OFFSET, archs = {"sparc"}) @Stable private int javaFrameAnchorFlagsOffset;
 
     public int threadLastJavaSpOffset() {
@@ -1152,11 +1152,8 @@
         return javaThreadAnchorOffset + javaFrameAnchorLastJavaPcOffset;
     }
 
-    /**
-     * This value is only valid on AMD64.
-     */
     public int threadLastJavaFpOffset() {
-        // TODO add an assert for AMD64
+        assert getHostArchitectureName().equals("aarch64") || getHostArchitectureName().equals("amd64");
         return javaThreadAnchorOffset + javaFrameAnchorLastJavaFpOffset;
     }
 
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
 #include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_aix.h"
+#include "logging/log.hpp"
 #include "libo4.hpp"
 #include "libperfstat_aix.hpp"
 #include "libodm_aix.hpp"
@@ -791,13 +792,8 @@
   const pthread_t pthread_id = ::pthread_self();
   const tid_t kernel_thread_id = ::thread_self();
 
-  trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT
-    ", stack %p ... %p, stacksize 0x%IX (%IB)",
-    pthread_id, kernel_thread_id,
-    thread->stack_end(),
-    thread->stack_base(),
-    thread->stack_size(),
-    thread->stack_size());
+  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) kernel_thread_id);
 
   // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
   // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
@@ -805,7 +801,7 @@
   // guard pages on those stacks, because the stacks may reside in memory which is not
   // protectable (shmated).
   if (thread->stack_base() > ::sbrk(0)) {
-    trcVerbose("Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id);
+    log_warning(os, thread)("Thread stack not in data segment.");
   }
 
   // Try to randomize the cache line index of hot stack frames.
@@ -839,8 +835,8 @@
   // Call one more level start routine.
   thread->run();
 
-  trcVerbose("Thread finished : pthread-id %u, ktid " UINT64_FORMAT ".",
-    pthread_id, kernel_thread_id);
+  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) kernel_thread_id);
 
   return 0;
 }
@@ -908,20 +904,19 @@
   pthread_t tid;
   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 
-  pthread_attr_destroy(&attr);
-
+
+  char buf[64];
   if (ret == 0) {
-    trcVerbose("Created New Thread : pthread-id %u", tid);
+    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
+      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
   } else {
-    if (os::Aix::on_pase()) {
-      // QIBM_MULTI_THREADED=Y is needed when the launcher is started on iSeries
-      // using QSH. Otherwise pthread_create fails with errno=11.
-      trcVerbose("(Please make sure you set the environment variable "
-              "QIBM_MULTI_THREADED=Y before running this program.)");
-    }
-    if (PrintMiscellaneous && (Verbose || WizardMode)) {
-      perror("pthread_create()");
-    }
+    log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
+      strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
+  }
+
+  pthread_attr_destroy(&attr);
+
+  if (ret != 0) {
     // Need to clean up stuff we've allocated so far
     thread->set_osthread(NULL);
     delete osthread;
@@ -958,13 +953,6 @@
   const pthread_t pthread_id = ::pthread_self();
   const tid_t kernel_thread_id = ::thread_self();
 
-  trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
-    pthread_id, kernel_thread_id,
-    thread->stack_end(),
-    thread->stack_base(),
-    thread->stack_size(),
-    thread->stack_size());
-
   // OSThread::thread_id is the pthread id.
   osthread->set_thread_id(pthread_id);
 
@@ -990,6 +978,9 @@
   // and save the caller's signal mask
   os::Aix::hotspot_sigmask(thread);
 
+  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) kernel_thread_id);
+
   return true;
 }
 
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -32,6 +32,7 @@
 #include "compiler/disassembler.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_bsd.h"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "mutex_bsd.inline.hpp"
@@ -681,6 +682,9 @@
 
   osthread->set_thread_id(os::Bsd::gettid());
 
+  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) pthread_self());
+
 #ifdef __APPLE__
   uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
   guarantee(unique_thread_id != 0, "unique thread id was not found");
@@ -716,6 +720,9 @@
   // call one more level start routine
   thread->run();
 
+  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) pthread_self());
+
   return 0;
 }
 
@@ -776,12 +783,18 @@
     pthread_t tid;
     int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 
+    char buf[64];
+    if (ret == 0) {
+      log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
+        (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
+    } else {
+      log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
+        strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
+    }
+
     pthread_attr_destroy(&attr);
 
     if (ret != 0) {
-      if (PrintMiscellaneous && (Verbose || WizardMode)) {
-        perror("pthread_create()");
-      }
       // Need to clean up stuff we've allocated so far
       thread->set_osthread(NULL);
       delete osthread;
@@ -858,6 +871,9 @@
   // and save the caller's signal mask
   os::Bsd::hotspot_sigmask(thread);
 
+  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) pthread_self());
+
   return true;
 }
 
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -662,6 +662,9 @@
 
   osthread->set_thread_id(os::current_thread_id());
 
+  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) pthread_self());
+
   if (UseNUMA) {
     int lgrp_id = os::numa_get_group_id();
     if (lgrp_id != -1) {
@@ -691,6 +694,9 @@
   // call one more level start routine
   thread->run();
 
+  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) pthread_self());
+
   return 0;
 }
 
@@ -756,12 +762,18 @@
     pthread_t tid;
     int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 
+    char buf[64];
+    if (ret == 0) {
+      log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
+        (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
+    } else {
+      log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
+        strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
+    }
+
     pthread_attr_destroy(&attr);
 
     if (ret != 0) {
-      if (PrintMiscellaneous && (Verbose || WizardMode)) {
-        perror("pthread_create()");
-      }
       // Need to clean up stuff we've allocated so far
       thread->set_osthread(NULL);
       delete osthread;
@@ -858,6 +870,9 @@
   // and save the caller's signal mask
   os::Linux::hotspot_sigmask(thread);
 
+  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) pthread_self());
+
   return true;
 }
 
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1071,6 +1071,19 @@
 #endif
 }
 
+char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
+  size_t stack_size = 0;
+  size_t guard_size = 0;
+  int detachstate = 0;
+  pthread_attr_getstacksize(attr, &stack_size);
+  pthread_attr_getguardsize(attr, &guard_size);
+  pthread_attr_getdetachstate(attr, &detachstate);
+  jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s",
+    stack_size / 1024, guard_size / 1024,
+    (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
+  return buf;
+}
+
 
 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
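For context: the new log_info(os, thread) start/finish messages in the os_* files above lean on os::Posix::describe_pthread_attr() to summarize the attributes a thread was actually created with. Below is a minimal standalone sketch of the same idea, using plain POSIX calls and snprintf instead of HotSpot's jio_snprintf and SIZE_FORMAT; the helper name and the 512k stack size are illustrative only, not part of this change.

// Standalone sketch (not HotSpot code): summarize a pthread_attr_t the way
// os::Posix::describe_pthread_attr() does.
#include <pthread.h>
#include <cstddef>
#include <cstdio>

static char* describe_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
  size_t stack_size = 0;
  size_t guard_size = 0;
  int detachstate = 0;
  pthread_attr_getstacksize(attr, &stack_size);
  pthread_attr_getguardsize(attr, &guard_size);
  pthread_attr_getdetachstate(attr, &detachstate);
  std::snprintf(buf, buflen, "stacksize: %zuk, guardsize: %zuk, %s",
                stack_size / 1024, guard_size / 1024,
                detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable");
  return buf;
}

int main() {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, 512 * 1024);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  char buf[64];
  std::printf("%s\n", describe_attr(buf, sizeof(buf), &attr));  // e.g. "stacksize: 512k, guardsize: 4k, detached"
  pthread_attr_destroy(&attr);
  return 0;
}

With unified logging in place, these thread lifecycle messages should become reachable with something like -Xlog:os+thread=info on the java command line (exact tag syntax as defined by the unified logging framework).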
--- a/hotspot/src/os/posix/vm/os_posix.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/os/posix/vm/os_posix.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -76,6 +76,11 @@
   static address ucontext_get_pc(const ucontext_t* ctx);
   // Set PC into context. Needed for continuation after signal.
   static void ucontext_set_pc(ucontext_t* ctx, address pc);
+
+  // Helper function; describes pthread attributes as a short string. The string is written
+  // to buf of length buflen; buf is returned.
+  static char* describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr);
+
 };
 
 /*
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include "compiler/disassembler.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_solaris.h"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "mutex_solaris.inline.hpp"
@@ -68,6 +69,7 @@
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
 // put OS-includes here
@@ -736,6 +738,9 @@
   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
   thread->_schedctl = (void *) schedctl_init();
 
+  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").",
+    os::current_thread_id());
+
   if (UseNUMA) {
     int lgrp_id = os::numa_get_group_id();
     if (lgrp_id != -1) {
@@ -781,6 +786,8 @@
     Atomic::dec(&os::Solaris::_os_thread_count);
   }
 
+  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
+
   if (UseDetachedThreads) {
     thr_exit(NULL);
     ShouldNotReachHere();
@@ -853,6 +860,9 @@
   // and save the caller's signal mask
   os::Solaris::hotspot_sigmask(thread);
 
+  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
+    os::current_thread_id());
+
   return true;
 }
 
@@ -879,6 +889,24 @@
   return true;
 }
 
+// Helper function to trace thread attributes, similar to os::Posix::describe_pthread_attr()
+static char* describe_thr_create_attributes(char* buf, size_t buflen,
+                                            size_t stacksize, long flags) {
+  stringStream ss(buf, buflen);
+  ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
+  ss.print("flags: ");
+  #define PRINT_FLAG(f) if (flags & f) ss.print( #f " ");
+  #define ALL(X) \
+    X(THR_SUSPENDED) \
+    X(THR_DETACHED) \
+    X(THR_BOUND) \
+    X(THR_NEW_LWP) \
+    X(THR_DAEMON)
+  ALL(PRINT_FLAG)
+  #undef ALL
+  #undef PRINT_FLAG
+  return buf;
+}
 
 bool os::create_thread(Thread* thread, ThreadType thr_type,
                        size_t stack_size) {
@@ -974,10 +1002,17 @@
   osthread->set_thread_id(-1);
 
   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
+
+  char buf[64];
+  if (status == 0) {
+    log_info(os, thread)("Thread started (tid: " UINTX_FORMAT ", attributes: %s). ",
+      (uintx) tid, describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
+  } else {
+    log_warning(os, thread)("Failed to start thread - thr_create failed (%s) for attributes: %s.",
+      strerror(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
+  }
+
   if (status != 0) {
-    if (PrintMiscellaneous && (Verbose || WizardMode)) {
-      perror("os::create_thread");
-    }
     thread->set_osthread(NULL);
     // Need to clean up stuff we've allocated so far
     delete osthread;
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -35,6 +35,7 @@
 #include "compiler/disassembler.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_windows.h"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "mutex_windows.inline.hpp"
@@ -71,6 +72,7 @@
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
 #ifdef _DEBUG
@@ -436,6 +438,8 @@
     res = 20115;    // java thread
   }
 
+  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
+
   // Install a win32 structured exception handler around every thread created
   // by VM, so VM can generate error dump when an exception occurred in non-
   // Java thread (e.g. VM thread).
@@ -446,6 +450,8 @@
     // Nothing to do.
   }
 
+  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
+
   // One less thread is executing
   // When the VMThread gets here, the main thread may have already exited
   // which frees the CodeHeap containing the Atomic::add code
@@ -509,6 +515,10 @@
   osthread->set_state(RUNNABLE);
 
   thread->set_osthread(osthread);
+
+  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
+    os::current_thread_id());
+
   return true;
 }
 
@@ -530,6 +540,27 @@
   return true;
 }
 
+// Helper function to trace _beginthreadex attributes,
+//  similar to os::Posix::describe_pthread_attr()
+static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
+                                               size_t stacksize, unsigned initflag) {
+  stringStream ss(buf, buflen);
+  if (stacksize == 0) {
+    ss.print("stacksize: default, ");
+  } else {
+    ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
+  }
+  ss.print("flags: ");
+  #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
+  #define ALL(X) \
+    X(CREATE_SUSPENDED) \
+    X(STACK_SIZE_PARAM_IS_A_RESERVATION)
+  ALL(PRINT_FLAG)
+  #undef ALL
+  #undef PRINT_FLAG
+  return buf;
+}
+
 // Allocate and initialize a new OSThread
 bool os::create_thread(Thread* thread, ThreadType thr_type,
                        size_t stack_size) {
@@ -596,14 +627,24 @@
   // document because JVM uses C runtime library. The good news is that the
 // flag appears to work with _beginthreadex() as well.
 
+  const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
   HANDLE thread_handle =
     (HANDLE)_beginthreadex(NULL,
                            (unsigned)stack_size,
                            (unsigned (__stdcall *)(void*)) java_start,
                            thread,
-                           CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
+                           initflag,
                            &thread_id);
 
+  char buf[64];
+  if (thread_handle != NULL) {
+    log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
+      thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
+  } else {
+    log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
+      strerror(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
+  }
+
   if (thread_handle == NULL) {
     // Need to clean up stuff we've allocated so far
     CloseHandle(osthread->interrupt_event());
@@ -1668,8 +1709,7 @@
     if (is_workstation) {
       st->print("10");
     } else {
-      // The server version name of Windows 10 is not known at this time
-      st->print("%d.%d", major_version, minor_version);
+      st->print("Server 2016");
     }
     break;
 
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -264,6 +264,7 @@
 
 // We need to keep these here as long as we have to build on Solaris
 // versions before 10.
+
 #ifndef SI_ARCHITECTURE_32
 #define SI_ARCHITECTURE_32      516     /* basic 32-bit SI_ARCHITECTURE */
 #endif
@@ -272,36 +273,87 @@
 #define SI_ARCHITECTURE_64      517     /* basic 64-bit SI_ARCHITECTURE */
 #endif
 
-static void do_sysinfo(int si, const char* string, int* features, int mask) {
-  char   tmp;
-  size_t bufsize = sysinfo(si, &tmp, 1);
+#ifndef SI_CPUBRAND
+#define SI_CPUBRAND             523     /* return cpu brand string */
+#endif
 
-  // All SI defines used below must be supported.
-  guarantee(bufsize != -1, "must be supported");
+class Sysinfo {
+  char* _string;
+public:
+  Sysinfo(int si) : _string(NULL) {
+    char   tmp;
+    size_t bufsize = sysinfo(si, &tmp, 1);
 
-  char* buf = (char*) os::malloc(bufsize, mtInternal);
-
-  if (buf == NULL)
-    return;
+    if (bufsize != -1) {
+      char* buf = (char*) os::malloc(bufsize, mtInternal);
+      if (buf != NULL) {
+        if (sysinfo(si, buf, bufsize) == bufsize) {
+          _string = buf;
+        } else {
+          os::free(buf);
+        }
+      }
+    }
+  }
 
-  if (sysinfo(si, buf, bufsize) == bufsize) {
-    // Compare the string.
-    if (strcmp(buf, string) == 0) {
-      *features |= mask;
+  ~Sysinfo() {
+    if (_string != NULL) {
+      os::free(_string);
     }
   }
 
-  os::free(buf);
-}
+  const char* value() const {
+    return _string;
+  }
+
+  bool valid() const {
+    return _string != NULL;
+  }
+
+  bool match(const char* s) const {
+    return valid() ? strcmp(_string, s) == 0 : false;
+  }
+
+  bool match_substring(const char* s) const {
+    return valid() ? strstr(_string, s) != NULL : false;
+  }
+};
+
+class Sysconf {
+  int _value;
+public:
+  Sysconf(int sc) : _value(-1) {
+    _value = sysconf(sc);
+  }
+  bool valid() const {
+    return _value != -1;
+  }
+  int value() const {
+    return _value;
+  }
+};
+
+
+#ifndef _SC_DCACHE_LINESZ
+#define _SC_DCACHE_LINESZ       508     /* Data cache line size */
+#endif
+
+#ifndef _SC_L2CACHE_LINESZ
+#define _SC_L2CACHE_LINESZ      527     /* Size of L2 cache line */
+#endif
 
 int VM_Version::platform_features(int features) {
   assert(os::Solaris::supports_getisax(), "getisax() must be available");
 
   // Check 32-bit architecture.
-  do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
+  if (Sysinfo(SI_ARCHITECTURE_32).match("sparc")) {
+    features |= v8_instructions_m;
+  }
 
   // Check 64-bit architecture.
-  do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
+  if (Sysinfo(SI_ARCHITECTURE_64).match("sparcv9")) {
+    features |= generic_v9_m;
+  }
 
   // Extract valid instruction set extensions.
   uint_t avs[2];
@@ -388,67 +440,63 @@
   if (av & AV_SPARC_SHA512)       features |= sha512_instruction_m;
 
   // Determine the machine type.
-  do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);
+  if (Sysinfo(SI_MACHINE).match("sun4v")) {
+    features |= sun4v_m;
+  }
 
-  {
-    // Using kstat to determine the machine type.
+  bool use_solaris_12_api = false;
+  Sysinfo impl(SI_CPUBRAND);
+  if (impl.valid()) {
+    // If SI_CPUBRAND works, that means the Solaris 12 API to get the cache line sizes
+    // is available to us as well.
+    use_solaris_12_api = true;
+    features |= parse_features(impl.value());
+  } else {
+    // Otherwise use kstat to determine the machine type.
     kstat_ctl_t* kc = kstat_open();
     kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL);
-    const char* implementation = "UNKNOWN";
+    const char* implementation;
+    bool has_implementation = false;
     if (ksp != NULL) {
       if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) {
         kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
         for (int i = 0; i < ksp->ks_ndata; i++) {
           if (strcmp((const char*)&(knm[i].name),"implementation") == 0) {
             implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
+            has_implementation = true;
 #ifndef PRODUCT
             if (PrintMiscellaneous && Verbose) {
               tty->print_cr("cpu_info.implementation: %s", implementation);
             }
 #endif
-            // Convert to UPPER case before compare.
-            char* impl = os::strdup_check_oom(implementation);
-
-            for (int i = 0; impl[i] != 0; i++)
-              impl[i] = (char)toupper((uint)impl[i]);
-
-            if (strstr(impl, "SPARC64") != NULL) {
-              features |= sparc64_family_m;
-            } else if (strstr(impl, "SPARC-M") != NULL) {
-              // M-series SPARC is based on T-series.
-              features |= (M_family_m | T_family_m);
-            } else if (strstr(impl, "SPARC-T") != NULL) {
-              features |= T_family_m;
-              if (strstr(impl, "SPARC-T1") != NULL) {
-                features |= T1_model_m;
-              }
-            } else {
-              if (strstr(impl, "SPARC") == NULL) {
-#ifndef PRODUCT
-                // kstat on Solaris 8 virtual machines (branded zones)
-                // returns "(unsupported)" implementation. Solaris 8 is not
-                // supported anymore, but include this check to be on the
-                // safe side.
-                warning("kstat cpu_info implementation = '%s', assume generic SPARC", impl);
-#endif
-                implementation = "SPARC";
-              }
-            }
-            os::free((void*)impl);
+            features |= parse_features(implementation);
             break;
           }
         } // for(
       }
     }
-    assert(strcmp(implementation, "UNKNOWN") != 0,
-           "unknown cpu info (changed kstat interface?)");
+    assert(has_implementation, "unknown cpu info (changed kstat interface?)");
     kstat_close(kc);
   }
 
-  // Figure out cache line sizes using PICL
-  PICL picl((features & sparc64_family_m) != 0, (features & sun4v_m) != 0);
-  _L1_data_cache_line_size = picl.L1_data_cache_line_size();
-  _L2_data_cache_line_size = picl.L2_data_cache_line_size();
+  bool is_sun4v = (features & sun4v_m) != 0;
+  if (use_solaris_12_api && is_sun4v) {
+    // If the Solaris 12 API is supported and this is sun4v, use sysconf() to get the cache line sizes.
+    Sysconf l1_dcache_line_size(_SC_DCACHE_LINESZ);
+    if (l1_dcache_line_size.valid()) {
+      _L1_data_cache_line_size =  l1_dcache_line_size.value();
+    }
 
+    Sysconf l2_dcache_line_size(_SC_L2CACHE_LINESZ);
+    if (l2_dcache_line_size.valid()) {
+      _L2_data_cache_line_size = l2_dcache_line_size.value();
+    }
+  } else {
+    // Otherwise figure out the cache line sizes using PICL
+    bool is_fujitsu = (features & sparc64_family_m) != 0;
+    PICL picl(is_fujitsu, is_sun4v);
+    _L1_data_cache_line_size = picl.L1_data_cache_line_size();
+    _L2_data_cache_line_size = picl.L2_data_cache_line_size();
+  }
   return features;
 }
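The Sysinfo/Sysconf helpers above replace do_sysinfo() with small RAII wrappers: the constructor performs the query and owns the buffer, the destructor frees it, and call sites collapse to one-line predicates such as Sysinfo(SI_MACHINE).match("sun4v"). A standalone sketch of the same shape, with get_value() standing in for sysinfo(2) (everything here is illustrative, not Solaris or HotSpot API):

// Standalone sketch (not HotSpot code): RAII wrapper around a query that
// returns a malloc'd string, mirroring the new Sysinfo class.
#include <cstdlib>
#include <cstring>

// Stand-in for sysinfo(): returns the buffer size needed, and fills buf if big enough.
static long get_value(int key, char* buf, long buflen) {
  const char* v = (key == 1) ? "sparcv9" : "unknown";
  long needed = (long)std::strlen(v) + 1;
  if (buf != nullptr && buflen >= needed) std::memcpy(buf, v, (size_t)needed);
  return needed;
}

class Query {
  char* _string;
 public:
  explicit Query(int key) : _string(nullptr) {
    char tmp;
    long bufsize = get_value(key, &tmp, 1);          // ask for the required size
    if (bufsize > 0) {
      char* buf = (char*)std::malloc((size_t)bufsize);
      if (buf != nullptr && get_value(key, buf, bufsize) == bufsize) {
        _string = buf;                               // query succeeded, keep the buffer
      } else {
        std::free(buf);                              // free(NULL) is a no-op
      }
    }
  }
  ~Query() { std::free(_string); }
  bool valid() const { return _string != nullptr; }
  bool match(const char* s) const { return valid() && std::strcmp(_string, s) == 0; }
};

int main() {
  // Mirrors: if (Sysinfo(SI_ARCHITECTURE_64).match("sparcv9")) features |= generic_v9_m;
  return Query(1).match("sparcv9") ? 0 : 1;
}

The temporary object lives only for the duration of the expression, so the buffer is released as soon as the check is done.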
--- a/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -51,11 +51,8 @@
 // movl reg, [reg + thread_ptr_offset]     Load thread
 //
 void MacroAssembler::get_thread(Register thread) {
-  // can't use ExternalAddress because it can't take NULL
-  AddressLiteral null(0, relocInfo::none);
-
   prefix(FS_segment);
-  movptr(thread, null);
+  movptr(thread, ExternalAddress(NULL));
   assert(os::win32::get_thread_ptr_offset() != 0,
          "Thread Pointer Offset has not been initialized");
   movl(thread, Address(thread, os::win32::get_thread_ptr_offset()));
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -3491,6 +3491,8 @@
     "LoadPLocked",
     "StorePConditional", "StoreIConditional", "StoreLConditional",
     "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
+    "WeakCompareAndSwapI", "WeakCompareAndSwapL", "WeakCompareAndSwapP", "WeakCompareAndSwapN",
+    "CompareAndExchangeI", "CompareAndExchangeL", "CompareAndExchangeP", "CompareAndExchangeN",
     "StoreCM",
     "ClearArray",
     "GetAndAddI", "GetAndSetI", "GetAndSetP",
--- a/hotspot/src/share/vm/c1/c1_CFGPrinter.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_CFGPrinter.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,9 @@
 // compilation for later analysis.
 
 class CFGPrinterOutput;
-class IntervalList;
+class Interval;
+
+typedef GrowableArray<Interval*> IntervalList;
 
 class CFGPrinter : public AllStatic {
 private:
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -222,27 +222,36 @@
 }
 
 void Canonicalizer::do_ArrayLength    (ArrayLength*     x) {
-  NewArray* array = x->array()->as_NewArray();
-  if (array != NULL && array->length() != NULL) {
-    Constant* length = array->length()->as_Constant();
-    if (length != NULL) {
-      // do not use the Constant itself, but create a new Constant
-      // with same value Otherwise a Constant is live over multiple
-      // blocks without being registered in a state array.
+  NewArray*  na;
+  Constant*  ct;
+  LoadField* lf;
+
+  if ((na = x->array()->as_NewArray()) != NULL) {
+    // New arrays might have a known length.
+    // Do not use the Constant itself, but create a new Constant
+    // with the same value. Otherwise a Constant is live over multiple
+    // blocks without being registered in a state array.
+    Constant* length;
+    if (na->length() != NULL &&
+        (length = na->length()->as_Constant()) != NULL) {
       assert(length->type()->as_IntConstant() != NULL, "array length must be integer");
       set_constant(length->type()->as_IntConstant()->value());
     }
-  } else {
-    LoadField* lf = x->array()->as_LoadField();
-    if (lf != NULL) {
-      ciField* field = lf->field();
-      if (field->is_constant() && field->is_static()) {
-        // final static field
-        ciObject* c = field->constant_value().as_object();
-        if (c->is_array()) {
-          ciArray* array = (ciArray*) c;
-          set_constant(array->length());
-        }
+
+  } else if ((ct = x->array()->as_Constant()) != NULL) {
+    // Constant arrays have constant lengths.
+    ArrayConstant* cnst = ct->type()->as_ArrayConstant();
+    if (cnst != NULL) {
+      set_constant(cnst->value()->length());
+    }
+
+  } else if ((lf = x->array()->as_LoadField()) != NULL) {
+    ciField* field = lf->field();
+    if (field->is_constant() && field->is_static()) {
+      assert(PatchALot || ScavengeRootsInCode < 2, "Constant field loads are folded during parsing");
+      ciObject* c = field->constant_value().as_object();
+      if (!c->is_null_object()) {
+        set_constant(c->as_array()->length());
       }
     }
   }
--- a/hotspot/src/share/vm/c1/c1_Compiler.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compiler.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -228,8 +228,6 @@
   case vmIntrinsics::_getCharStringU:
   case vmIntrinsics::_putCharStringU:
 #ifdef TRACE_HAVE_INTRINSICS
-  case vmIntrinsics::_classID:
-  case vmIntrinsics::_threadID:
   case vmIntrinsics::_counterTime:
 #endif
     break;
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -43,6 +43,9 @@
 #if INCLUDE_ALL_GCS
 #include "gc/g1/heapRegion.hpp"
 #endif // INCLUDE_ALL_GCS
+#ifdef TRACE_HAVE_INTRINSICS
+#include "trace/traceMacros.hpp"
+#endif
 
 #ifdef ASSERT
 #define __ gen()->lir(__FILE__, __LINE__)->
@@ -3067,42 +3070,7 @@
   __ move(reg, result);
 }
 
-#ifdef TRACE_HAVE_INTRINSICS
-void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
-    LIR_Opr thread = getThreadPointer();
-    LIR_Opr osthread = new_pointer_register();
-    __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
-    size_t thread_id_size = OSThread::thread_id_size();
-    if (thread_id_size == (size_t) BytesPerLong) {
-      LIR_Opr id = new_register(T_LONG);
-      __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
-      __ convert(Bytecodes::_l2i, id, rlock_result(x));
-    } else if (thread_id_size == (size_t) BytesPerInt) {
-      __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
-    } else {
-      ShouldNotReachHere();
-    }
-}
-
-void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
-    CodeEmitInfo* info = state_for(x);
-    CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
-    BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
-    assert(info != NULL, "must have info");
-    LIRItem arg(x->argument_at(1), this);
-    arg.load_item();
-    LIR_Opr klass = new_pointer_register();
-    __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
-    LIR_Opr id = new_register(T_LONG);
-    ByteSize offset = TRACE_ID_OFFSET;
-    LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
-    __ move(trace_id_addr, id);
-    __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
-    __ store(id, trace_id_addr);
-    __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
-    __ move(id, rlock_result(x));
-}
-#endif
+
 
 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
   switch (x->id()) {
@@ -3115,8 +3083,6 @@
   }
 
 #ifdef TRACE_HAVE_INTRINSICS
-  case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
-  case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
   case vmIntrinsics::_counterTime:
     do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), x);
     break;
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -440,10 +440,7 @@
   void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
 
   void do_RuntimeCall(address routine, Intrinsic* x);
-#ifdef TRACE_HAVE_INTRINSICS
-  void do_ThreadIDIntrinsic(Intrinsic* x);
-  void do_ClassIDIntrinsic(Intrinsic* x);
-#endif
+
   ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
                         Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
                         ciKlass* callee_signature_k);
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1434,41 +1434,73 @@
 }
 
 #ifndef PRODUCT
+int interval_cmp(Interval* const& l, Interval* const& r) {
+  return l->from() - r->from();
+}
+
+bool find_interval(Interval* interval, IntervalArray* intervals) {
+  bool found;
+  int idx = intervals->find_sorted<Interval*, interval_cmp>(interval, found);
+
+  if (!found) {
+    return false;
+  }
+
+  int from = interval->from();
+
+  // The index we've found using binary search points to an interval
+  // that is defined at the same position as the interval we are looking for.
+  // Now we have to look around that index to find the exact interval.
+  for (int i = idx; i >= 0; i--) {
+    if (intervals->at(i) == interval) {
+      return true;
+    }
+    if (intervals->at(i)->from() != from) {
+      break;
+    }
+  }
+
+  for (int i = idx + 1; i < intervals->length(); i++) {
+    if (intervals->at(i) == interval) {
+      return true;
+    }
+    if (intervals->at(i)->from() != from) {
+      break;
+    }
+  }
+
+  return false;
+}
+
 bool LinearScan::is_sorted(IntervalArray* intervals) {
   int from = -1;
-  int i, j;
-  for (i = 0; i < intervals->length(); i ++) {
+  int null_count = 0;
+
+  for (int i = 0; i < intervals->length(); i++) {
     Interval* it = intervals->at(i);
     if (it != NULL) {
-      if (from > it->from()) {
-        assert(false, "");
-        return false;
-      }
+      assert(from <= it->from(), "Intervals are unordered");
       from = it->from();
-    }
-  }
-
-  // check in both directions if sorted list and unsorted list contain same intervals
-  for (i = 0; i < interval_count(); i++) {
-    if (interval_at(i) != NULL) {
-      int num_found = 0;
-      for (j = 0; j < intervals->length(); j++) {
-        if (interval_at(i) == intervals->at(j)) {
-          num_found++;
-        }
-      }
-      assert(num_found == 1, "lists do not contain same intervals");
-    }
-  }
-  for (j = 0; j < intervals->length(); j++) {
-    int num_found = 0;
-    for (i = 0; i < interval_count(); i++) {
-      if (interval_at(i) == intervals->at(j)) {
-        num_found++;
-      }
-    }
-    assert(num_found == 1, "lists do not contain same intervals");
-  }
+    } else {
+      null_count++;
+    }
+  }
+
+  assert(null_count == 0, "Sorted intervals should not contain nulls");
+
+  null_count = 0;
+
+  for (int i = 0; i < interval_count(); i++) {
+    Interval* interval = interval_at(i);
+    if (interval != NULL) {
+      assert(find_interval(interval, intervals), "Lists do not contain same intervals");
+    } else {
+      null_count++;
+    }
+  }
+
+  assert(interval_count() - null_count == intervals->length(),
+      "Sorted list should contain the same amount of non-NULL intervals as unsorted list");
 
   return true;
 }
@@ -1536,7 +1568,7 @@
       sorted_len++;
     }
   }
-  IntervalArray* sorted_list = new IntervalArray(sorted_len);
+  IntervalArray* sorted_list = new IntervalArray(sorted_len, sorted_len, NULL);
 
   // special sorting algorithm: the original interval-list is almost sorted,
   // only some intervals are swapped. So this is much faster than a complete QuickSort
@@ -1574,8 +1606,8 @@
     _needs_full_resort = false;
   }
 
-  IntervalArray* old_list      = _sorted_intervals;
-  IntervalList*  new_list      = _new_intervals_from_allocation;
+  IntervalArray* old_list = _sorted_intervals;
+  IntervalList* new_list = _new_intervals_from_allocation;
   int old_len = old_list->length();
   int new_len = new_list->length();
 
@@ -1589,7 +1621,8 @@
   new_list->sort(interval_cmp);
 
   // merge old and new list (both already sorted) into one combined list
-  IntervalArray* combined_list = new IntervalArray(old_len + new_len);
+  int combined_list_len = old_len + new_len;
+  IntervalArray* combined_list = new IntervalArray(combined_list_len, combined_list_len, NULL);
   int old_idx = 0;
   int new_idx = 0;
 
@@ -3211,6 +3244,10 @@
       has_error = true;
     }
 
+    // special intervals that are created in MoveResolver
+    // -> ignore them because the range information has no meaning there
+    if (i1->from() == 1 && i1->to() == 2) continue;
+
     if (i1->first() == Range::end()) {
       tty->print_cr("Interval %d has no Range", i1->reg_num()); i1->print(); tty->cr();
       has_error = true;
@@ -3225,18 +3262,13 @@
 
     for (int j = i + 1; j < len; j++) {
       Interval* i2 = interval_at(j);
-      if (i2 == NULL) continue;
-
-      // special intervals that are created in MoveResolver
-      // -> ignore them because the range information has no meaning there
-      if (i1->from() == 1 && i1->to() == 2) continue;
-      if (i2->from() == 1 && i2->to() == 2) continue;
+      if (i2 == NULL || (i2->from() == 1 && i2->to() == 2)) continue;
 
       int r1 = i1->assigned_reg();
       int r1Hi = i1->assigned_regHi();
       int r2 = i2->assigned_reg();
       int r2Hi = i2->assigned_regHi();
-      if (i1->intersects(i2) && (r1 == r2 || r1 == r2Hi || (r1Hi != any_reg && (r1Hi == r2 || r1Hi == r2Hi)))) {
+      if ((r1 == r2 || r1 == r2Hi || (r1Hi != any_reg && (r1Hi == r2 || r1Hi == r2Hi))) && i1->intersects(i2)) {
         tty->print_cr("Intervals %d and %d overlap and have the same register assigned", i1->reg_num(), i2->reg_num());
         i1->print(); tty->cr();
         i2->print(); tty->cr();
@@ -3429,7 +3461,8 @@
 
 void RegisterVerifier::verify(BlockBegin* start) {
   // setup input registers (method arguments) for first block
-  IntervalList* input_state = new IntervalList(state_size(), NULL);
+  int input_state_len = state_size();
+  IntervalList* input_state = new IntervalList(input_state_len, input_state_len, NULL);
   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
   for (int n = 0; n < args->length(); n++) {
     LIR_Opr opr = args->at(n);
@@ -3543,7 +3576,7 @@
 
 IntervalList* RegisterVerifier::copy(IntervalList* input_state) {
   IntervalList* copy_state = new IntervalList(input_state->length());
-  copy_state->push_all(input_state);
+  copy_state->appendAll(input_state);
   return copy_state;
 }
 
@@ -5506,7 +5539,7 @@
     IntervalList* processed = _spill_intervals[reg];
     for (int i = 0; i < _spill_intervals[regHi]->length(); i++) {
       Interval* it = _spill_intervals[regHi]->at(i);
-      if (processed->index_of(it) == -1) {
+      if (processed->find_from_end(it) == -1) {
         remove_from_list(it);
         split_and_spill_interval(it);
       }
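The new is_sorted()/find_interval() check above replaces an O(n^2) cross-check with a binary search on the interval start position, followed by a scan of the neighbours that share that position, since several intervals can begin at the same point without being the interval we are after. A standalone sketch of that lookup (std::lower_bound lands at the start of the equal-key run, so a forward scan is enough; GrowableArray::find_sorted() can land anywhere in the run, hence the two-direction scan in the patch):

// Standalone sketch (not HotSpot code): binary search by key, then identity
// scan over the elements sharing that key.
#include <vector>
#include <algorithm>

struct Item { int from; int id; };

static bool contains(const std::vector<Item*>& sorted, Item* wanted) {
  auto it = std::lower_bound(sorted.begin(), sorted.end(), wanted,
                             [](const Item* a, const Item* b) { return a->from < b->from; });
  for (; it != sorted.end() && (*it)->from == wanted->from; ++it) {
    if (*it == wanted) return true;                  // pointer identity, not key equality
  }
  return false;
}

int main() {
  Item a{1, 0}, b{1, 1}, c{5, 2};
  Item d{1, 3};                                      // same key as a and b, but not stored
  std::vector<Item*> v = {&a, &b, &c};               // already sorted by 'from'
  return (contains(v, &b) && !contains(v, &d)) ? 0 : 1;
}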
--- a/hotspot/src/share/vm/c1/c1_LinearScan.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,8 @@
 class MoveResolver;
 class Range;
 
-define_array(IntervalArray, Interval*)
-define_stack(IntervalList, IntervalArray)
+typedef GrowableArray<Interval*> IntervalArray;
+typedef GrowableArray<Interval*> IntervalList;
 
 define_array(IntervalsArray, IntervalList*)
 define_stack(IntervalsList, IntervalsArray)
--- a/hotspot/src/share/vm/ci/ciArray.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciArray.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -107,8 +107,9 @@
   intptr_t header = arrayOopDesc::base_offset_in_bytes(elembt);
   intptr_t index = (element_offset - header) >> shift;
   intptr_t offset = header + ((intptr_t)index << shift);
-  if (offset != element_offset || index != (jint)index)
+  if (offset != element_offset || index != (jint)index || index < 0 || index >= length()) {
     return ciConstant();
+  }
   return element_value((jint) index);
 }
 
--- a/hotspot/src/share/vm/ci/ciConstantPoolCache.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciConstantPoolCache.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -41,15 +41,21 @@
   _keys = new (arena) GrowableArray<int>(arena, expected_size, 0, 0);
 }
 
+int ciConstantPoolCache::key_compare(const int& key, const int& elt) {
+  if (key < elt)      return -1;
+  else if (key > elt) return 1;
+  else                  return 0;
+}
+
 // ------------------------------------------------------------------
 // ciConstantPoolCache::get
 //
 // Get the entry at some index
 void* ciConstantPoolCache::get(int index) {
   ASSERT_IN_VM;
-  int pos = find(index);
-  if (pos >= _keys->length() ||
-      _keys->at(pos) != index) {
+  bool found = false;
+  int pos = _keys->find_sorted<int, ciConstantPoolCache::key_compare>(index, found);
+  if (!found) {
     // This element is not present in the cache.
     return NULL;
   }
@@ -57,42 +63,15 @@
 }
 
 // ------------------------------------------------------------------
-// ciConstantPoolCache::find
-//
-// Use binary search to find the position of this index in the cache.
-// If there is no entry in the cache corresponding to this oop, return
-// the position at which the index would be inserted.
-int ciConstantPoolCache::find(int key) {
-  int min = 0;
-  int max = _keys->length()-1;
-
-  while (max >= min) {
-    int mid = (max + min) / 2;
-    int value = _keys->at(mid);
-    if (value < key) {
-      min = mid + 1;
-    } else if (value > key) {
-      max = mid - 1;
-    } else {
-      return mid;
-    }
-  }
-  return min;
-}
-
-// ------------------------------------------------------------------
 // ciConstantPoolCache::insert
 //
 // Insert a ciObject into the table at some index.
 void ciConstantPoolCache::insert(int index, void* elem) {
-  int i;
-  int pos = find(index);
-  for (i = _keys->length()-1; i >= pos; i--) {
-    _keys->at_put_grow(i+1, _keys->at(i));
-    _elements->at_put_grow(i+1, _elements->at(i));
-  }
-  _keys->at_put_grow(pos, index);
-  _elements->at_put_grow(pos, elem);
+  bool found = false;
+  int pos = _keys->find_sorted<int, ciConstantPoolCache::key_compare>(index, found);
+  assert(!found, "duplicate");
+  _keys->insert_before(pos, index);
+  _elements->insert_before(pos, elem);
 }
 
 // ------------------------------------------------------------------
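ciConstantPoolCache keeps two parallel, sorted GrowableArrays and now reuses find_sorted()/insert_before() instead of its own binary search and shift-by-hand insert. The equivalent shape in standard C++ is a sorted-vector map where lookup and insertion share one lower_bound; a minimal sketch (illustrative only, not the HotSpot GrowableArray API):

// Standalone sketch (not HotSpot code): sorted parallel arrays with a shared
// binary search for get() and insert(), as in ciConstantPoolCache.
#include <vector>
#include <algorithm>
#include <cassert>

class SortedCache {
  std::vector<int>   _keys;       // kept sorted
  std::vector<void*> _elements;   // parallel to _keys
 public:
  void* get(int key) const {
    auto it = std::lower_bound(_keys.begin(), _keys.end(), key);
    if (it == _keys.end() || *it != key) return nullptr;      // not in the cache
    return _elements[it - _keys.begin()];
  }
  void insert(int key, void* elem) {
    auto it = std::lower_bound(_keys.begin(), _keys.end(), key);
    assert(it == _keys.end() || *it != key);                  // "duplicate" check, as in the patch
    _elements.insert(_elements.begin() + (it - _keys.begin()), elem);
    _keys.insert(it, key);
  }
};

int main() {
  SortedCache cache;
  int payload = 42;
  cache.insert(7, &payload);
  return (cache.get(7) == &payload && cache.get(8) == nullptr) ? 0 : 1;
}

The same pattern (key_compare + find_sorted + insert_before) is applied to ciObjectFactory::get_metadata() further down.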
--- a/hotspot/src/share/vm/ci/ciConstantPoolCache.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciConstantPoolCache.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -38,7 +38,7 @@
   GrowableArray<int>*   _keys;
   GrowableArray<void*>* _elements;
 
-  int find(int index);
+  static int key_compare(const int& key, const int& elt);
 
 public:
   ciConstantPoolCache(Arena* arena, int expected_size);
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -260,6 +260,13 @@
   return new_object;
 }
 
+int ciObjectFactory::metadata_compare(Metadata* const& key, ciMetadata* const& elt) {
+  Metadata* value = elt->constant_encoding();
+  if (key < value)      return -1;
+  else if (key > value) return 1;
+  else                  return 0;
+}
+
 // ------------------------------------------------------------------
 // ciObjectFactory::get_metadata
 //
@@ -280,7 +287,8 @@
   }
 #endif // ASSERT
   int len = _ci_metadata->length();
-  int index = find(key, _ci_metadata);
+  bool found = false;
+  int index = _ci_metadata->find_sorted<Metadata*, ciObjectFactory::metadata_compare>(key, found);
 #ifdef ASSERT
   if (CIObjectFactoryVerify) {
     for (int i=0; i<_ci_metadata->length(); i++) {
@@ -290,7 +298,8 @@
     }
   }
 #endif
-  if (!is_found_at(index, key, _ci_metadata)) {
+
+  if (!found) {
     // The ciMetadata does not yet exist. Create it and insert it
     // into the cache.
     ciMetadata* new_object = create_new_metadata(key);
@@ -300,10 +309,10 @@
     if (len != _ci_metadata->length()) {
       // creating the new object has recursively entered new objects
       // into the table.  We need to recompute our index.
-      index = find(key, _ci_metadata);
+      index = _ci_metadata->find_sorted<Metadata*, ciObjectFactory::metadata_compare>(key, found);
     }
-    assert(!is_found_at(index, key, _ci_metadata), "no double insert");
-    insert(index, new_object, _ci_metadata);
+    assert(!found, "no double insert");
+    _ci_metadata->insert_before(index, new_object);
     return new_object;
   }
   return _ci_metadata->at(index)->as_metadata();
@@ -655,60 +664,6 @@
   obj->set_ident(_next_ident++);
 }
 
-// ------------------------------------------------------------------
-// ciObjectFactory::find
-//
-// Use binary search to find the position of this oop in the cache.
-// If there is no entry in the cache corresponding to this oop, return
-// the position at which the oop should be inserted.
-int ciObjectFactory::find(Metadata* key, GrowableArray<ciMetadata*>* objects) {
-  int min = 0;
-  int max = objects->length()-1;
-
-  // print_contents();
-
-  while (max >= min) {
-    int mid = (max + min) / 2;
-    Metadata* value = objects->at(mid)->constant_encoding();
-    if (value < key) {
-      min = mid + 1;
-    } else if (value > key) {
-      max = mid - 1;
-    } else {
-      return mid;
-    }
-  }
-  return min;
-}
-
-// ------------------------------------------------------------------
-// ciObjectFactory::is_found_at
-//
-// Verify that the binary seach found the given key.
-bool ciObjectFactory::is_found_at(int index, Metadata* key, GrowableArray<ciMetadata*>* objects) {
-  return (index < objects->length() &&
-          objects->at(index)->constant_encoding() == key);
-}
-
-
-// ------------------------------------------------------------------
-// ciObjectFactory::insert
-//
-// Insert a ciObject into the table at some index.
-void ciObjectFactory::insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects) {
-  int len = objects->length();
-  if (len == index) {
-    objects->append(obj);
-  } else {
-    objects->append(objects->at(len-1));
-    int pos;
-    for (pos = len-2; pos >= index; pos--) {
-      objects->at_put(pos+1,objects->at(pos));
-    }
-    objects->at_put(index, obj);
-  }
-}
-
 static ciObjectFactory::NonPermObject* emptyBucket = NULL;
 
 // ------------------------------------------------------------------
--- a/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -68,9 +68,7 @@
   NonPermObject* _non_perm_bucket[NON_PERM_BUCKETS];
   int _non_perm_count;
 
-  int find(Metadata* key, GrowableArray<ciMetadata*>* objects);
-  bool is_found_at(int index, Metadata* key, GrowableArray<ciMetadata*>* objects);
-  void insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects);
+  static int metadata_compare(Metadata* const& key, ciMetadata* const& elt);
 
   ciObject* create_new_object(oop o);
   ciMetadata* create_new_metadata(Metadata* o);
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -5380,7 +5380,7 @@
     }
   }
 
-  TRACE_INIT_ID(ik);
+  TRACE_INIT_KLASS_ID(ik);
 
   // If we reach here, all is well.
   // Now remove the InstanceKlass* from the _klass_to_deallocate field
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -37,6 +37,7 @@
 #include "gc/shared/generation.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/oopMapCache.hpp"
+#include "logging/logTag.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "memory/oopFactory.hpp"
@@ -417,34 +418,30 @@
 #if INCLUDE_CDS
 void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
   assert(DumpSharedSpaces, "only called at dump time");
-  tty->print_cr("Hint: enable -XX:+TraceClassPaths to diagnose the failure");
+  tty->print_cr("Hint: enable -Xlog:classpath=info to diagnose the failure");
   vm_exit_during_initialization(error, message);
 }
 #endif
 
-void ClassLoader::trace_class_path(outputStream* out, const char* msg, const char* name) {
-  if (!TraceClassPaths) {
-    return;
-  }
-
-  if (msg) {
-    out->print("%s", msg);
-  }
-  if (name) {
-    if (strlen(name) < 256) {
-      out->print("%s", name);
-    } else {
-      // For very long paths, we need to print each character separately,
-      // as print_cr() has a length limit
-      while (name[0] != '\0') {
-        out->print("%c", name[0]);
-        name++;
+void ClassLoader::trace_class_path(const char* msg, const char* name) {
+  if (log_is_enabled(Info, classpath)) {
+    ResourceMark rm;
+    outputStream* out = LogHandle(classpath)::info_stream();
+    if (msg) {
+      out->print("%s", msg);
+    }
+    if (name) {
+      if (strlen(name) < 256) {
+        out->print("%s", name);
+      } else {
+        // For very long paths, we need to print each character separately,
+        // as print_cr() has a length limit
+        while (name[0] != '\0') {
+          out->print("%c", name[0]);
+          name++;
+        }
       }
     }
-  }
-  if (msg && msg[0] == '[') {
-    out->print_cr("]");
-  } else {
     out->cr();
   }
 }
@@ -470,11 +467,13 @@
 void ClassLoader::setup_bootstrap_search_path() {
   assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
   const char* sys_class_path = Arguments::get_sysclasspath();
+  const char* java_class_path = Arguments::get_appclasspath();
   if (PrintSharedArchiveAndExit) {
     // Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily
     // the same as the bootcp of the shared archive.
   } else {
-    trace_class_path(tty, "[Bootstrap loader class path=", sys_class_path);
+    trace_class_path("bootstrap loader class path=", sys_class_path);
+    trace_class_path("classpath: ", java_class_path);
   }
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
@@ -578,9 +577,7 @@
         }
       }
     }
-    if (TraceClassPaths) {
-      tty->print_cr("[Opened %s]", path);
-    }
+    log_info(classpath)("opened: %s", path);
     log_info(classload)("opened: %s", path);
   } else {
     // Directory
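TraceClassPaths output moves to the 'classpath' unified-logging tag, and trace_class_path() now tests log_is_enabled(Info, classpath) before doing any formatting. Below is the guard-before-format pattern in plain C++, with a hand-rolled level check standing in for HotSpot's log_is_enabled()/LogHandle machinery (names and the example path are illustrative only):

// Standalone sketch (not HotSpot code): skip message construction entirely
// when the corresponding log level is disabled.
#include <cstdio>
#include <string>

enum class Level { Off = 0, Info = 1 };
static Level g_classpath_level = Level::Info;   // imagine parsed from -Xlog:classpath=info

static bool log_enabled(Level wanted) {
  return static_cast<int>(wanted) <= static_cast<int>(g_classpath_level);
}

static void trace_class_path(const char* msg, const char* name = nullptr) {
  if (!log_enabled(Level::Info)) return;        // no string work when logging is off
  std::string line = msg;
  if (name != nullptr) line += name;
  std::printf("[info][classpath] %s\n", line.c_str());
}

int main() {
  trace_class_path("bootstrap loader class path=", "/example/bootclasspath");
  return 0;
}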
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -331,7 +331,7 @@
   static void  exit_with_path_failure(const char* error, const char* message);
 #endif
 
-  static void  trace_class_path(outputStream* out, const char* msg, const char* name = NULL);
+  static void  trace_class_path(const char* msg, const char* name = NULL);
 
   // VM monitoring and management support
   static jlong classloader_time_ms();
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -135,8 +135,10 @@
     //          via a store to _pd_set.
     OrderAccess::release_store_ptr(&_pd_set, new_head);
   }
-  if (TraceProtectionDomainVerification && WizardMode) {
-    print();
+  if (log_is_enabled(Trace, protectiondomain)) {
+    ResourceMark rm;
+    outputStream* log = LogHandle(protectiondomain)::trace_stream();
+    print_count(log);
   }
 }
 
--- a/hotspot/src/share/vm/classfile/dictionary.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -29,6 +29,7 @@
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.hpp"
 #include "utilities/hashtable.hpp"
+#include "utilities/ostream.hpp"
 
 class DictionaryEntry;
 class PSPromotionManager;
@@ -323,14 +324,14 @@
     return (klass->name() == class_name && _loader_data == loader_data);
   }
 
-  void print() {
+  void print_count(outputStream *st) {
     int count = 0;
     for (ProtectionDomainEntry* current = _pd_set;
                                 current != NULL;
                                 current = current->_next) {
       count++;
     }
-    tty->print_cr("pd set = #%d", count);
+    st->print_cr("pd set count = #%d", count);
   }
 };
 
--- a/hotspot/src/share/vm/classfile/klassFactory.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/klassFactory.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "classfile/klassFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jvmtiEnvBase.hpp"
+#include "trace/traceMacros.hpp"
 
 static ClassFileStream* prologue(ClassFileStream* stream,
                                  Symbol* name,
@@ -136,5 +137,7 @@
     result->set_cached_class_file(cached_class_file);
   }
 
+  TRACE_KLASS_CREATION(result, parser, THREAD);
+
   return result;
 }
--- a/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,15 +26,15 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/sharedPathsMiscInfo.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "runtime/arguments.hpp"
+#include "utilities/ostream.hpp"
 
 void SharedPathsMiscInfo::add_path(const char* path, int type) {
-  if (TraceClassPaths) {
-    tty->print("[type=%s] ", type_name(type));
-    trace_class_path("[Add misc shared path ", path);
-  }
+  log_info(classpath)("type=%s ", type_name(type));
+  ClassLoader::trace_class_path("add misc shared path ", path);
   write(path, strlen(path) + 1);
   write_jint(jint(type));
 }
@@ -67,11 +67,29 @@
 }
 
 bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
-  ClassLoader::trace_class_path(tty, msg, name);
+  ClassLoader::trace_class_path(msg, name);
   MetaspaceShared::set_archive_loading_failed();
   return false;
 }
 
+void SharedPathsMiscInfo::print_path(int type, const char* path) {
+  ResourceMark rm;
+  outputStream* out = LogHandle(classpath)::info_stream();
+  switch (type) {
+  case BOOT:
+    out->print("Expecting -Dsun.boot.class.path=%s", path);
+    break;
+  case NON_EXIST:
+    out->print("Expecting that %s does not exist", path);
+    break;
+  case REQUIRED:
+    out->print("Expecting that file %s must exist and is not altered", path);
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+}
+
 bool SharedPathsMiscInfo::check() {
   // The whole buffer must be 0 terminated so that we can use strlen and strcmp
   // without fear.
@@ -90,17 +108,14 @@
     if (!read_jint(&type)) {
       return fail("Corrupted archive file header");
     }
-    if (TraceClassPaths) {
-      tty->print("[type=%s ", type_name(type));
-      print_path(tty, type, path);
-      tty->print_cr("]");
-    }
+    log_info(classpath)("type=%s ", type_name(type));
+    print_path(type, path);
     if (!check(type, path)) {
       if (!PrintSharedArchiveAndExit) {
         return false;
       }
     } else {
-      trace_class_path("[ok");
+      ClassLoader::trace_class_path("ok");
     }
   }
 
--- a/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -64,9 +64,6 @@
   void write(const void* ptr, size_t size);
   bool read(void* ptr, size_t size);
 
-  static void trace_class_path(const char* msg, const char* name = NULL) {
-    ClassLoader::trace_class_path(tty, msg, name);
-  }
 protected:
   static bool fail(const char* msg, const char* name = NULL);
   virtual bool check(jint type, const char* path);
@@ -144,21 +141,7 @@
     }
   }
 
-  virtual void print_path(outputStream* out, int type, const char* path) {
-    switch (type) {
-    case BOOT:
-      out->print("Expecting -Dsun.boot.class.path=%s", path);
-      break;
-    case NON_EXIST:
-      out->print("Expecting that %s does not exist", path);
-      break;
-    case REQUIRED:
-      out->print("Expecting that file %s must exist and is not altered", path);
-      break;
-    default:
-      ShouldNotReachHere();
-    }
-  }
+  virtual void print_path(int type, const char* path);
 
   bool check();
   bool read_jint(jint *ptr) {
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -430,12 +430,15 @@
 
   // Now we have to call back to java to check if the initiating class has access
   JavaValue result(T_VOID);
-  if (TraceProtectionDomainVerification) {
+  if (log_is_enabled(Debug, protectiondomain)) {
+    ResourceMark rm;
     // Print out trace information
-    tty->print_cr("Checking package access");
-    tty->print(" - class loader:      "); class_loader()->print_value_on(tty);      tty->cr();
-    tty->print(" - protection domain: "); protection_domain()->print_value_on(tty); tty->cr();
-    tty->print(" - loading:           "); klass()->print_value_on(tty);             tty->cr();
+    outputStream* log = LogHandle(protectiondomain)::debug_stream();
+    log->print_cr("Checking package access");
+    log->print("class loader: "); class_loader()->print_value_on(log);
+    log->print(" protection domain: "); protection_domain()->print_value_on(log);
+    log->print(" loading: "); klass()->print_value_on(log);
+    log->cr();
   }
 
   KlassHandle system_loader(THREAD, SystemDictionary::ClassLoader_klass());
@@ -448,13 +451,10 @@
                          protection_domain,
                          THREAD);
 
-  if (TraceProtectionDomainVerification) {
-    if (HAS_PENDING_EXCEPTION) {
-      tty->print_cr(" -> DENIED !!!!!!!!!!!!!!!!!!!!!");
-    } else {
-     tty->print_cr(" -> granted");
-    }
-    tty->cr();
+  if (HAS_PENDING_EXCEPTION) {
+    log_debug(protectiondomain)("DENIED !!!!!!!!!!!!!!!!!!!!!");
+  } else {
+   log_debug(protectiondomain)("granted");
   }
 
   if (HAS_PENDING_EXCEPTION) return;
--- a/hotspot/src/share/vm/classfile/vmSymbols.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -328,8 +328,6 @@
   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
   switch(id) {
 #ifdef TRACE_HAVE_INTRINSICS
-  case vmIntrinsics::_classID:
-  case vmIntrinsics::_threadID:
   case vmIntrinsics::_counterTime:
 #endif
   case vmIntrinsics::_currentTimeMillis:
@@ -544,6 +542,42 @@
   case vmIntrinsics::_putLongVolatile:
   case vmIntrinsics::_putFloatVolatile:
   case vmIntrinsics::_putDoubleVolatile:
+  case vmIntrinsics::_getObjectAcquire:
+  case vmIntrinsics::_getBooleanAcquire:
+  case vmIntrinsics::_getByteAcquire:
+  case vmIntrinsics::_getShortAcquire:
+  case vmIntrinsics::_getCharAcquire:
+  case vmIntrinsics::_getIntAcquire:
+  case vmIntrinsics::_getLongAcquire:
+  case vmIntrinsics::_getFloatAcquire:
+  case vmIntrinsics::_getDoubleAcquire:
+  case vmIntrinsics::_putObjectRelease:
+  case vmIntrinsics::_putBooleanRelease:
+  case vmIntrinsics::_putByteRelease:
+  case vmIntrinsics::_putShortRelease:
+  case vmIntrinsics::_putCharRelease:
+  case vmIntrinsics::_putIntRelease:
+  case vmIntrinsics::_putLongRelease:
+  case vmIntrinsics::_putFloatRelease:
+  case vmIntrinsics::_putDoubleRelease:
+  case vmIntrinsics::_getObjectOpaque:
+  case vmIntrinsics::_getBooleanOpaque:
+  case vmIntrinsics::_getByteOpaque:
+  case vmIntrinsics::_getShortOpaque:
+  case vmIntrinsics::_getCharOpaque:
+  case vmIntrinsics::_getIntOpaque:
+  case vmIntrinsics::_getLongOpaque:
+  case vmIntrinsics::_getFloatOpaque:
+  case vmIntrinsics::_getDoubleOpaque:
+  case vmIntrinsics::_putObjectOpaque:
+  case vmIntrinsics::_putBooleanOpaque:
+  case vmIntrinsics::_putByteOpaque:
+  case vmIntrinsics::_putShortOpaque:
+  case vmIntrinsics::_putCharOpaque:
+  case vmIntrinsics::_putIntOpaque:
+  case vmIntrinsics::_putLongOpaque:
+  case vmIntrinsics::_putFloatOpaque:
+  case vmIntrinsics::_putDoubleOpaque:
   case vmIntrinsics::_getByte_raw:
   case vmIntrinsics::_getShort_raw:
   case vmIntrinsics::_getChar_raw:
@@ -569,9 +603,27 @@
   case vmIntrinsics::_loadFence:
   case vmIntrinsics::_storeFence:
   case vmIntrinsics::_fullFence:
+  case vmIntrinsics::_compareAndSwapLong:
+  case vmIntrinsics::_weakCompareAndSwapLong:
+  case vmIntrinsics::_weakCompareAndSwapLongAcquire:
+  case vmIntrinsics::_weakCompareAndSwapLongRelease:
+  case vmIntrinsics::_compareAndSwapInt:
+  case vmIntrinsics::_weakCompareAndSwapInt:
+  case vmIntrinsics::_weakCompareAndSwapIntAcquire:
+  case vmIntrinsics::_weakCompareAndSwapIntRelease:
   case vmIntrinsics::_compareAndSwapObject:
-  case vmIntrinsics::_compareAndSwapLong:
-  case vmIntrinsics::_compareAndSwapInt:
+  case vmIntrinsics::_weakCompareAndSwapObject:
+  case vmIntrinsics::_weakCompareAndSwapObjectAcquire:
+  case vmIntrinsics::_weakCompareAndSwapObjectRelease:
+  case vmIntrinsics::_compareAndExchangeIntVolatile:
+  case vmIntrinsics::_compareAndExchangeIntAcquire:
+  case vmIntrinsics::_compareAndExchangeIntRelease:
+  case vmIntrinsics::_compareAndExchangeLongVolatile:
+  case vmIntrinsics::_compareAndExchangeLongAcquire:
+  case vmIntrinsics::_compareAndExchangeLongRelease:
+  case vmIntrinsics::_compareAndExchangeObjectVolatile:
+  case vmIntrinsics::_compareAndExchangeObjectAcquire:
+  case vmIntrinsics::_compareAndExchangeObjectRelease:
     if (!InlineUnsafeOps) return true;
     break;
   case vmIntrinsics::_getShortUnaligned:
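Every Unsafe access-mode and compare-and-exchange intrinsic added in this hunk joins the existing case fall-through, so one flag still disables the whole family. A minimal sketch of that gating, with a hypothetical helper name standing in for the function above:

  static bool unsafe_intrinsic_disabled_sketch(vmIntrinsics::ID id) {  // hypothetical name, illustration only
    switch (id) {
    case vmIntrinsics::_compareAndExchangeIntVolatile:  // added above
    case vmIntrinsics::_weakCompareAndSwapInt:           // added above
    case vmIntrinsics::_getIntAcquire:                   // added above
    case vmIntrinsics::_putIntRelease:                   // added above
      if (!InlineUnsafeOps) return true;                 // -XX:-InlineUnsafeOps turns them all off
      break;
    default:
      break;
    }
    return false;
  }
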
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1146,6 +1146,64 @@
   do_intrinsic(_putFloatVolatile,         jdk_internal_misc_Unsafe,     putFloatVolatile_name, putFloat_signature,     F_RN)  \
   do_intrinsic(_putDoubleVolatile,        jdk_internal_misc_Unsafe,     putDoubleVolatile_name, putDouble_signature,   F_RN)  \
                                                                                                                         \
+  do_name(getObjectOpaque_name,"getObjectOpaque")     do_name(putObjectOpaque_name,"putObjectOpaque")                   \
+  do_name(getBooleanOpaque_name,"getBooleanOpaque")   do_name(putBooleanOpaque_name,"putBooleanOpaque")                 \
+  do_name(getByteOpaque_name,"getByteOpaque")         do_name(putByteOpaque_name,"putByteOpaque")                       \
+  do_name(getShortOpaque_name,"getShortOpaque")       do_name(putShortOpaque_name,"putShortOpaque")                     \
+  do_name(getCharOpaque_name,"getCharOpaque")         do_name(putCharOpaque_name,"putCharOpaque")                       \
+  do_name(getIntOpaque_name,"getIntOpaque")           do_name(putIntOpaque_name,"putIntOpaque")                         \
+  do_name(getLongOpaque_name,"getLongOpaque")         do_name(putLongOpaque_name,"putLongOpaque")                       \
+  do_name(getFloatOpaque_name,"getFloatOpaque")       do_name(putFloatOpaque_name,"putFloatOpaque")                     \
+  do_name(getDoubleOpaque_name,"getDoubleOpaque")     do_name(putDoubleOpaque_name,"putDoubleOpaque")                   \
+                                                                                                                        \
+  do_intrinsic(_getObjectOpaque,          jdk_internal_misc_Unsafe,        getObjectOpaque_name, getObject_signature,   F_R)  \
+  do_intrinsic(_getBooleanOpaque,         jdk_internal_misc_Unsafe,        getBooleanOpaque_name, getBoolean_signature, F_R)  \
+  do_intrinsic(_getByteOpaque,            jdk_internal_misc_Unsafe,        getByteOpaque_name, getByte_signature,       F_R)  \
+  do_intrinsic(_getShortOpaque,           jdk_internal_misc_Unsafe,        getShortOpaque_name, getShort_signature,     F_R)  \
+  do_intrinsic(_getCharOpaque,            jdk_internal_misc_Unsafe,        getCharOpaque_name, getChar_signature,       F_R)  \
+  do_intrinsic(_getIntOpaque,             jdk_internal_misc_Unsafe,        getIntOpaque_name, getInt_signature,         F_R)  \
+  do_intrinsic(_getLongOpaque,            jdk_internal_misc_Unsafe,        getLongOpaque_name, getLong_signature,       F_R)  \
+  do_intrinsic(_getFloatOpaque,           jdk_internal_misc_Unsafe,        getFloatOpaque_name, getFloat_signature,     F_R)  \
+  do_intrinsic(_getDoubleOpaque,          jdk_internal_misc_Unsafe,        getDoubleOpaque_name, getDouble_signature,   F_R)  \
+  do_intrinsic(_putObjectOpaque,          jdk_internal_misc_Unsafe,        putObjectOpaque_name, putObject_signature,   F_R)  \
+  do_intrinsic(_putBooleanOpaque,         jdk_internal_misc_Unsafe,        putBooleanOpaque_name, putBoolean_signature, F_R)  \
+  do_intrinsic(_putByteOpaque,            jdk_internal_misc_Unsafe,        putByteOpaque_name, putByte_signature,       F_R)  \
+  do_intrinsic(_putShortOpaque,           jdk_internal_misc_Unsafe,        putShortOpaque_name, putShort_signature,     F_R)  \
+  do_intrinsic(_putCharOpaque,            jdk_internal_misc_Unsafe,        putCharOpaque_name, putChar_signature,       F_R)  \
+  do_intrinsic(_putIntOpaque,             jdk_internal_misc_Unsafe,        putIntOpaque_name, putInt_signature,         F_R)  \
+  do_intrinsic(_putLongOpaque,            jdk_internal_misc_Unsafe,        putLongOpaque_name, putLong_signature,       F_R)  \
+  do_intrinsic(_putFloatOpaque,           jdk_internal_misc_Unsafe,        putFloatOpaque_name, putFloat_signature,     F_R)  \
+  do_intrinsic(_putDoubleOpaque,          jdk_internal_misc_Unsafe,        putDoubleOpaque_name, putDouble_signature,   F_R)  \
+                                                                                                                        \
+  do_name(getObjectAcquire_name,  "getObjectAcquire")    do_name(putObjectRelease_name,  "putObjectRelease")            \
+  do_name(getBooleanAcquire_name, "getBooleanAcquire")   do_name(putBooleanRelease_name, "putBooleanRelease")           \
+  do_name(getByteAcquire_name,    "getByteAcquire")      do_name(putByteRelease_name,    "putByteRelease")              \
+  do_name(getShortAcquire_name,   "getShortAcquire")     do_name(putShortRelease_name,   "putShortRelease")             \
+  do_name(getCharAcquire_name,    "getCharAcquire")      do_name(putCharRelease_name,    "putCharRelease")              \
+  do_name(getIntAcquire_name,     "getIntAcquire")       do_name(putIntRelease_name,     "putIntRelease")               \
+  do_name(getLongAcquire_name,    "getLongAcquire")      do_name(putLongRelease_name,    "putLongRelease")              \
+  do_name(getFloatAcquire_name,   "getFloatAcquire")     do_name(putFloatRelease_name,   "putFloatRelease")             \
+  do_name(getDoubleAcquire_name,  "getDoubleAcquire")    do_name(putDoubleRelease_name,  "putDoubleRelease")            \
+                                                                                                                        \
+  do_intrinsic(_getObjectAcquire,        jdk_internal_misc_Unsafe,        getObjectAcquire_name, getObject_signature,   F_R)  \
+  do_intrinsic(_getBooleanAcquire,       jdk_internal_misc_Unsafe,        getBooleanAcquire_name, getBoolean_signature, F_R)  \
+  do_intrinsic(_getByteAcquire,          jdk_internal_misc_Unsafe,        getByteAcquire_name, getByte_signature,       F_R)  \
+  do_intrinsic(_getShortAcquire,         jdk_internal_misc_Unsafe,        getShortAcquire_name, getShort_signature,     F_R)  \
+  do_intrinsic(_getCharAcquire,          jdk_internal_misc_Unsafe,        getCharAcquire_name, getChar_signature,       F_R)  \
+  do_intrinsic(_getIntAcquire,           jdk_internal_misc_Unsafe,        getIntAcquire_name, getInt_signature,         F_R)  \
+  do_intrinsic(_getLongAcquire,          jdk_internal_misc_Unsafe,        getLongAcquire_name, getLong_signature,       F_R)  \
+  do_intrinsic(_getFloatAcquire,         jdk_internal_misc_Unsafe,        getFloatAcquire_name, getFloat_signature,     F_R)  \
+  do_intrinsic(_getDoubleAcquire,        jdk_internal_misc_Unsafe,        getDoubleAcquire_name, getDouble_signature,   F_R)  \
+  do_intrinsic(_putObjectRelease,        jdk_internal_misc_Unsafe,        putObjectRelease_name, putObject_signature,   F_R)  \
+  do_intrinsic(_putBooleanRelease,       jdk_internal_misc_Unsafe,        putBooleanRelease_name, putBoolean_signature, F_R)  \
+  do_intrinsic(_putByteRelease,          jdk_internal_misc_Unsafe,        putByteRelease_name, putByte_signature,       F_R)  \
+  do_intrinsic(_putShortRelease,         jdk_internal_misc_Unsafe,        putShortRelease_name, putShort_signature,     F_R)  \
+  do_intrinsic(_putCharRelease,          jdk_internal_misc_Unsafe,        putCharRelease_name, putChar_signature,       F_R)  \
+  do_intrinsic(_putIntRelease,           jdk_internal_misc_Unsafe,        putIntRelease_name, putInt_signature,         F_R)  \
+  do_intrinsic(_putLongRelease,          jdk_internal_misc_Unsafe,        putLongRelease_name, putLong_signature,       F_R)  \
+  do_intrinsic(_putFloatRelease,         jdk_internal_misc_Unsafe,        putFloatRelease_name, putFloat_signature,     F_R)  \
+  do_intrinsic(_putDoubleRelease,        jdk_internal_misc_Unsafe,        putDoubleRelease_name, putDouble_signature,   F_R)  \
+                                                                                                                        \
   do_name(getShortUnaligned_name,"getShortUnaligned")     do_name(putShortUnaligned_name,"putShortUnaligned")           \
   do_name(getCharUnaligned_name,"getCharUnaligned")       do_name(putCharUnaligned_name,"putCharUnaligned")             \
   do_name(getIntUnaligned_name,"getIntUnaligned")         do_name(putIntUnaligned_name,"putIntUnaligned")               \
@@ -1197,24 +1255,68 @@
   do_intrinsic(_putDouble_raw,            jdk_internal_misc_Unsafe,     putDouble_name, putDouble_raw_signature,       F_R)  \
   do_intrinsic(_putAddress_raw,           jdk_internal_misc_Unsafe,     putAddress_name, putAddress_raw_signature,     F_R)  \
                                                                                                                         \
-  do_intrinsic(_compareAndSwapObject,     jdk_internal_misc_Unsafe,     compareAndSwapObject_name, compareAndSwapObject_signature, F_R) \
-   do_name(     compareAndSwapObject_name,                              "compareAndSwapObject")                                \
-   do_signature(compareAndSwapObject_signature,                         "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z")          \
-  do_intrinsic(_compareAndSwapLong,       jdk_internal_misc_Unsafe,     compareAndSwapLong_name, compareAndSwapLong_signature, F_R) \
-   do_name(     compareAndSwapLong_name,                                "compareAndSwapLong")                                  \
-   do_signature(compareAndSwapLong_signature,                           "(Ljava/lang/Object;JJJ)Z")                            \
-  do_intrinsic(_compareAndSwapInt,        jdk_internal_misc_Unsafe,     compareAndSwapInt_name, compareAndSwapInt_signature, F_R) \
-   do_name(     compareAndSwapInt_name,                                 "compareAndSwapInt")                                   \
-   do_signature(compareAndSwapInt_signature,                            "(Ljava/lang/Object;JII)Z")                            \
-  do_intrinsic(_putOrderedObject,         jdk_internal_misc_Unsafe,     putOrderedObject_name, putOrderedObject_signature, F_R) \
-   do_name(     putOrderedObject_name,                                  "putOrderedObject")                                    \
-   do_alias(    putOrderedObject_signature,                             /*(LObject;JLObject;)V*/ putObject_signature)           \
-  do_intrinsic(_putOrderedLong,           jdk_internal_misc_Unsafe,     putOrderedLong_name, putOrderedLong_signature, F_R)  \
-   do_name(     putOrderedLong_name,                                    "putOrderedLong")                                      \
-   do_alias(    putOrderedLong_signature,                               /*(Ljava/lang/Object;JJ)V*/ putLong_signature)          \
-  do_intrinsic(_putOrderedInt,            jdk_internal_misc_Unsafe,     putOrderedInt_name, putOrderedInt_signature,   F_R)  \
-   do_name(     putOrderedInt_name,                                     "putOrderedInt")                                       \
-   do_alias(    putOrderedInt_signature,                                 /*(Ljava/lang/Object;JI)V*/ putInt_signature)           \
+  do_signature(compareAndSwapObject_signature,     "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z")        \
+  do_signature(compareAndExchangeObject_signature, "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;") \
+  do_signature(compareAndSwapLong_signature,       "(Ljava/lang/Object;JJJ)Z")                                          \
+  do_signature(compareAndExchangeLong_signature,   "(Ljava/lang/Object;JJJ)J")                                          \
+  do_signature(compareAndSwapInt_signature,        "(Ljava/lang/Object;JII)Z")                                          \
+  do_signature(compareAndExchangeInt_signature,    "(Ljava/lang/Object;JII)I")                                          \
+                                                                                                                        \
+  do_name(compareAndSwapObject_name,             "compareAndSwapObject")                                                \
+  do_name(compareAndExchangeObjectVolatile_name, "compareAndExchangeObjectVolatile")                                    \
+  do_name(compareAndExchangeObjectAcquire_name,  "compareAndExchangeObjectAcquire")                                     \
+  do_name(compareAndExchangeObjectRelease_name,  "compareAndExchangeObjectRelease")                                     \
+  do_name(compareAndSwapLong_name,               "compareAndSwapLong")                                                  \
+  do_name(compareAndExchangeLongVolatile_name,   "compareAndExchangeLongVolatile")                                      \
+  do_name(compareAndExchangeLongAcquire_name,    "compareAndExchangeLongAcquire")                                       \
+  do_name(compareAndExchangeLongRelease_name,    "compareAndExchangeLongRelease")                                       \
+  do_name(compareAndSwapInt_name,                "compareAndSwapInt")                                                   \
+  do_name(compareAndExchangeIntVolatile_name,    "compareAndExchangeIntVolatile")                                       \
+  do_name(compareAndExchangeIntAcquire_name,     "compareAndExchangeIntAcquire")                                        \
+  do_name(compareAndExchangeIntRelease_name,     "compareAndExchangeIntRelease")                                        \
+                                                                                                                        \
+  do_name(weakCompareAndSwapObject_name,         "weakCompareAndSwapObject")                                            \
+  do_name(weakCompareAndSwapObjectAcquire_name,  "weakCompareAndSwapObjectAcquire")                                     \
+  do_name(weakCompareAndSwapObjectRelease_name,  "weakCompareAndSwapObjectRelease")                                     \
+  do_name(weakCompareAndSwapLong_name,           "weakCompareAndSwapLong")                                              \
+  do_name(weakCompareAndSwapLongAcquire_name,    "weakCompareAndSwapLongAcquire")                                       \
+  do_name(weakCompareAndSwapLongRelease_name,    "weakCompareAndSwapLongRelease")                                       \
+  do_name(weakCompareAndSwapInt_name,            "weakCompareAndSwapInt")                                               \
+  do_name(weakCompareAndSwapIntAcquire_name,     "weakCompareAndSwapIntAcquire")                                        \
+  do_name(weakCompareAndSwapIntRelease_name,     "weakCompareAndSwapIntRelease")                                        \
+                                                                                                                        \
+  do_intrinsic(_compareAndSwapObject,             jdk_internal_misc_Unsafe,  compareAndSwapObject_name,             compareAndSwapObject_signature,     F_RN) \
+  do_intrinsic(_compareAndExchangeObjectVolatile, jdk_internal_misc_Unsafe,  compareAndExchangeObjectVolatile_name, compareAndExchangeObject_signature, F_RN) \
+  do_intrinsic(_compareAndExchangeObjectAcquire,  jdk_internal_misc_Unsafe,  compareAndExchangeObjectAcquire_name,  compareAndExchangeObject_signature, F_R)  \
+  do_intrinsic(_compareAndExchangeObjectRelease,  jdk_internal_misc_Unsafe,  compareAndExchangeObjectRelease_name,  compareAndExchangeObject_signature, F_R)  \
+  do_intrinsic(_compareAndSwapLong,               jdk_internal_misc_Unsafe,  compareAndSwapLong_name,               compareAndSwapLong_signature,       F_RN) \
+  do_intrinsic(_compareAndExchangeLongVolatile,   jdk_internal_misc_Unsafe,  compareAndExchangeLongVolatile_name,   compareAndExchangeLong_signature,   F_RN) \
+  do_intrinsic(_compareAndExchangeLongAcquire,    jdk_internal_misc_Unsafe,  compareAndExchangeLongAcquire_name,    compareAndExchangeLong_signature,   F_R)  \
+  do_intrinsic(_compareAndExchangeLongRelease,    jdk_internal_misc_Unsafe,  compareAndExchangeLongRelease_name,    compareAndExchangeLong_signature,   F_R)  \
+  do_intrinsic(_compareAndSwapInt,                jdk_internal_misc_Unsafe,  compareAndSwapInt_name,                compareAndSwapInt_signature,        F_RN) \
+  do_intrinsic(_compareAndExchangeIntVolatile,    jdk_internal_misc_Unsafe,  compareAndExchangeIntVolatile_name,    compareAndExchangeInt_signature,    F_RN) \
+  do_intrinsic(_compareAndExchangeIntAcquire,     jdk_internal_misc_Unsafe,  compareAndExchangeIntAcquire_name,     compareAndExchangeInt_signature,    F_R)  \
+  do_intrinsic(_compareAndExchangeIntRelease,     jdk_internal_misc_Unsafe,  compareAndExchangeIntRelease_name,     compareAndExchangeInt_signature,    F_R)  \
+                                                                                                                                                              \
+  do_intrinsic(_weakCompareAndSwapObject,         jdk_internal_misc_Unsafe,  weakCompareAndSwapObject_name,         compareAndSwapObject_signature,     F_R) \
+  do_intrinsic(_weakCompareAndSwapObjectAcquire,  jdk_internal_misc_Unsafe,  weakCompareAndSwapObjectAcquire_name,  compareAndSwapObject_signature,     F_R) \
+  do_intrinsic(_weakCompareAndSwapObjectRelease,  jdk_internal_misc_Unsafe,  weakCompareAndSwapObjectRelease_name,  compareAndSwapObject_signature,     F_R) \
+  do_intrinsic(_weakCompareAndSwapLong,           jdk_internal_misc_Unsafe,  weakCompareAndSwapLong_name,           compareAndSwapLong_signature,       F_R) \
+  do_intrinsic(_weakCompareAndSwapLongAcquire,    jdk_internal_misc_Unsafe,  weakCompareAndSwapLongAcquire_name,    compareAndSwapLong_signature,       F_R) \
+  do_intrinsic(_weakCompareAndSwapLongRelease,    jdk_internal_misc_Unsafe,  weakCompareAndSwapLongRelease_name,    compareAndSwapLong_signature,       F_R) \
+  do_intrinsic(_weakCompareAndSwapInt,            jdk_internal_misc_Unsafe,  weakCompareAndSwapInt_name,            compareAndSwapInt_signature,        F_R) \
+  do_intrinsic(_weakCompareAndSwapIntAcquire,     jdk_internal_misc_Unsafe,  weakCompareAndSwapIntAcquire_name,     compareAndSwapInt_signature,        F_R) \
+  do_intrinsic(_weakCompareAndSwapIntRelease,     jdk_internal_misc_Unsafe,  weakCompareAndSwapIntRelease_name,     compareAndSwapInt_signature,        F_R) \
+                                                                                                                        \
+  do_intrinsic(_putOrderedObject,         jdk_internal_misc_Unsafe,        putOrderedObject_name, putOrderedObject_signature, F_RN) \
+   do_name(     putOrderedObject_name,                           "putOrderedObject")                                    \
+   do_alias(    putOrderedObject_signature,                     /*(LObject;JLObject;)V*/ putObject_signature)           \
+  do_intrinsic(_putOrderedLong,           jdk_internal_misc_Unsafe,        putOrderedLong_name, putOrderedLong_signature, F_RN)  \
+   do_name(     putOrderedLong_name,                             "putOrderedLong")                                      \
+   do_alias(    putOrderedLong_signature,                       /*(Ljava/lang/Object;JJ)V*/ putLong_signature)          \
+  do_intrinsic(_putOrderedInt,            jdk_internal_misc_Unsafe,        putOrderedInt_name, putOrderedInt_signature,   F_RN)  \
+   do_name(     putOrderedInt_name,                              "putOrderedInt")                                       \
+   do_alias(    putOrderedInt_signature,                        /*(Ljava/lang/Object;JI)V*/ putInt_signature)           \
                                                                                                                         \
   do_intrinsic(_getAndAddInt,             jdk_internal_misc_Unsafe,     getAndAddInt_name, getAndAddInt_signature, F_R)       \
    do_name(     getAndAddInt_name,                                      "getAndAddInt")                                       \
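Reading the new vmSymbols.hpp entries: the compareAndExchange* intrinsics keep the compareAndSwap* argument shape but return the witnessed value instead of a success flag, and the weakCompareAndSwap* intrinsics simply reuse the boolean compareAndSwap* signatures. A hedged decoding of two descriptors from the hunk above (parameter names are illustrative):

  // "(Ljava/lang/Object;JII)Z" -> boolean compareAndSwapInt(Object o, long offset, int expected, int x)
  // "(Ljava/lang/Object;JII)I" -> int compareAndExchangeIntVolatile(Object o, long offset, int expected, int x)
  // CAS reports success or failure; the exchange forms report the value observed at the given offset.
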
--- a/hotspot/src/share/vm/code/codeCache.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1023,7 +1023,7 @@
 // Keeps track of time spent for checking dependencies
 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
 
-int CodeCache::mark_for_deoptimization(DepChange& changes) {
+int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   int number_of_marked_CodeBlobs = 0;
 
--- a/hotspot/src/share/vm/code/codeCache.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -72,7 +72,7 @@
 // Solaris and BSD.
 
 class OopClosure;
-class DepChange;
+class KlassDepChange;
 
 class CodeCache : AllStatic {
   friend class VMStructs;
@@ -223,7 +223,7 @@
 
   // Deoptimization
  private:
-  static int  mark_for_deoptimization(DepChange& changes);
+  static int  mark_for_deoptimization(KlassDepChange& changes);
 #ifdef HOTSWAP
   static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
 #endif // HOTSWAP
--- a/hotspot/src/share/vm/code/dependencies.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/code/dependencies.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -664,6 +664,8 @@
   virtual bool is_klass_change()     const { return false; }
   virtual bool is_call_site_change() const { return false; }
 
+  virtual void mark_for_deoptimization(nmethod* nm) = 0;
+
   // Subclass casting with assertions.
   KlassDepChange*    as_klass_change() {
     assert(is_klass_change(), "bad cast");
@@ -753,6 +755,10 @@
   // What kind of DepChange is this?
   virtual bool is_klass_change() const { return true; }
 
+  virtual void mark_for_deoptimization(nmethod* nm) {
+    nm->mark_for_deoptimization(/*inc_recompile_counts=*/true);
+  }
+
   Klass* new_type() { return _new_type(); }
 
   // involves_context(k) is true if k is new_type or any of the super types
@@ -772,6 +778,10 @@
   // What kind of DepChange is this?
   virtual bool is_call_site_change() const { return true; }
 
+  virtual void mark_for_deoptimization(nmethod* nm) {
+    nm->mark_for_deoptimization(/*inc_recompile_counts=*/false);
+  }
+
   oop call_site()     const { return _call_site();     }
   oop method_handle() const { return _method_handle(); }
 };
--- a/hotspot/src/share/vm/code/dependencyContext.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/code/dependencyContext.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -73,7 +73,7 @@
         nm->print();
         nm->print_dependencies();
       }
-      nm->mark_for_deoptimization();
+      changes.mark_for_deoptimization(nm);
       found++;
     }
   }
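With mark_for_deoptimization now a virtual on DepChange, the dependency walk above lets the kind of change decide how an nmethod is marked. A minimal sketch of the dispatch, using the overrides shown in dependencies.hpp (the helper name is hypothetical):

  static void mark_dependent_sketch(DepChange& changes, nmethod* nm) {  // illustration only
    changes.mark_for_deoptimization(nm);
    // KlassDepChange    -> nm->mark_for_deoptimization(/*inc_recompile_counts=*/true)
    // CallSiteDepChange -> nm->mark_for_deoptimization(/*inc_recompile_counts=*/false),
    // so call-site target changes can invalidate code without bumping the decompile count
    // (see the update_recompile_counts() check in the nmethod.cpp hunk below).
  }
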
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -536,7 +536,7 @@
   _has_method_handle_invokes  = 0;
   _lazy_critical_native       = 0;
   _has_wide_vectors           = 0;
-  _marked_for_deoptimization  = 0;
+  _mark_for_deoptimization_status = not_marked;
   _lock_count                 = 0;
   _stack_traversal_mark       = 0;
   _unload_reported            = false; // jvmti state
@@ -1459,7 +1459,7 @@
                   SharedRuntime::get_handle_wrong_method_stub());
     }
 
-    if (is_in_use()) {
+    if (is_in_use() && update_recompile_counts()) {
       // It's a true state change, so mark the method as decompiled.
       // Do it only for transition from alive.
       inc_decompile_count();
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -107,6 +107,7 @@
 //  [Implicit Null Pointer exception table]
 //  - implicit null table array
 
+class DepChange;
 class Dependencies;
 class ExceptionHandlerTable;
 class ImplicitExceptionTable;
@@ -188,7 +189,13 @@
   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
 
   bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
-  bool _marked_for_deoptimization;           // Used for stack deoptimization
+
+  enum MarkForDeoptimizationStatus {
+    not_marked,
+    deoptimize,
+    deoptimize_noupdate };
+
+  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
 
   // used by jvmti to track if an unload event has been posted for this nmethod.
   bool _unload_reported;
@@ -462,8 +469,16 @@
   void set_unloading_clock(unsigned char unloading_clock);
   unsigned char unloading_clock();
 
-  bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
-  void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }
+  bool  is_marked_for_deoptimization() const      { return _mark_for_deoptimization_status != not_marked; }
+  void  mark_for_deoptimization(bool inc_recompile_counts = true) {
+    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
+  }
+  bool update_recompile_counts() const {
+    // Update recompile counts when either the update is explicitly requested (deoptimize)
+    // or the nmethod is not marked for deoptimization at all (not_marked).
+    // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
+    return _mark_for_deoptimization_status != deoptimize_noupdate;
+  }
 
   void  make_unloaded(BoolObjectClosure* is_alive, oop cause);
 
--- a/hotspot/src/share/vm/code/relocInfo.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/code/relocInfo.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -457,49 +457,6 @@
   return itr._rh;
 }
 
-int32_t Relocation::runtime_address_to_index(address runtime_address) {
-  assert(!is_reloc_index((intptr_t)runtime_address), "must not look like an index");
-
-  if (runtime_address == NULL)  return 0;
-
-  StubCodeDesc* p = StubCodeDesc::desc_for(runtime_address);
-  if (p != NULL && p->begin() == runtime_address) {
-    assert(is_reloc_index(p->index()), "there must not be too many stubs");
-    return (int32_t)p->index();
-  } else {
-    // Known "miscellaneous" non-stub pointers:
-    // os::get_polling_page(), SafepointSynchronize::address_of_state()
-    if (PrintRelocations) {
-      tty->print_cr("random unregistered address in relocInfo: " INTPTR_FORMAT, p2i(runtime_address));
-    }
-#ifndef _LP64
-    return (int32_t) (intptr_t)runtime_address;
-#else
-    // didn't fit return non-index
-    return -1;
-#endif /* _LP64 */
-  }
-}
-
-
-address Relocation::index_to_runtime_address(int32_t index) {
-  if (index == 0)  return NULL;
-
-  if (is_reloc_index(index)) {
-    StubCodeDesc* p = StubCodeDesc::desc_for_index(index);
-    assert(p != NULL, "there must be a stub for this index");
-    return p->begin();
-  } else {
-#ifndef _LP64
-    // this only works on 32bit machines
-    return (address) ((intptr_t) index);
-#else
-    fatal("Relocation::index_to_runtime_address, int32_t not pointer sized");
-    return NULL;
-#endif /* _LP64 */
-  }
-}
-
 address Relocation::old_addr_for(address newa,
                                  const CodeBuffer* src, CodeBuffer* dest) {
   int sect = dest->section_index_of(newa);
@@ -623,20 +580,13 @@
 
 void external_word_Relocation::pack_data_to(CodeSection* dest) {
   short* p = (short*) dest->locs_end();
-  int32_t index = runtime_address_to_index(_target);
 #ifndef _LP64
-  p = pack_1_int_to(p, index);
+  p = pack_1_int_to(p, (int32_t) (intptr_t)_target);
 #else
-  if (is_reloc_index(index)) {
-    p = pack_2_ints_to(p, index, 0);
-  } else {
-    jlong t = (jlong) _target;
-    int32_t lo = low(t);
-    int32_t hi = high(t);
-    p = pack_2_ints_to(p, lo, hi);
-    DEBUG_ONLY(jlong t1 = jlong_from(hi, lo));
-    assert(!is_reloc_index(t1) && (address) t1 == _target, "not symmetric");
-  }
+  jlong t = (jlong) _target;
+  int32_t lo = low(t);
+  int32_t hi = high(t);
+  p = pack_2_ints_to(p, lo, hi);
 #endif /* _LP64 */
   dest->set_locs_end((relocInfo*) p);
 }
@@ -644,16 +594,12 @@
 
 void external_word_Relocation::unpack_data() {
 #ifndef _LP64
-  _target = index_to_runtime_address(unpack_1_int());
+  _target = (address) (intptr_t)unpack_1_int();
 #else
   int32_t lo, hi;
   unpack_2_ints(lo, hi);
   jlong t = jlong_from(hi, lo);;
-  if (is_reloc_index(t)) {
-    _target = index_to_runtime_address(t);
-  } else {
-    _target = (address) t;
-  }
+  _target = (address) t;
 #endif /* _LP64 */
 }
 
--- a/hotspot/src/share/vm/code/relocInfo.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/code/relocInfo.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -707,10 +707,6 @@
     assert(datalen()==0 || type()==relocInfo::none, "no data here");
   }
 
-  static bool is_reloc_index(intptr_t index) {
-    return 0 < index && index < os::vm_page_size();
-  }
-
  protected:
   // Helper functions for pack_data_to() and unpack_data().
 
@@ -806,10 +802,6 @@
     return base + byte_offset;
   }
 
-  // these convert between indexes and addresses in the runtime system
-  static int32_t runtime_address_to_index(address runtime_address);
-  static address index_to_runtime_address(int32_t index);
-
   // helpers for mapping between old and new addresses after a move or resize
   address old_addr_for(address newa, const CodeBuffer* src, CodeBuffer* dest);
   address new_addr_for(address olda, const CodeBuffer* src, CodeBuffer* dest);
@@ -1253,7 +1245,8 @@
   // Some address looking values aren't safe to treat as relocations
   // and should just be treated as constants.
   static bool can_be_relocated(address target) {
-    return target != NULL && !is_reloc_index((intptr_t)target);
+    assert(target == NULL || (uintptr_t)target >= (uintptr_t)os::vm_page_size(), INTPTR_FORMAT, (intptr_t)target);
+    return target != NULL;
   }
 
  private:
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -469,7 +469,6 @@
 void CompileBroker::print_compile_queues(outputStream* st) {
   st->print_cr("Current compiles: ");
   MutexLocker locker(MethodCompileQueue_lock);
-  MutexLocker locker2(Threads_lock);
 
   char buf[2000];
   int buflen = sizeof(buf);
@@ -2152,18 +2151,33 @@
 
     if (CITime) {
       int bytes_compiled = method->code_size() + task->num_inlined_bytecodes();
-      JVMCI_ONLY(CompilerStatistics* stats = compiler(task->comp_level())->stats();)
       if (is_osr) {
         _t_osr_compilation.add(time);
         _sum_osr_bytes_compiled += bytes_compiled;
-        JVMCI_ONLY(stats->_osr.update(time, bytes_compiled);)
       } else {
         _t_standard_compilation.add(time);
         _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes();
-        JVMCI_ONLY(stats->_standard.update(time, bytes_compiled);)
       }
-      JVMCI_ONLY(stats->_nmethods_size += code->total_size();)
-      JVMCI_ONLY(stats->_nmethods_code_size += code->insts_size();)
+
+#if INCLUDE_JVMCI
+      AbstractCompiler* comp = compiler(task->comp_level());
+      if (comp) {
+        CompilerStatistics* stats = comp->stats();
+        if (stats) {
+          if (is_osr) {
+            stats->_osr.update(time, bytes_compiled);
+          } else {
+            stats->_standard.update(time, bytes_compiled);
+          }
+          stats->_nmethods_size += code->total_size();
+          stats->_nmethods_code_size += code->insts_size();
+        } else { // if (!stats)
+          assert(false, "Compiler statistics object must exist");
+        }
+      } else { // if (!comp)
+        assert(false, "Compiler object must exist");
+      }
+#endif // INCLUDE_JVMCI
     }
 
     if (UsePerfData) {
@@ -2222,11 +2236,15 @@
 #if INCLUDE_JVMCI
 void CompileBroker::print_times(AbstractCompiler* comp) {
   CompilerStatistics* stats = comp->stats();
-  tty->print_cr("  %s {speed: %d bytes/s; standard: %6.3f s, %d bytes, %d methods; osr: %6.3f s, %d bytes, %d methods; nmethods_size: %d bytes; nmethods_code_size: %d bytes}",
+  if (stats) {
+    tty->print_cr("  %s {speed: %d bytes/s; standard: %6.3f s, %d bytes, %d methods; osr: %6.3f s, %d bytes, %d methods; nmethods_size: %d bytes; nmethods_code_size: %d bytes}",
                 comp->name(), stats->bytes_per_second(),
                 stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count,
                 stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count,
                 stats->_nmethods_size, stats->_nmethods_code_size);
+  } else { // if (!stats)
+    assert(false, "Compiler statistics object must exist");
+  }
   comp->print_timers();
 }
 #endif // INCLUDE_JVMCI
@@ -2260,17 +2278,21 @@
       }
       CompilerStatistics* stats = comp->stats();
 
-      standard_compilation.add(stats->_standard._time);
-      osr_compilation.add(stats->_osr._time);
+      if (stats) {
+        standard_compilation.add(stats->_standard._time);
+        osr_compilation.add(stats->_osr._time);
 
-      standard_bytes_compiled += stats->_standard._bytes;
-      osr_bytes_compiled += stats->_osr._bytes;
+        standard_bytes_compiled += stats->_standard._bytes;
+        osr_bytes_compiled += stats->_osr._bytes;
 
-      standard_compile_count += stats->_standard._count;
-      osr_compile_count += stats->_osr._count;
+        standard_compile_count += stats->_standard._count;
+        osr_compile_count += stats->_osr._count;
 
-      nmethods_size += stats->_nmethods_size;
-      nmethods_code_size += stats->_nmethods_code_size;
+        nmethods_size += stats->_nmethods_size;
+        nmethods_code_size += stats->_nmethods_code_size;
+      } else { // if (!stats)
+        assert(false, "Compiler statistics object must exist");
+      }
 
       if (per_compiler) {
         print_times(comp);
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -615,7 +615,7 @@
   : DefNewGeneration(rs, initial_byte_size, "PCopy"),
   _overflow_list(NULL),
   _is_alive_closure(this),
-  _plab_stats(YoungPLABSize, PLABWeight)
+  _plab_stats("Young", YoungPLABSize, PLABWeight)
 {
   NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
   NOT_PRODUCT(_num_par_pushes = 0;)
@@ -1008,9 +1008,7 @@
   from()->set_concurrent_iteration_safe_limit(from()->top());
   to()->set_concurrent_iteration_safe_limit(to()->top());
 
-  if (ResizePLAB) {
-    plab_stats()->adjust_desired_plab_sz();
-  }
+  plab_stats()->adjust_desired_plab_sz();
 
   TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
   TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
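The ParNew PLAB statistics now carry a description ("Young") and adjust_desired_plab_sz() is called unconditionally; the ResizePLAB guard and the per-collection logging move inside the stats object, as the G1EvacStats hunk further below shows for G1. A minimal sketch of the call pattern after this change (helper name hypothetical):

  static void end_of_gc_plab_sketch(PLABStats* stats) {  // illustration only
    // Per the G1EvacStats hunk below, the stats object now logs its allocation figures
    // (gc+plab tag) and applies the ResizePLAB check itself before resizing.
    stats->adjust_desired_plab_sz();
  }
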
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,19 +36,19 @@
 {
   // Ergonomically select initial concurrent refinement parameters
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, (intx)ParallelGCThreads);
+    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, ParallelGCThreads);
   }
   set_green_zone(G1ConcRefinementGreenZone);
 
   if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
     FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
   }
-  set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
+  set_yellow_zone(MAX2(G1ConcRefinementYellowZone, green_zone()));
 
   if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
     FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
   }
-  set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
+  set_red_zone(MAX2(G1ConcRefinementRedZone, yellow_zone()));
 }
 
 ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure, jint* ecode) {
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,11 +61,11 @@
   * 2) green = 0. Means no caching. Can be a good way to minimize the
   *    amount of time spent updating rsets during a collection.
   */
-  int _green_zone;
-  int _yellow_zone;
-  int _red_zone;
+  size_t _green_zone;
+  size_t _yellow_zone;
+  size_t _red_zone;
 
-  int _thread_threshold_step;
+  size_t _thread_threshold_step;
 
   // We delay the refinement of 'hot' cards using the hot card cache.
   G1HotCardCache _hot_card_cache;
@@ -100,17 +100,17 @@
 
   void print_worker_threads_on(outputStream* st) const;
 
-  void set_green_zone(int x)  { _green_zone = x;  }
-  void set_yellow_zone(int x) { _yellow_zone = x; }
-  void set_red_zone(int x)    { _red_zone = x;    }
+  void set_green_zone(size_t x)  { _green_zone = x;  }
+  void set_yellow_zone(size_t x) { _yellow_zone = x; }
+  void set_red_zone(size_t x)    { _red_zone = x;    }
 
-  int green_zone() const      { return _green_zone;  }
-  int yellow_zone() const     { return _yellow_zone; }
-  int red_zone() const        { return _red_zone;    }
+  size_t green_zone() const      { return _green_zone;  }
+  size_t yellow_zone() const     { return _yellow_zone; }
+  size_t red_zone() const        { return _red_zone;    }
 
   uint worker_thread_num() const { return _n_worker_threads; }
 
-  int thread_threshold_step() const { return _thread_threshold_step; }
+  size_t thread_threshold_step() const { return _thread_threshold_step; }
 
   G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }
 
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -67,10 +67,12 @@
 
 void ConcurrentG1RefineThread::initialize() {
   // Current thread activation threshold
-  _threshold = MIN2<int>(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
-                         cg1r()->yellow_zone());
+  _threshold = MIN2(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
+                    cg1r()->yellow_zone());
   // A thread deactivates once the number of buffer reached a deactivation threshold
-  _deactivation_threshold = MAX2<int>(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
+  _deactivation_threshold =
+    MAX2(_threshold - MIN2(_threshold, cg1r()->thread_threshold_step()),
+         cg1r()->green_zone());
 }
 
 void ConcurrentG1RefineThread::wait_for_completed_buffers() {
@@ -127,14 +129,14 @@
     }
 
     DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-    log_debug(gc, refine)("Activated %d, on threshold: %d, current: %d",
+    log_debug(gc, refine)("Activated %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
                           _worker_id, _threshold, dcqs.completed_buffers_num());
 
     {
       SuspendibleThreadSetJoiner sts_join;
 
       do {
-        int curr_buffer_num = (int)dcqs.completed_buffers_num();
+        size_t curr_buffer_num = dcqs.completed_buffers_num();
         // If the number of the buffers falls down into the yellow zone,
         // that means that the transition period after the evacuation pause has ended.
         if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
@@ -151,7 +153,7 @@
                                                       false /* during_pause */));
 
       deactivate();
-      log_debug(gc, refine)("Deactivated %d, off threshold: %d, current: %d",
+      log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
                             _worker_id, _deactivation_threshold,
                             dcqs.completed_buffers_num());
     }
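Because the refinement thresholds are now size_t, the deactivation threshold above is computed with an inner MIN2 so the subtraction cannot wrap when the step exceeds the activation threshold. A small illustration with hypothetical values:

  static void unsigned_clamp_sketch() {  // values are made up, illustration only
    size_t threshold = 4, step = 10, green = 0;
    // size_t naive = MAX2(threshold - step, green);            // would wrap to a huge value
    size_t clamped  = MAX2(threshold - MIN2(threshold, step), green);
    assert(clamped == 0, "clamps at zero instead of wrapping");
  }
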
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,11 +53,11 @@
   // The closure applied to completed log buffers.
   CardTableEntryClosure* _refine_closure;
 
-  int _thread_threshold_step;
+  size_t _thread_threshold_step;
   // This thread activation threshold
-  int _threshold;
+  size_t _threshold;
   // This thread deactivation threshold
-  int _deactivation_threshold;
+  size_t _deactivation_threshold;
 
   void wait_for_completed_buffers();
 
--- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -207,22 +207,24 @@
 }
 
 
-BufferNode* DirtyCardQueueSet::get_completed_buffer(int stop_at) {
+BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
   BufferNode* nd = NULL;
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 
-  if ((int)_n_completed_buffers <= stop_at) {
+  if (_n_completed_buffers <= stop_at) {
     _process_completed = false;
     return NULL;
   }
 
   if (_completed_buffers_head != NULL) {
     nd = _completed_buffers_head;
+    assert(_n_completed_buffers > 0, "Invariant");
     _completed_buffers_head = nd->next();
-    if (_completed_buffers_head == NULL)
+    _n_completed_buffers--;
+    if (_completed_buffers_head == NULL) {
+      assert(_n_completed_buffers == 0, "Invariant");
       _completed_buffers_tail = NULL;
-    _n_completed_buffers--;
-    assert(_n_completed_buffers >= 0, "Invariant");
+    }
   }
   DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
   return nd;
@@ -230,7 +232,7 @@
 
 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                           uint worker_i,
-                                                          int stop_at,
+                                                          size_t stop_at,
                                                           bool during_pause) {
   assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
   BufferNode* nd = get_completed_buffer(stop_at);
--- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -134,10 +134,10 @@
   // is returned to the completed buffer set, and this call returns false.
   bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                          uint worker_i,
-                                         int stop_at,
+                                         size_t stop_at,
                                          bool during_pause);
 
-  BufferNode* get_completed_buffer(int stop_at);
+  BufferNode* get_completed_buffer(size_t stop_at);
 
   // Applies the current closure to all completed buffers,
   // non-consumptively.
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1400,7 +1400,6 @@
       JavaThread::dirty_card_queue_set().abandon_logs();
       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
 
-      _young_list->reset_sampled_info();
       // At this point there should be no regions in the
       // entire heap tagged as young.
       assert(check_young_list_empty(true /* check_heap */),
@@ -1761,8 +1760,8 @@
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _summary_bytes_used(0),
-  _survivor_evac_stats(YoungPLABSize, PLABWeight),
-  _old_evac_stats(OldPLABSize, PLABWeight),
+  _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
+  _old_evac_stats("Old", OldPLABSize, PLABWeight),
   _expand_heap_after_alloc_failure(true),
   _old_marking_cycles_started(0),
   _old_marking_cycles_completed(0),
@@ -1985,8 +1984,8 @@
   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
                                                 DirtyCardQ_CBL_mon,
                                                 DirtyCardQ_FL_lock,
-                                                concurrent_g1_refine()->yellow_zone(),
-                                                concurrent_g1_refine()->red_zone(),
+                                                (int)concurrent_g1_refine()->yellow_zone(),
+                                                (int)concurrent_g1_refine()->red_zone(),
                                                 Shared_DirtyCardQ_lock,
                                                 NULL,  // fl_owner
                                                 true); // init_free_ids
@@ -3385,13 +3384,15 @@
 
         g1_policy()->clear_collection_set();
 
+        record_obj_copy_mem_stats();
+        _survivor_evac_stats.adjust_desired_plab_sz();
+        _old_evac_stats.adjust_desired_plab_sz();
+
         // Start a new incremental collection set for the next pause.
         g1_policy()->start_incremental_cset_building();
 
         clear_cset_fast_test();
 
-        _young_list->reset_sampled_info();
-
         // Don't check the whole heap at this point as the
         // GC alloc regions from this pause have been tagged
         // as survivors and moved on to the survivor list.
@@ -4398,6 +4399,8 @@
   { }
 
   void work(uint worker_id) {
+    G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
+
     ResourceMark rm;
     HandleMark   hm;
 
@@ -4461,13 +4464,8 @@
   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
 }
 
-// Weak Reference processing during an evacuation pause (part 1).
-void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
-  double ref_proc_start = os::elapsedTime();
-
-  ReferenceProcessor* rp = _ref_processor_stw;
-  assert(rp->discovery_enabled(), "should have been enabled");
-
+void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
+  double preserve_cm_referents_start = os::elapsedTime();
   // Any reference objects, in the collection set, that were 'discovered'
   // by the CM ref processor should have already been copied (either by
   // applying the external root copy closure to the discovered lists, or
@@ -4495,9 +4493,18 @@
                                                  per_thread_states,
                                                  no_of_gc_workers,
                                                  _task_queues);
-
   workers()->run_task(&keep_cm_referents);
 
+  g1_policy()->phase_times()->record_preserve_cm_referents_time_ms((os::elapsedTime() - preserve_cm_referents_start) * 1000.0);
+}
+
+// Weak Reference processing during an evacuation pause (part 1).
+void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
+  double ref_proc_start = os::elapsedTime();
+
+  ReferenceProcessor* rp = _ref_processor_stw;
+  assert(rp->discovery_enabled(), "should have been enabled");
+
   // Closure to test whether a referent is alive.
   G1STWIsAliveClosure is_alive(this);
 
@@ -4529,6 +4536,8 @@
                                               NULL,
                                               _gc_timer_stw);
   } else {
+    uint no_of_gc_workers = workers()->active_workers();
+
     // Parallel reference processing
     assert(rp->num_q() == no_of_gc_workers, "sanity");
     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
@@ -4586,6 +4595,12 @@
   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
 }
 
+void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
+  double merge_pss_time_start = os::elapsedTime();
+  per_thread_states->flush();
+  g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
+}
+
 void G1CollectedHeap::pre_evacuate_collection_set() {
   _expand_heap_after_alloc_failure = true;
   _evacuation_failed = false;
@@ -4644,6 +4659,7 @@
   // objects (and their reachable sub-graphs) that were
   // not copied during the pause.
   if (g1_policy()->should_process_references()) {
+    preserve_cm_referents(per_thread_states);
     process_discovered_references(per_thread_states);
   } else {
     ref_processor_stw()->verify_no_references_recorded();
@@ -4687,12 +4703,7 @@
 
   _allocator->release_gc_alloc_regions(evacuation_info);
 
-  per_thread_states->flush();
-
-  record_obj_copy_mem_stats();
-
-  _survivor_evac_stats.adjust_desired_plab_sz();
-  _old_evac_stats.adjust_desired_plab_sz();
+  merge_per_thread_state_info(per_thread_states);
 
   // Reset and re-enable the hot card cache.
   // Note the counts for the cards in the regions in the
@@ -5188,8 +5199,8 @@
   bool success() { return _success; }
 };
 
-bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
-  bool ret = _young_list->check_list_empty(check_sample);
+bool G1CollectedHeap::check_young_list_empty(bool check_heap) {
+  bool ret = _young_list->check_list_empty();
 
   if (check_heap) {
     NoYoungRegionsClosure closure;
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -511,6 +511,9 @@
   // allocated block, or else "NULL".
   HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 
+  // Preserve any referents discovered by concurrent marking that have not yet been
+  // copied by the STW pause.
+  void preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states);
   // Process any reference objects discovered during
   // an incremental evacuation pause.
   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
@@ -519,6 +522,9 @@
   // after processing.
   void enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 
+  // Merges the information gathered on a per-thread basis for all worker threads
+  // during GC into global variables.
+  void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 public:
   WorkGang* workers() const { return _workers; }
 
@@ -1333,8 +1339,7 @@
     return _young_list->check_list_well_formed();
   }
 
-  bool check_young_list_empty(bool check_heap,
-                              bool check_sample = true);
+  bool check_young_list_empty(bool check_heap);
 
   // *** Stuff related to concurrent marking.  It's not clear to me that so
   // many of these need to be public.
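Taken together, the g1CollectedHeap changes split out a separately-timed referent-preservation step and move the per-thread flush behind its own timed merge step. A heavily simplified ordering sketch of the evacuation-pause tail, using only names from the hunks above (comments only, not compilable on its own):

  //   preserve_cm_referents(per_thread_states);         // new: timed as G1GCPhaseTimes::PreserveCMReferents
  //   process_discovered_references(per_thread_states); // no longer runs the keep-alive task itself
  //   ...
  //   merge_per_thread_state_info(per_thread_states);   // new: flushes per-thread state, record_merge_pss_time_ms()
  // record_obj_copy_mem_stats() and the PLAB-size adjustments move out of this path, next to
  // g1_policy()->clear_collection_set() in the earlier g1CollectedHeap.cpp hunk.
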
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -787,10 +787,9 @@
   return survivor_regions_evac_time;
 }
 
-void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
+void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 
-  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
   if (rs_lengths > _rs_lengths_prediction) {
     // add 10% to avoid having to recalculate often
     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
@@ -1118,14 +1117,15 @@
   _short_lived_surv_rate_group->start_adding_regions();
   // Do that for any other surv rate groups
 
+  double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
+
   if (update_stats) {
     double cost_per_card_ms = 0.0;
-    double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
     if (_pending_cards > 0) {
-      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
+      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
       _cost_per_card_ms_seq->add(cost_per_card_ms);
     }
-    _cost_scan_hcc_seq->add(cost_scan_hcc);
+    _cost_scan_hcc_seq->add(scan_hcc_time_ms);
 
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
@@ -1215,8 +1215,6 @@
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 
-  double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
-
   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
                                 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
@@ -1302,12 +1300,12 @@
     const int k_gy = 3, k_gr = 6;
     const double inc_k = 1.1, dec_k = 0.9;
 
-    int g = cg1r->green_zone();
+    size_t g = cg1r->green_zone();
     if (update_rs_time > goal_ms) {
-      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
+      g = (size_t)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
     } else {
       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
-        g = (int)MAX2(g * inc_k, g + 1.0);
+        g = (size_t)MAX2(g * inc_k, g + 1.0);
       }
     }
     // Change the refinement threads params
@@ -1316,15 +1314,15 @@
     cg1r->set_red_zone(g * k_gr);
     cg1r->reinitialize_threads();
 
-    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
-    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
+    size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1);
+    size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                     cg1r->yellow_zone());
     // Change the barrier params
-    dcqs.set_process_completed_threshold(processing_threshold);
-    dcqs.set_max_completed_queue(cg1r->red_zone());
+    dcqs.set_process_completed_threshold((int)processing_threshold);
+    dcqs.set_max_completed_queue((int)cg1r->red_zone());
   }
 
-  int curr_queue_size = dcqs.completed_buffers_num();
+  size_t curr_queue_size = dcqs.completed_buffers_num();
   if (curr_queue_size >= cg1r->yellow_zone()) {
     dcqs.set_completed_queue_padding(curr_queue_size);
   } else {
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -471,7 +471,7 @@
   // Check the current value of the young list RSet lengths and
   // compare it against the last prediction. If the current value is
   // higher, recalculate the young list target length prediction.
-  void revise_young_list_target_length_if_necessary();
+  void revise_young_list_target_length_if_necessary(size_t rs_lengths);
 
   // This should be called after the heap is resized.
   void record_new_heap_size(uint new_number_of_regions);
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1097,7 +1097,7 @@
     reset_marking_state();
   } else {
     {
-      GCTraceTime(Debug, gc) trace("GC Aggregate Data", g1h->gc_timer_cm());
+      GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());
 
       // Aggregate the per-task counting data that we have accumulated
       // while marking.
@@ -2018,7 +2018,7 @@
   // Inner scope to exclude the cleaning of the string and symbol
   // tables from the displayed time.
   {
-    GCTraceTime(Debug, gc) trace("GC Ref Proc", g1h->gc_timer_cm());
+    GCTraceTime(Debug, gc) trace("Reference Processing", g1h->gc_timer_cm());
 
     ReferenceProcessor* rp = g1h->ref_processor_cm();
 
@@ -2271,7 +2271,7 @@
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   guarantee(has_overflown() ||
             satb_mq_set.completed_buffers_num() == 0,
-            "Invariant: has_overflown = %s, num buffers = %d",
+            "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
             BOOL_TO_STR(has_overflown()),
             satb_mq_set.completed_buffers_num());
 
@@ -2702,11 +2702,8 @@
 };
 
 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
-  ReferenceProcessor* result = NULL;
-  if (G1UseConcMarkReferenceProcessing) {
-    result = g1h->ref_processor_cm();
-    assert(result != NULL, "should not be NULL");
-  }
+  ReferenceProcessor* result = g1h->ref_processor_cm();
+  assert(result != NULL, "CM reference processor should not be NULL");
   return result;
 }
 
--- a/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,15 +29,26 @@
 #include "logging/log.hpp"
 #include "trace/tracing.hpp"
 
+void G1EvacStats::log_plab_allocation() {
+  PLABStats::log_plab_allocation();
+  log_debug(gc, plab)("%s other allocation: "
+                      "region end waste: " SIZE_FORMAT "B, "
+                      "regions filled: %u, "
+                      "direct allocated: " SIZE_FORMAT "B, "
+                      "failure used: " SIZE_FORMAT "B, "
+                      "failure wasted: " SIZE_FORMAT "B",
+                      _description,
+                      _region_end_waste * HeapWordSize,
+                      _regions_filled,
+                      _direct_allocated * HeapWordSize,
+                      _failure_used * HeapWordSize,
+                      _failure_waste * HeapWordSize);
+}
+
 void G1EvacStats::adjust_desired_plab_sz() {
+  log_plab_allocation();
+
   if (!ResizePLAB) {
-    log_debug(gc, plab)(" (allocated = " SIZE_FORMAT " wasted = " SIZE_FORMAT " "
-                        "unused = " SIZE_FORMAT " used = " SIZE_FORMAT " "
-                        "undo_waste = " SIZE_FORMAT " region_end_waste = " SIZE_FORMAT " "
-                        "regions filled = %u direct_allocated = " SIZE_FORMAT " "
-                        "failure_used = " SIZE_FORMAT " failure_waste = " SIZE_FORMAT ") ",
-                        _allocated, _wasted, _unused, used(), _undo_wasted, _region_end_waste,
-                        _regions_filled, _direct_allocated, _failure_used, _failure_waste);
     // Clear accumulators for next round.
     reset();
     return;
@@ -107,18 +118,19 @@
   // Latch the result
   _desired_net_plab_sz = plab_sz;
 
-  log_debug(gc, plab)(" (allocated = " SIZE_FORMAT " wasted = " SIZE_FORMAT " "
-                      "unused = " SIZE_FORMAT " used = " SIZE_FORMAT " "
-                      "undo_waste = " SIZE_FORMAT " region_end_waste = " SIZE_FORMAT " "
-                      "regions filled = %u direct_allocated = " SIZE_FORMAT " "
-                      "failure_used = " SIZE_FORMAT " failure_waste = " SIZE_FORMAT ") "
-                      " (plab_sz = " SIZE_FORMAT " desired_plab_sz = " SIZE_FORMAT ")",
-                      _allocated, _wasted, _unused, used(), _undo_wasted, _region_end_waste,
-                      _regions_filled, _direct_allocated, _failure_used, _failure_waste,
-                      cur_plab_sz, plab_sz);
-
+  log_sizing(cur_plab_sz, plab_sz);
   // Clear accumulators for next round.
   reset();
 }
 
+G1EvacStats::G1EvacStats(const char* description, size_t desired_plab_sz_, unsigned wt) :
+  PLABStats(description, desired_plab_sz_, wt),
+  _region_end_waste(0),
+  _regions_filled(0),
+  _direct_allocated(0),
+  _failure_used(0),
+  _failure_waste(0) {
+}
+
 G1EvacStats::~G1EvacStats() { }
--- a/hotspot/src/share/vm/gc/g1/g1EvacStats.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1EvacStats.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,20 +51,15 @@
     _failure_waste = 0;
   }
 
+  virtual void log_plab_allocation();
+
  public:
-  G1EvacStats(size_t desired_plab_sz_, unsigned wt) : PLABStats(desired_plab_sz_, wt),
-    _region_end_waste(0), _regions_filled(0), _direct_allocated(0),
-    _failure_used(0), _failure_waste(0) {
-  }
+  G1EvacStats(const char* description, size_t desired_plab_sz_, unsigned wt);
+
+  ~G1EvacStats();
 
   virtual void adjust_desired_plab_sz();
 
-  size_t allocated() const { return _allocated; }
-  size_t wasted() const { return _wasted; }
-  size_t unused() const { return _unused; }
-  size_t used() const { return allocated() - (wasted() + unused()); }
-  size_t undo_wasted() const { return _undo_wasted; }
-
   uint regions_filled() const { return _regions_filled; }
   size_t region_end_waste() const { return _region_end_waste; }
   size_t direct_allocated() const { return _direct_allocated; }
@@ -77,8 +72,6 @@
   inline void add_direct_allocated(size_t value);
   inline void add_region_end_waste(size_t value);
   inline void add_failure_used_and_waste(size_t used, size_t waste);
-
-  ~G1EvacStats();
 };
 
 #endif // SHARE_VM_GC_G1_G1EVACSTATS_HPP
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,107 +28,70 @@
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/workerDataArray.inline.hpp"
-#include "memory/allocation.hpp"
+#include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/os.hpp"
 
-// Helper class for avoiding interleaved logging
-class LineBuffer: public StackObj {
-
-private:
-  static const int BUFFER_LEN = 1024;
-  static const int INDENT_CHARS = 3;
-  char _buffer[BUFFER_LEN];
-  int _indent_level;
-  int _cur;
-
-  void vappend(const char* format, va_list ap)  ATTRIBUTE_PRINTF(2, 0) {
-    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
-    if (res != -1) {
-      _cur += res;
-    } else {
-      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
-      _buffer[BUFFER_LEN -1] = 0;
-      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
-    }
-  }
-
-public:
-  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
-    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
-      _buffer[_cur] = ' ';
-    }
-  }
-
-#ifndef PRODUCT
-  ~LineBuffer() {
-    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
-  }
-#endif
-
-  void append(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3) {
-    va_list ap;
-    va_start(ap, format);
-    vappend(format, ap);
-    va_end(ap);
-  }
-
-  const char* to_string() {
-    _cur = _indent_level * INDENT_CHARS;
-    return _buffer;
-  }
-};
-
-static const char* Indents[4] = {"", "   ", "      ", "         "};
+static const char* Indents[5] = {"", "  ", "    ", "      ", "        "};
 
 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _max_gc_threads(max_gc_threads)
 {
   assert(max_gc_threads > 0, "Must have some GC threads");
 
-  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start:", false, 2);
-  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning:", true, 2);
+  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms):");
+  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms):");
 
   // Root scanning phases
-  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots:", true, 3);
-  _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots:", true, 3);
-  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots:", true, 3);
-  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots:", true, 3);
-  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots:", true, 3);
-  _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots:", true, 3);
-  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots:", true, 3);
-  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots:", true, 3);
-  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots:", true, 3);
-  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots:", true, 3);
-  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots:", true, 3);
-  _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD:", true, 3);
-  _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots:", true, 3);
-  _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering:", true, 3);
+  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms):");
+  _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms):");
+  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
+  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
+  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
+  _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms):");
+  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
+  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
+  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
+  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
+  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
+  _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms):");
+  _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms):");
+  _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms):");
 
-  _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS:", true, 2);
-  _gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC:", true, 3);
-  _gc_par_phases[ScanHCC]->set_enabled(ConcurrentG1Refine::hot_card_cache_enabled());
-  _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS:", true, 2);
-  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning:", true, 2);
-  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy:", true, 2);
-  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination:", true, 2);
-  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total:", true, 2);
-  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End:", false, 2);
-  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other:", true, 2);
+  _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms):");
+  if (ConcurrentG1Refine::hot_card_cache_enabled()) {
+    _gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC (ms):");
+  } else {
+    _gc_par_phases[ScanHCC] = NULL;
+  }
+  _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):");
+  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):");
+  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
+  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
+  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
+  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
+  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms):");
 
-  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:", true, 3);
+  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:");
   _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers);
 
-  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:", true, 3);
+  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
   _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
 
-  _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup:", true, 2);
-  _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup:", true, 2);
+  if (UseStringDeduplication) {
+    _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms):");
+    _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms):");
+  } else {
+    _gc_par_phases[StringDedupQueueFixup] = NULL;
+    _gc_par_phases[StringDedupTableFixup] = NULL;
+  }
 
-  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty:", true, 3);
-  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:", true, 3);
+  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty (ms):");
+  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:");
   _gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
+
+  _gc_par_phases[PreserveCMReferents] = new WorkerDataArray<double>(max_gc_threads, "Parallel Preserve CM Refs (ms):");
 }
 
 void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
@@ -140,11 +103,10 @@
   _external_accounted_time_ms = 0.0;
 
   for (int i = 0; i < GCParPhasesSentinel; i++) {
-    _gc_par_phases[i]->reset();
+    if (_gc_par_phases[i] != NULL) {
+      _gc_par_phases[i]->reset();
+    }
   }
-
-  _gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
-  _gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
 }
 
 void G1GCPhaseTimes::note_gc_end() {
@@ -166,45 +128,12 @@
   }
 
   for (int i = 0; i < GCParPhasesSentinel; i++) {
-    _gc_par_phases[i]->verify(_active_gc_threads);
+    if (_gc_par_phases[i] != NULL) {
+      _gc_par_phases[i]->verify(_active_gc_threads);
+    }
   }
 }
 
-void G1GCPhaseTimes::print_stats(const char* indent, const char* str, double value) {
-  log_debug(gc, phases)("%s%s: %.1lf ms", indent, str, value);
-}
-
-double G1GCPhaseTimes::accounted_time_ms() {
-    // First subtract any externally accounted time
-    double misc_time_ms = _external_accounted_time_ms;
-
-    // Subtract the root region scanning wait time. It's initialized to
-    // zero at the start of the pause.
-    misc_time_ms += _root_region_scan_wait_time_ms;
-
-    misc_time_ms += _cur_collection_par_time_ms;
-
-    // Now subtract the time taken to fix up roots in generated code
-    misc_time_ms += _cur_collection_code_root_fixup_time_ms;
-
-    // Strong code root purge time
-    misc_time_ms += _cur_strong_code_root_purge_time_ms;
-
-    if (G1StringDedup::is_enabled()) {
-      // String dedup fixup time
-      misc_time_ms += _cur_string_dedup_fixup_time_ms;
-    }
-
-    // Subtract the time taken to clean the card table from the
-    // current value of "other time"
-    misc_time_ms += _cur_clear_ct_time_ms;
-
-    // Remove expand heap time from "other time"
-    misc_time_ms += _cur_expand_heap_time_ms;
-
-    return misc_time_ms;
-}
-
 // record the time a phase took in seconds
 void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {
   _gc_par_phases[phase]->set(worker_i, secs);
@@ -224,193 +153,144 @@
   return _gc_par_phases[phase]->average(_active_gc_threads) * 1000.0;
 }
 
-double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {
-  return _gc_par_phases[phase]->get(worker_i) * 1000.0;
-}
-
-double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {
-  return _gc_par_phases[phase]->sum(_active_gc_threads) * 1000.0;
-}
-
-double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {
-  return _gc_par_phases[phase]->minimum(_active_gc_threads) * 1000.0;
-}
-
-double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {
-  return _gc_par_phases[phase]->maximum(_active_gc_threads) * 1000.0;
-}
-
-size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {
-  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
-  return _gc_par_phases[phase]->thread_work_items()->get(worker_i);
-}
-
 size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {
   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
   return _gc_par_phases[phase]->thread_work_items()->sum(_active_gc_threads);
 }
 
-double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {
-  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
-  return _gc_par_phases[phase]->thread_work_items()->average(_active_gc_threads);
+template <class T>
+void G1GCPhaseTimes::details(T* phase, const char* indent) {
+  LogHandle(gc, phases, task) log;
+  if (log.is_level(LogLevel::Trace)) {
+    outputStream* trace_out = log.trace_stream();
+    trace_out->print("%s", indent);
+    phase->print_details_on(trace_out, _active_gc_threads);
+  }
 }
 
-size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {
-  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
-  return _gc_par_phases[phase]->thread_work_items()->minimum(_active_gc_threads);
-}
+void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) {
+  out->print("%s", Indents[indent]);
+  phase->print_summary_on(out, _active_gc_threads, print_sum);
+  details(phase, Indents[indent]);
 
-size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {
-  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
-  return _gc_par_phases[phase]->thread_work_items()->maximum(_active_gc_threads);
+  WorkerDataArray<size_t>* work_items = phase->thread_work_items();
+  if (work_items != NULL) {
+    out->print("%s", Indents[indent + 1]);
+    work_items->print_summary_on(out, _active_gc_threads, true);
+    details(work_items, Indents[indent + 1]);
+  }
 }
 
-class G1GCParPhasePrinter : public StackObj {
-  G1GCPhaseTimes* _phase_times;
- public:
-  G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}
-
-  void print(G1GCPhaseTimes::GCParPhases phase_id) {
-    WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];
-
-    if (phase->_length == 1) {
-      print_single_length(phase_id, phase);
-    } else {
-      print_multi_length(phase_id, phase);
-    }
+void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase) {
+  LogHandle(gc, phases) log;
+  if (log.is_level(LogLevel::Debug)) {
+    ResourceMark rm;
+    log_phase(phase, 2, log.debug_stream(), true);
   }
-
+}
 
- private:
-  void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
-    // No need for min, max, average and sum for only one worker
-    log_debug(gc, phases)("%s%s:  %.1lf", Indents[phase->_indent_level], phase->_title, _phase_times->get_time_ms(phase_id, 0));
-
-    WorkerDataArray<size_t>* work_items = phase->_thread_work_items;
-    if (work_items != NULL) {
-      log_debug(gc, phases)("%s%s:  " SIZE_FORMAT, Indents[work_items->_indent_level], work_items->_title, _phase_times->sum_thread_work_items(phase_id));
-    }
+void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum) {
+  LogHandle(gc, phases) log;
+  if (log.is_level(LogLevel::Trace)) {
+    ResourceMark rm;
+    log_phase(phase, 3, log.trace_stream(), print_sum);
   }
+}
 
-  void print_time_values(const char* indent, G1GCPhaseTimes::GCParPhases phase_id) {
-    if (log_is_enabled(Trace, gc)) {
-      LineBuffer buf(0);
-      uint active_length = _phase_times->_active_gc_threads;
-      for (uint i = 0; i < active_length; ++i) {
-        buf.append(" %4.1lf", _phase_times->get_time_ms(phase_id, i));
-      }
-      const char* line = buf.to_string();
-      log_trace(gc, phases)("%s%-25s%s", indent, "", line);
-    }
-  }
+#define PHASE_DOUBLE_FORMAT "%s%s: %.1lfms"
+#define PHASE_SIZE_FORMAT "%s%s: " SIZE_FORMAT
 
-  void print_count_values(const char* indent, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
-    if (log_is_enabled(Trace, gc)) {
-      LineBuffer buf(0);
-      uint active_length = _phase_times->_active_gc_threads;
-      for (uint i = 0; i < active_length; ++i) {
-        buf.append("  " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
-      }
-      const char* line = buf.to_string();
-      log_trace(gc, phases)("%s%-25s%s", indent, "", line);
-    }
-  }
+#define info_line(str, value) \
+  log_info(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[1], str, value);
 
-  void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
-    const char* indent = Indents[thread_work_items->_indent_level];
-
-    assert(thread_work_items->_print_sum, "%s does not have print sum true even though it is a count", thread_work_items->_title);
+#define debug_line(str, value) \
+  log_debug(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[2], str, value);
 
-    log_debug(gc, phases)("%s%-25s Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT,
-        indent, thread_work_items->_title,
-        _phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
-        _phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));
+#define trace_line(str, value) \
+  log_trace(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[3], str, value);
 
-    print_count_values(indent, phase_id, thread_work_items);
-  }
-
-  void print_multi_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
-    const char* indent = Indents[phase->_indent_level];
+#define trace_line_sz(str, value) \
+  log_trace(gc, phases)(PHASE_SIZE_FORMAT, Indents[3], str, value);
 
-    if (phase->_print_sum) {
-      log_debug(gc, phases)("%s%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf, Sum: %4.1lf",
-          indent, phase->_title,
-          _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
-          _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id), _phase_times->sum_time_ms(phase_id));
-    } else {
-      log_debug(gc, phases)("%s%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf",
-          indent, phase->_title,
-          _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
-          _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));
-    }
+#define trace_line_ms(str, value) \
+  log_trace(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[3], str, value);
 
-    print_time_values(indent, phase_id);
-
-    if (phase->_thread_work_items != NULL) {
-      print_thread_work_items(phase_id, phase->_thread_work_items);
-    }
-  }
-};
+#define info_line_and_account(str, value) \
+  info_line(str, value);                  \
+  accounted_time_ms += value;
 
 void G1GCPhaseTimes::print() {
   note_gc_end();
 
-  G1GCParPhasePrinter par_phase_printer(this);
-
+  double accounted_time_ms = _external_accounted_time_ms;
   if (_root_region_scan_wait_time_ms > 0.0) {
-    print_stats(Indents[1], "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
-  }
-
-  print_stats(Indents[1], "Parallel Time", _cur_collection_par_time_ms);
-  for (int i = 0; i <= GCMainParPhasesLast; i++) {
-    par_phase_printer.print((GCParPhases) i);
+    info_line_and_account("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
   }
 
-  print_stats(Indents[1], "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
-  print_stats(Indents[1], "Code Root Purge", _cur_strong_code_root_purge_time_ms);
+  info_line_and_account("Evacuate Collection Set", _cur_collection_par_time_ms);
+  trace_phase(_gc_par_phases[GCWorkerStart], false);
+  debug_phase(_gc_par_phases[ExtRootScan]);
+  for (int i = ThreadRoots; i <= SATBFiltering; i++) {
+    trace_phase(_gc_par_phases[i]);
+  }
+  debug_phase(_gc_par_phases[UpdateRS]);
+  if (ConcurrentG1Refine::hot_card_cache_enabled()) {
+    trace_phase(_gc_par_phases[ScanHCC]);
+  }
+  debug_phase(_gc_par_phases[ScanRS]);
+  debug_phase(_gc_par_phases[CodeRoots]);
+  debug_phase(_gc_par_phases[ObjCopy]);
+  debug_phase(_gc_par_phases[Termination]);
+  debug_phase(_gc_par_phases[Other]);
+  debug_phase(_gc_par_phases[GCWorkerTotal]);
+  trace_phase(_gc_par_phases[GCWorkerEnd], false);
+
+  info_line_and_account("Code Roots", _cur_collection_code_root_fixup_time_ms + _cur_strong_code_root_purge_time_ms);
+  debug_line("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);
+  debug_line("Code Roots Purge", _cur_strong_code_root_purge_time_ms);
+
   if (G1StringDedup::is_enabled()) {
-    print_stats(Indents[1], "String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
-    for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
-      par_phase_printer.print((GCParPhases) i);
-    }
+    info_line_and_account("String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
+    debug_phase(_gc_par_phases[StringDedupQueueFixup]);
+    debug_phase(_gc_par_phases[StringDedupTableFixup]);
   }
-  print_stats(Indents[1], "Clear CT", _cur_clear_ct_time_ms);
-  print_stats(Indents[1], "Expand Heap After Collection", _cur_expand_heap_time_ms);
-  double misc_time_ms = _gc_pause_time_ms - accounted_time_ms();
-  print_stats(Indents[1], "Other", misc_time_ms);
+  info_line_and_account("Clear Card Table", _cur_clear_ct_time_ms);
+  info_line_and_account("Expand Heap After Collection", _cur_expand_heap_time_ms);
+
+  double free_cset_time = _recorded_young_free_cset_time_ms + _recorded_non_young_free_cset_time_ms;
+  info_line_and_account("Free Collection Set", free_cset_time);
+  debug_line("Young Free Collection Set", _recorded_young_free_cset_time_ms);
+  debug_line("Non-Young Free Collection Set", _recorded_non_young_free_cset_time_ms);
+  info_line_and_account("Merge Per-Thread State", _recorded_merge_pss_time_ms);
+
+  info_line("Other", _gc_pause_time_ms - accounted_time_ms);
   if (_cur_verify_before_time_ms > 0.0) {
-    print_stats(Indents[2], "Verify Before", _cur_verify_before_time_ms);
+    debug_line("Verify Before", _cur_verify_before_time_ms);
   }
   if (G1CollectedHeap::heap()->evacuation_failed()) {
     double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +
       _cur_evac_fail_restore_remsets;
-    print_stats(Indents[2], "Evacuation Failure", evac_fail_handling);
-    log_trace(gc, phases)("%sRecalculate Used: %.1lf ms", Indents[3], _cur_evac_fail_recalc_used);
-    log_trace(gc, phases)("%sRemove Self Forwards: %.1lf ms", Indents[3], _cur_evac_fail_remove_self_forwards);
-    log_trace(gc, phases)("%sRestore RemSet: %.1lf ms", Indents[3], _cur_evac_fail_restore_remsets);
+    debug_line("Evacuation Failure", evac_fail_handling);
+    trace_line("Recalculate Used", _cur_evac_fail_recalc_used);
+    trace_line("Remove Self Forwards",_cur_evac_fail_remove_self_forwards);
+    trace_line("Restore RemSet", _cur_evac_fail_restore_remsets);
   }
-  print_stats(Indents[2], "Choose CSet",
-    (_recorded_young_cset_choice_time_ms +
-    _recorded_non_young_cset_choice_time_ms));
-  print_stats(Indents[2], "Ref Proc", _cur_ref_proc_time_ms);
-  print_stats(Indents[2], "Ref Enq", _cur_ref_enq_time_ms);
-  print_stats(Indents[2], "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
-  par_phase_printer.print(RedirtyCards);
+  debug_line("Choose CSet", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
+  debug_line("Preserve CM Refs", _recorded_preserve_cm_referents_time_ms);
+  debug_line("Reference Processing", _cur_ref_proc_time_ms);
+  debug_line("Reference Enqueuing", _cur_ref_enq_time_ms);
+  debug_line("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
+  trace_phase(_gc_par_phases[RedirtyCards]);
+  trace_phase(_gc_par_phases[PreserveCMReferents]);
   if (G1EagerReclaimHumongousObjects) {
-    print_stats(Indents[2], "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
-
-    log_trace(gc, phases)("%sHumongous Total: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_total);
-    log_trace(gc, phases)("%sHumongous Candidate: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_candidates);
-    print_stats(Indents[2], "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
-    log_trace(gc, phases)("%sHumongous Reclaimed: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_reclaimed);
+    debug_line("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
+    trace_line_sz("Humongous Total", _cur_fast_reclaim_humongous_total);
+    trace_line_sz("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
+    debug_line("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
+    trace_line_sz("Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
   }
-  print_stats(Indents[2], "Free CSet",
-    (_recorded_young_free_cset_time_ms +
-    _recorded_non_young_free_cset_time_ms));
-  log_trace(gc, phases)("%sYoung Free CSet: %.1lf ms", Indents[3], _recorded_young_free_cset_time_ms);
-  log_trace(gc, phases)("%sNon-Young Free CSet: %.1lf ms", Indents[3], _recorded_non_young_free_cset_time_ms);
   if (_cur_verify_after_time_ms > 0.0) {
-    print_stats(Indents[2], "Verify After", _cur_verify_after_time_ms);
+    debug_line("Verify After", _cur_verify_after_time_ms);
   }
 }
 
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,8 +32,6 @@
 template <class T> class WorkerDataArray;
 
 class G1GCPhaseTimes : public CHeapObj<mtGC> {
-  friend class G1GCParPhasePrinter;
-
   uint _active_gc_threads;
   uint _max_gc_threads;
   jlong _gc_start_counter;
@@ -69,6 +67,7 @@
     StringDedupQueueFixup,
     StringDedupTableFixup,
     RedirtyCards,
+    PreserveCMReferents,
     GCParPhasesSentinel
   };
 
@@ -108,6 +107,10 @@
 
   double _recorded_redirty_logged_cards_time_ms;
 
+  double _recorded_preserve_cm_referents_time_ms;
+
+  double _recorded_merge_pss_time_ms;
+
   double _recorded_young_free_cset_time_ms;
   double _recorded_non_young_free_cset_time_ms;
 
@@ -120,10 +123,13 @@
   double _cur_verify_before_time_ms;
   double _cur_verify_after_time_ms;
 
-  // Helper methods for detailed logging
-  void print_stats(const char*, const char* str, double value);
+  void note_gc_end();
 
-  void note_gc_end();
+  template <class T>
+  void details(T* phase, const char* indent);
+  void log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum);
+  void debug_phase(WorkerDataArray<double>* phase);
+  void trace_phase(WorkerDataArray<double>* phase, bool print_sum = true);
 
  public:
   G1GCPhaseTimes(uint max_gc_threads);
@@ -143,16 +149,6 @@
 
   size_t sum_thread_work_items(GCParPhases phase);
 
- private:
-  double get_time_ms(GCParPhases phase, uint worker_i);
-  double sum_time_ms(GCParPhases phase);
-  double min_time_ms(GCParPhases phase);
-  double max_time_ms(GCParPhases phase);
-  size_t get_thread_work_item(GCParPhases phase, uint worker_i);
-  double average_thread_work_items(GCParPhases phase);
-  size_t min_thread_work_items(GCParPhases phase);
-  size_t max_thread_work_items(GCParPhases phase);
-
  public:
 
   void record_clear_ct_time(double ms) {
@@ -234,6 +230,14 @@
     _recorded_redirty_logged_cards_time_ms = time_ms;
   }
 
+  void record_preserve_cm_referents_time_ms(double time_ms) {
+    _recorded_preserve_cm_referents_time_ms = time_ms;
+  }
+
+  void record_merge_pss_time_ms(double time_ms) {
+    _recorded_merge_pss_time_ms = time_ms;
+  }
+
   void record_cur_collection_start_sec(double time_ms) {
     _cur_collection_start_sec = time_ms;
   }
@@ -250,8 +254,6 @@
     _external_accounted_time_ms += time_ms;
   }
 
-  double accounted_time_ms();
-
   double cur_collection_start_sec() {
     return _cur_collection_start_sec;
   }
--- a/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -81,10 +81,7 @@
 }
 
 void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
-  if (!default_use_cache()) {
-    assert(_hot_cache == NULL, "Logic");
-    return;
-  }
+  assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
 
   assert(_hot_cache != NULL, "Logic");
   assert(!use_cache(), "cache should be disabled");
--- a/hotspot/src/share/vm/gc/g1/g1IHOPControl.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1IHOPControl.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,14 +47,16 @@
 
 void G1IHOPControl::print() {
   size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold();
-  log_debug(gc, ihop)("Basic information (value update), threshold: " SIZE_FORMAT "B (%1.2f), target occupancy: " SIZE_FORMAT "B, current occupancy: " SIZE_FORMAT "B,"
-                      " recent old gen allocation rate: %1.2f, recent marking phase length: %1.2f",
+  log_debug(gc, ihop)("Basic information (value update), threshold: " SIZE_FORMAT "B (%1.2f), target occupancy: " SIZE_FORMAT "B, current occupancy: " SIZE_FORMAT "B, "
+                      "recent allocation size: " SIZE_FORMAT "B, recent allocation duration: %1.2fms, recent old gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms",
                       cur_conc_mark_start_threshold,
                       cur_conc_mark_start_threshold * 100.0 / _target_occupancy,
                       _target_occupancy,
                       G1CollectedHeap::heap()->used(),
+                      _last_allocated_bytes,
+                      _last_allocation_time_s * 1000.0,
                       _last_allocation_time_s > 0.0 ? _last_allocated_bytes / _last_allocation_time_s : 0.0,
-                      last_marking_length_s());
+                      last_marking_length_s() * 1000.0);
 }
 
 void G1IHOPControl::send_trace_event(G1NewTracer* tracer) {
@@ -191,13 +193,16 @@
 void G1AdaptiveIHOPControl::print() {
   G1IHOPControl::print();
   size_t actual_target = actual_target_threshold();
-  log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: " SIZE_FORMAT "B (%1.2f), internal target occupancy: " SIZE_FORMAT "B,"
-                      " predicted old gen allocation rate: %1.2f, predicted marking phase length: %1.2f, prediction active: %s",
+  log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: " SIZE_FORMAT "B (%1.2f), internal target occupancy: " SIZE_FORMAT "B, "
+                      "occupancy: " SIZE_FORMAT "B, additional buffer size: " SIZE_FORMAT "B, predicted old gen allocation rate: %1.2fB/s, "
+                      "predicted marking phase length: %1.2fms, prediction active: %s",
                       get_conc_mark_start_threshold(),
                       percent_of(get_conc_mark_start_threshold(), actual_target),
                       actual_target,
+                      G1CollectedHeap::heap()->used(),
+                      _last_unrestrained_young_size,
                       _predictor->get_new_prediction(&_allocation_rate_s),
-                      _predictor->get_new_prediction(&_marking_times_s),
+                      _predictor->get_new_prediction(&_marking_times_s) * 1000.0,
                       have_enough_data_for_prediction() ? "true" : "false");
 }
 
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -327,6 +327,9 @@
 
 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
   assert(worker_id < _n_workers, "out of bounds access");
+  if (_states[worker_id] == NULL) {
+    _states[worker_id] = new_par_scan_state(worker_id, _young_cset_length);
+  }
   return _states[worker_id];
 }
 
@@ -352,6 +355,10 @@
   for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
     G1ParScanThreadState* pss = _states[worker_index];
 
+    if (pss == NULL) {
+      continue;
+    }
+
     _total_cards_scanned += _cards_scanned[worker_index];
 
     pss->flush(_surviving_young_words_total);
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -200,6 +200,7 @@
   size_t* _surviving_young_words_total;
   size_t* _cards_scanned;
   size_t _total_cards_scanned;
+  size_t _young_cset_length;
   uint _n_workers;
   bool _flushed;
 
@@ -210,10 +211,11 @@
       _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
       _cards_scanned(NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC)),
       _total_cards_scanned(0),
+      _young_cset_length(young_cset_length),
       _n_workers(n_workers),
       _flushed(false) {
     for (uint i = 0; i < n_workers; ++i) {
-      _states[i] = new_par_scan_state(i, young_cset_length);
+      _states[i] = NULL;
     }
     memset(_surviving_young_words_total, 0, young_cset_length * sizeof(size_t));
     memset(_cards_scanned, 0, n_workers * sizeof(size_t));
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -238,7 +238,7 @@
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
 
   G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
-  {
+  if (ConcurrentG1Refine::hot_card_cache_enabled()) {
     // Apply the closure to the entries of the hot card cache.
     G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
     _g1->iterate_hcc_closure(&into_cset_update_rs_cl, worker_i);
@@ -291,7 +291,6 @@
   _g1->cleanUpCardTable();
 
   DirtyCardQueueSet& into_cset_dcqs = _into_cset_dirty_card_queue_set;
-  int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
 
   if (_g1->evacuation_failed()) {
     double restore_remembered_set_start = os::elapsedTime();
--- a/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -26,6 +26,8 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "runtime/mutexLocker.hpp"
 
@@ -55,21 +57,21 @@
   }
 }
 
-G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() : ConcurrentGCThread() {
-  _monitor = new Monitor(Mutex::nonleaf,
-                         "G1YoungRemSetSamplingThread monitor",
-                         true,
-                         Monitor::_safepoint_check_never);
-
+G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() :
+    ConcurrentGCThread(),
+    _monitor(Mutex::nonleaf,
+             "G1YoungRemSetSamplingThread monitor",
+             true,
+             Monitor::_safepoint_check_never) {
   set_name("G1 Young RemSet Sampling");
   create_and_start();
 }
 
 void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
-  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+  MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
   if (!_should_terminate) {
-    intx waitms = G1ConcRefinementServiceIntervalMillis; // 300, really should be?
-    _monitor->wait(Mutex::_no_safepoint_check_flag, waitms);
+    uintx waitms = G1ConcRefinementServiceIntervalMillis; // Defaults to 300 ms; is this really the right interval?
+    _monitor.wait(Mutex::_no_safepoint_check_flag, waitms);
   }
 }
 
@@ -90,8 +92,8 @@
 }
 
 void G1YoungRemSetSamplingThread::stop_service() {
-  MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
-  _monitor->notify();
+  MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
+  _monitor.notify();
 }
 
 void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
@@ -100,22 +102,35 @@
   G1CollectorPolicy* g1p = g1h->g1_policy();
   if (g1p->adaptive_young_list_length()) {
     int regions_visited = 0;
-    g1h->young_list()->rs_length_sampling_init();
-    while (g1h->young_list()->rs_length_sampling_more()) {
-      g1h->young_list()->rs_length_sampling_next();
+    HeapRegion* hr = g1h->young_list()->first_region();
+    size_t sampled_rs_lengths = 0;
+
+    while (hr != NULL) {
+      size_t rs_length = hr->rem_set()->occupied();
+      sampled_rs_lengths += rs_length;
+
+      // The current region may not yet have been added to the
+      // incremental collection set (it gets added when it is
+      // retired as the current allocation region).
+      if (hr->in_collection_set()) {
+        // Update the collection set policy information for this region
+        g1p->update_incremental_cset_info(hr, rs_length);
+      }
+
       ++regions_visited;
 
       // we try to yield every time we visit 10 regions
       if (regions_visited == 10) {
         if (sts.should_yield()) {
           sts.yield();
-          // we just abandon the iteration
-          break;
+          // A GC may have occurred, so our sampling data is stale and further
+          // traversal of the young list is unsafe.
+          return;
         }
         regions_visited = 0;
       }
+      hr = hr->get_next_young_region();
     }
-
-    g1p->revise_young_list_target_length_if_necessary();
+    g1p->revise_young_list_target_length_if_necessary(sampled_rs_lengths);
   }
 }
--- a/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
 // increase the young gen size to keep pause time length goal.
 class G1YoungRemSetSamplingThread: public ConcurrentGCThread {
 private:
-  Monitor* _monitor;
+  Monitor _monitor;
 
   void sample_young_list_rs_lengths();
 
--- a/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,10 +71,6 @@
           "draining concurrent marking work queues.")                       \
           range(1, max_intx)                                                \
                                                                             \
-  experimental(bool, G1UseConcMarkReferenceProcessing, true,                \
-          "If true, enable reference discovery during concurrent "          \
-          "marking and reference processing at the end of remark.")         \
-                                                                            \
   experimental(double, G1LastPLABAverageOccupancy, 50.0,                    \
                "The expected average occupancy of the last PLAB in "        \
                "percent.")                                                  \
@@ -107,35 +103,35 @@
           "Size of an update buffer")                                       \
           range(1, NOT_LP64(32*M) LP64_ONLY(1*G))                           \
                                                                             \
-  product(intx, G1ConcRefinementYellowZone, 0,                              \
+  product(size_t, G1ConcRefinementYellowZone, 0,                            \
           "Number of enqueued update buffers that will "                    \
           "trigger concurrent processing. Will be selected ergonomically "  \
           "by default.")                                                    \
           range(0, max_intx)                                                \
                                                                             \
-  product(intx, G1ConcRefinementRedZone, 0,                                 \
+  product(size_t, G1ConcRefinementRedZone, 0,                               \
           "Maximum number of enqueued update buffers before mutator "       \
           "threads start processing new ones instead of enqueueing them. "  \
           "Will be selected ergonomically by default. Zero will disable "   \
           "concurrent processing.")                                         \
           range(0, max_intx)                                                \
                                                                             \
-  product(intx, G1ConcRefinementGreenZone, 0,                               \
+  product(size_t, G1ConcRefinementGreenZone, 0,                             \
           "The number of update buffers that are left in the queue by the " \
           "concurrent processing threads. Will be selected ergonomically "  \
           "by default.")                                                    \
           range(0, max_intx)                                                \
                                                                             \
-  product(intx, G1ConcRefinementServiceIntervalMillis, 300,                 \
+  product(uintx, G1ConcRefinementServiceIntervalMillis, 300,                \
           "The last concurrent refinement thread wakes up every "           \
           "specified number of milliseconds to do miscellaneous work.")     \
           range(0, max_jint)                                                \
                                                                             \
-  product(intx, G1ConcRefinementThresholdStep, 0,                           \
+  product(size_t, G1ConcRefinementThresholdStep, 0,                         \
           "Each time the rset update queue increases by this amount "       \
           "activate the next refinement thread if available. "              \
           "Will be selected ergonomically by default.")                     \
-          range(0, max_jint)                                                \
+          range(0, SIZE_MAX)                                                \
                                                                             \
   product(intx, G1RSetUpdatingPauseTimePercent, 10,                         \
           "A target percentage of time that is allowed to be spend on "     \
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -781,7 +781,9 @@
           ResourceMark rm;
           _containing_obj->print_on(log.error_stream());
           log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
-          obj->print_on(log.error_stream());
+          if (obj->is_oop()) {
+            obj->print_on(log.error_stream());
+          }
           log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
           log.error("----------");
           _failures = true;
--- a/hotspot/src/share/vm/gc/g1/ptrQueue.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/ptrQueue.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
 
+#include <new>
+
 PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
   _qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
   _permanent(permanent), _lock(NULL)
@@ -87,6 +89,19 @@
 }
 
 
+BufferNode* BufferNode::allocate(size_t byte_size) {
+  assert(byte_size > 0, "precondition");
+  assert(is_size_aligned(byte_size, sizeof(void**)),
+         "Invalid buffer size " SIZE_FORMAT, byte_size);
+  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
+  return new (data) BufferNode;
+}
+
+void BufferNode::deallocate(BufferNode* node) {
+  node->~BufferNode();
+  FREE_C_HEAP_ARRAY(char, node);
+}
+
 PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
   _max_completed_queue(0),
   _cbl_mon(NULL), _fl_lock(NULL),
@@ -123,17 +138,23 @@
 
 void** PtrQueueSet::allocate_buffer() {
   assert(_sz > 0, "Didn't set a buffer size.");
-  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
-  if (_fl_owner->_buf_free_list != NULL) {
-    void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
-    _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
-    _fl_owner->_buf_free_list_sz--;
-    return res;
+  BufferNode* node = NULL;
+  {
+    MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
+    node = _fl_owner->_buf_free_list;
+    if (node != NULL) {
+      _fl_owner->_buf_free_list = node->next();
+      _fl_owner->_buf_free_list_sz--;
+    }
+  }
+  if (node == NULL) {
+    node = BufferNode::allocate(_sz);
   } else {
-    // Allocate space for the BufferNode in front of the buffer.
-    char *b =  NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size(), mtGC);
-    return BufferNode::make_buffer_from_block(b);
+    // Reinitialize buffer obtained from free list.
+    node->set_index(0);
+    node->set_next(NULL);
   }
+  return BufferNode::make_buffer_from_node(node);
 }
 
 void PtrQueueSet::deallocate_buffer(void** buf) {
@@ -150,13 +171,13 @@
   // For now we'll adopt the strategy of deleting half.
   MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
   size_t n = _buf_free_list_sz / 2;
-  while (n > 0) {
-    assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
-    void* b = BufferNode::make_block_from_node(_buf_free_list);
-    _buf_free_list = _buf_free_list->next();
-    FREE_C_HEAP_ARRAY(char, b);
-    _buf_free_list_sz --;
-    n--;
+  for (size_t i = 0; i < n; ++i) {
+    assert(_buf_free_list != NULL,
+           "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
+    BufferNode* node = _buf_free_list;
+    _buf_free_list = node->next();
+    _buf_free_list_sz--;
+    BufferNode::deallocate(node);
   }
 }
 
@@ -236,8 +257,9 @@
 
 void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  BufferNode* cbn = BufferNode::new_from_buffer(buf);
+  BufferNode* cbn = BufferNode::make_node_from_buffer(buf);
   cbn->set_index(index);
+  cbn->set_next(NULL);
   if (_completed_buffers_tail == NULL) {
     assert(_completed_buffers_head == NULL, "Well-formedness");
     _completed_buffers_head = cbn;
@@ -249,16 +271,17 @@
   _n_completed_buffers++;
 
   if (!_process_completed && _process_completed_threshold >= 0 &&
-      _n_completed_buffers >= _process_completed_threshold) {
+      _n_completed_buffers >= (size_t)_process_completed_threshold) {
     _process_completed = true;
-    if (_notify_when_complete)
+    if (_notify_when_complete) {
       _cbl_mon->notify();
+    }
   }
   DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
 }
 
-int PtrQueueSet::completed_buffers_list_length() {
-  int n = 0;
+size_t PtrQueueSet::completed_buffers_list_length() {
+  size_t n = 0;
   BufferNode* cbn = _completed_buffers_head;
   while (cbn != NULL) {
     n++;
@@ -312,7 +335,8 @@
 
 void PtrQueueSet::notify_if_necessary() {
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
+  assert(_process_completed_threshold >= 0, "_process_completed_threshold is negative");
+  if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
     _process_completed = true;
     if (_notify_when_complete)
       _cbl_mon->notify();
--- a/hotspot/src/share/vm/gc/g1/ptrQueue.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/ptrQueue.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,9 +33,6 @@
 // the addresses of modified old-generation objects.  This type supports
 // this operation.
 
-// The definition of placement operator new(size_t, void*) in the <new>.
-#include <new>
-
 class PtrQueueSet;
 class PtrQueue VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
@@ -168,42 +165,38 @@
 class BufferNode {
   size_t _index;
   BufferNode* _next;
+  void* _buffer[1];             // Pseudo flexible array member.
+
+  BufferNode() : _index(0), _next(NULL) { }
+  ~BufferNode() { }
+
+  static size_t buffer_offset() {
+    return offset_of(BufferNode, _buffer);
+  }
+
 public:
-  BufferNode() : _index(0), _next(NULL) { }
   BufferNode* next() const     { return _next;  }
   void set_next(BufferNode* n) { _next = n;     }
   size_t index() const         { return _index; }
   void set_index(size_t i)     { _index = i;    }
 
-  // Align the size of the structure to the size of the pointer
-  static size_t aligned_size() {
-    static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
-    return alignment;
-  }
+  // Allocate a new BufferNode with the "buffer" having byte_size bytes.
+  static BufferNode* allocate(size_t byte_size);
 
-  // BufferNode is allocated before the buffer.
-  // The chunk of memory that holds both of them is a block.
+  // Free a BufferNode.
+  static void deallocate(BufferNode* node);
 
-  // Produce a new BufferNode given a buffer.
-  static BufferNode* new_from_buffer(void** buf) {
-    return new (make_block_from_buffer(buf)) BufferNode;
+  // Return the BufferNode containing the buffer.
+  static BufferNode* make_node_from_buffer(void** buffer) {
+    return reinterpret_cast<BufferNode*>(
+      reinterpret_cast<char*>(buffer) - buffer_offset());
   }
 
-  // The following are the required conversion routines:
-  static BufferNode* make_node_from_buffer(void** buf) {
-    return (BufferNode*)make_block_from_buffer(buf);
-  }
+  // Return the buffer for node.
   static void** make_buffer_from_node(BufferNode *node) {
-    return make_buffer_from_block(node);
-  }
-  static void* make_block_from_node(BufferNode *node) {
-    return (void*)node;
-  }
-  static void** make_buffer_from_block(void* p) {
-    return (void**)((char*)p + aligned_size());
-  }
-  static void* make_block_from_buffer(void** p) {
-    return (void*)((char*)p - aligned_size());
+    // &_buffer[0] might lead to index out of bounds warnings.
+    return reinterpret_cast<void**>(
+      reinterpret_cast<char*>(node) + buffer_offset());
   }
 };
 
@@ -216,7 +209,7 @@
   Monitor* _cbl_mon;  // Protects the fields below.
   BufferNode* _completed_buffers_head;
   BufferNode* _completed_buffers_tail;
-  int _n_completed_buffers;
+  size_t _n_completed_buffers;
   int _process_completed_threshold;
   volatile bool _process_completed;
 
@@ -240,9 +233,9 @@
   // Maximum number of elements allowed on completed queue: after that,
   // enqueuer does the work itself.  Zero indicates no maximum.
   int _max_completed_queue;
-  int _completed_queue_padding;
+  size_t _completed_queue_padding;
 
-  int completed_buffers_list_length();
+  size_t completed_buffers_list_length();
   void assert_completed_buffer_list_len_correct_locked();
   void assert_completed_buffer_list_len_correct();
 
@@ -306,15 +299,15 @@
   // list size may be reduced, if that is deemed desirable.
   void reduce_free_list();
 
-  int completed_buffers_num() { return _n_completed_buffers; }
+  size_t completed_buffers_num() { return _n_completed_buffers; }
 
   void merge_bufferlists(PtrQueueSet* src);
 
   void set_max_completed_queue(int m) { _max_completed_queue = m; }
   int max_completed_queue() { return _max_completed_queue; }
 
-  void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
-  int completed_queue_padding() { return _completed_queue_padding; }
+  void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
+  size_t completed_queue_padding() { return _completed_queue_padding; }
 
   // Notify the consumer if the number of buffers crossed the threshold
   void notify_if_necessary();
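
The BufferNode rework above replaces the old block/buffer conversion helpers with a node that keeps its buffer in a pseudo flexible array member, so a buffer pointer and its owning node are always one fixed offset apart. A minimal standalone sketch of that layout (illustrative names, not the HotSpot types; error handling and alignment padding omitted):

    #include <cstddef>   // size_t, offsetof
    #include <new>       // placement new

    struct DemoNode {
      size_t    index;
      DemoNode* next;
      void*     buffer[1];   // pseudo flexible array member; the real slots follow the header

      static size_t buffer_offset() { return offsetof(DemoNode, buffer); }

      // Allocate the header and a buffer of byte_size bytes in a single block.
      static DemoNode* allocate(size_t byte_size) {
        char* block = new char[buffer_offset() + byte_size];
        return new (block) DemoNode{0, nullptr, {nullptr}};
      }

      // The header is trivially destructible, so freeing the block is enough.
      static void deallocate(DemoNode* node) {
        delete[] reinterpret_cast<char*>(node);
      }

      // Convert between a node and its in-place buffer with plain offset arithmetic.
      static void** buffer_of(DemoNode* node) {
        return reinterpret_cast<void**>(reinterpret_cast<char*>(node) + buffer_offset());
      }
      static DemoNode* node_of(void** buffer) {
        return reinterpret_cast<DemoNode*>(reinterpret_cast<char*>(buffer) - buffer_offset());
      }
    };

Keeping the buffer inside the node is what lets allocate_buffer() hand out make_buffer_from_node(node) and enqueue_complete_buffer() recover the node again with make_node_from_buffer(buf), without any separate lookup.
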
--- a/hotspot/src/share/vm/gc/g1/workerDataArray.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/workerDataArray.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -24,18 +24,53 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/workerDataArray.inline.hpp"
+#include "utilities/ostream.hpp"
+
+template <>
+void WorkerDataArray<double>::WDAPrinter::summary(outputStream* out, const char* title, double min, double avg, double max, double diff, double sum, bool print_sum) {
+  out->print("%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf", title, min * MILLIUNITS, avg * MILLIUNITS, max * MILLIUNITS, diff * MILLIUNITS);
+  if (print_sum) {
+    out->print_cr(", Sum: %4.1lf", sum * MILLIUNITS);
+  } else {
+    out->cr();
+  }
+}
+
+template <>
+void WorkerDataArray<size_t>::WDAPrinter::summary(outputStream* out, const char* title, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum) {
+  out->print("%-25s Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT, title, min, avg, max, diff);
+  if (print_sum) {
+    out->print_cr(", Sum: " SIZE_FORMAT, sum);
+  } else {
+    out->cr();
+  }
+}
+
+template <>
+void WorkerDataArray<double>::WDAPrinter::details(const WorkerDataArray<double>* phase, outputStream* out, uint active_threads) {
+  out->print("%-25s", "");
+  for (uint i = 0; i < active_threads; ++i) {
+    out->print(" %4.1lf", phase->get(i) * 1000.0);
+  }
+  out->cr();
+}
+
+template <>
+void WorkerDataArray<size_t>::WDAPrinter::details(const WorkerDataArray<size_t>* phase, outputStream* out, uint active_threads) {
+  out->print("%-25s", "");
+  for (uint i = 0; i < active_threads; ++i) {
+    out->print("  " SIZE_FORMAT, phase->get(i));
+  }
+  out->cr();
+}
 
 #ifndef PRODUCT
 void WorkerDataArray_test() {
   const uint length = 3;
   const char* title = "Test array";
-  const bool print_sum = false;
-  const uint indent_level = 2;
 
-  WorkerDataArray<size_t> array(length, title, print_sum, indent_level);
+  WorkerDataArray<size_t> array(length, title);
   assert(strncmp(array.title(), title, strlen(title)) == 0 , "Expected titles to match");
-  assert(array.should_print_sum() == print_sum, "Expected should_print_sum to match print_sum");
-  assert(array.indentation() == indent_level, "Expected indentation to match");
 
   const size_t expected[length] = {5, 3, 7};
   for (uint i = 0; i < length; i++) {
@@ -46,10 +81,7 @@
   }
 
   assert(array.sum(length) == (5 + 3 + 7), "Expected sums to match");
-  assert(array.minimum(length) == 3, "Expected mininum to match");
-  assert(array.maximum(length) == 7, "Expected maximum to match");
-  assert(array.diff(length) == (7 - 3), "Expected diffs to match");
-  assert(array.average(length) == 5, "Expected averages to match");
+  assert(array.average(length) == 5.0, "Expected averages to match");
 
   for (uint i = 0; i < length; i++) {
     array.add(i, 1);
--- a/hotspot/src/share/vm/gc/g1/workerDataArray.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/workerDataArray.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -22,18 +22,19 @@
  *
  */
 
+#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
+#define SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
+
 #include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
 
+class outputStream;
+
 template <class T>
 class WorkerDataArray  : public CHeapObj<mtGC> {
-  friend class G1GCParPhasePrinter;
   T*          _data;
   uint        _length;
   const char* _title;
-  bool        _print_sum;
-  uint        _indent_level;
-  bool        _enabled;
 
   WorkerDataArray<size_t>* _thread_work_items;
 
@@ -42,11 +43,7 @@
   void set_all(T value);
 
  public:
-  WorkerDataArray(uint length,
-                  const char* title,
-                  bool print_sum,
-                  uint indent_level);
-
+  WorkerDataArray(uint length, const char* title);
   ~WorkerDataArray();
 
   void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items);
@@ -62,27 +59,30 @@
 
   double average(uint active_threads) const;
   T sum(uint active_threads) const;
-  T minimum(uint active_threads) const;
-  T maximum(uint active_threads) const;
-  T diff(uint active_threads) const;
-
-  uint indentation() const {
-    return _indent_level;
-  }
 
   const char* title() const {
     return _title;
   }
 
-  bool should_print_sum() const {
-    return _print_sum;
-  }
-
   void clear();
-  void set_enabled(bool enabled) {
-    _enabled = enabled;
-  }
 
   void reset() PRODUCT_RETURN;
   void verify(uint active_threads) const PRODUCT_RETURN;
+
+
+ private:
+  class WDAPrinter {
+  public:
+    static void summary(outputStream* out, const char* title, double min, double avg, double max, double diff, double sum, bool print_sum);
+    static void summary(outputStream* out, const char* title, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum);
+
+    static void details(const WorkerDataArray<double>* phase, outputStream* out, uint active_threads);
+    static void details(const WorkerDataArray<size_t>* phase, outputStream* out, uint active_threads);
+  };
+
+ public:
+  void print_summary_on(outputStream* out, uint active_threads, bool print_sum = true) const;
+  void print_details_on(outputStream* out, uint active_threads) const;
 };
+
+#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
--- a/hotspot/src/share/vm/gc/g1/workerDataArray.inline.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/workerDataArray.inline.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -22,20 +22,18 @@
  *
  */
 
+#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
+#define SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
+
 #include "gc/g1/workerDataArray.hpp"
 #include "memory/allocation.inline.hpp"
+#include "utilities/ostream.hpp"
 
 template <typename T>
-WorkerDataArray<T>::WorkerDataArray(uint length,
-                                    const char* title,
-                                    bool print_sum,
-                                    uint indent_level) :
+WorkerDataArray<T>::WorkerDataArray(uint length, const char* title) :
  _title(title),
  _length(0),
- _print_sum(print_sum),
- _indent_level(indent_level),
- _thread_work_items(NULL),
- _enabled(true) {
+ _thread_work_items(NULL) {
   assert(length > 0, "Must have some workers to store data for");
   _length = length;
   _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
@@ -94,29 +92,6 @@
 }
 
 template <typename T>
-T WorkerDataArray<T>::minimum(uint active_threads) const {
-  T min = get(0);
-  for (uint i = 1; i < active_threads; ++i) {
-    min = MIN2(min, get(i));
-  }
-  return min;
-}
-
-template <typename T>
-T WorkerDataArray<T>::maximum(uint active_threads) const {
-  T max = get(0);
-  for (uint i = 1; i < active_threads; ++i) {
-    max = MAX2(max, get(i));
-  }
-  return max;
-}
-
-template <typename T>
-T WorkerDataArray<T>::diff(uint active_threads) const {
-  return maximum(active_threads) - minimum(active_threads);
-}
-
-template <typename T>
 void WorkerDataArray<T>::clear() {
   set_all(0);
 }
@@ -128,6 +103,27 @@
   }
 }
 
+template <class T>
+void WorkerDataArray<T>::print_summary_on(outputStream* out, uint active_threads, bool print_sum) const {
+  T max = get(0);
+  T min = max;
+  T sum = max; // include get(0) in the sum
+  for (uint i = 1; i < active_threads; ++i) {
+    T value = get(i);
+    max = MAX2(max, value);
+    min = MIN2(min, value);
+    sum += value;
+  }
+  T diff = max - min;
+  double avg = sum / (double) active_threads;
+  WDAPrinter::summary(out, title(), min, avg, max, diff, sum, print_sum);
+}
+
+template <class T>
+void WorkerDataArray<T>::print_details_on(outputStream* out, uint active_threads) const {
+  WDAPrinter::details(this, out, active_threads);
+}
+
 #ifndef PRODUCT
 template <typename T>
 void WorkerDataArray<T>::reset() {
@@ -139,10 +135,6 @@
 
 template <typename T>
 void WorkerDataArray<T>::verify(uint active_threads) const {
-  if (!_enabled) {
-    return;
-  }
-
   assert(active_threads <= _length, "Wrong number of active threads");
   for (uint i = 0; i < active_threads; i++) {
     assert(_data[i] != uninitialized(),
@@ -163,3 +155,5 @@
   return -1.0;
 }
 #endif
+
+#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
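
print_summary_on() above folds the per-thread values into min/avg/max/diff/sum in one pass. The same reduction written against a plain std::vector, for illustration only (the container, the printf formatting, and the non-empty-input assumption are not part of the HotSpot code):

    #include <algorithm>  // std::min, std::max
    #include <cstdio>
    #include <vector>

    template <typename T>
    void print_summary(const std::vector<T>& data) {
      // Assumes data is non-empty, as the worker arrays always are.
      T min = data[0];
      T max = data[0];
      T sum = data[0];
      for (size_t i = 1; i < data.size(); ++i) {
        T value = data[i];
        min = std::min(min, value);
        max = std::max(max, value);
        sum += value;
      }
      double avg = sum / static_cast<double>(data.size());
      std::printf("Min: %.1f, Avg: %.1f, Max: %.1f, Diff: %.1f, Sum: %.1f\n",
                  static_cast<double>(min), avg, static_cast<double>(max),
                  static_cast<double>(max - min), static_cast<double>(sum));
    }
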
--- a/hotspot/src/share/vm/gc/g1/youngList.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/youngList.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -33,9 +33,9 @@
 #include "utilities/ostream.hpp"
 
 YoungList::YoungList(G1CollectedHeap* g1h) :
-    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
+    _g1h(g1h), _head(NULL), _length(0),
     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
-  guarantee(check_list_empty(false), "just making sure...");
+  guarantee(check_list_empty(), "just making sure...");
 }
 
 void YoungList::push_region(HeapRegion *hr) {
@@ -86,9 +86,7 @@
   _survivor_tail = NULL;
   _survivor_length = 0;
 
-  _last_sampled_rs_lengths = 0;
-
-  assert(check_list_empty(false), "just making sure...");
+  assert(check_list_empty(), "just making sure...");
 }
 
 bool YoungList::check_list_well_formed() {
@@ -119,17 +117,13 @@
   return ret;
 }
 
-bool YoungList::check_list_empty(bool check_sample) {
+bool YoungList::check_list_empty() {
   bool ret = true;
 
   if (_length != 0) {
     log_error(gc, verify)("### YOUNG LIST should have 0 length, not %u", _length);
     ret = false;
   }
-  if (check_sample && _last_sampled_rs_lengths != 0) {
-    log_error(gc, verify)("### YOUNG LIST has non-zero last sampled RS lengths");
-    ret = false;
-  }
   if (_head != NULL) {
     log_error(gc, verify)("### YOUNG LIST does not have a NULL head");
     ret = false;
@@ -142,38 +136,6 @@
 }
 
 void
-YoungList::rs_length_sampling_init() {
-  _sampled_rs_lengths = 0;
-  _curr               = _head;
-}
-
-bool
-YoungList::rs_length_sampling_more() {
-  return _curr != NULL;
-}
-
-void
-YoungList::rs_length_sampling_next() {
-  assert( _curr != NULL, "invariant" );
-  size_t rs_length = _curr->rem_set()->occupied();
-
-  _sampled_rs_lengths += rs_length;
-
-  // The current region may not yet have been added to the
-  // incremental collection set (it gets added when it is
-  // retired as the current allocation region).
-  if (_curr->in_collection_set()) {
-    // Update the collection set policy information for this region
-    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
-  }
-
-  _curr = _curr->get_next_young_region();
-  if (_curr == NULL) {
-    _last_sampled_rs_lengths = _sampled_rs_lengths;
-  }
-}
-
-void
 YoungList::reset_auxilary_lists() {
   guarantee( is_empty(), "young list should be empty" );
   assert(check_list_well_formed(), "young list should be well formed");
--- a/hotspot/src/share/vm/gc/g1/youngList.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/g1/youngList.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -37,14 +37,9 @@
   HeapRegion* _survivor_head;
   HeapRegion* _survivor_tail;
 
-  HeapRegion* _curr;
-
   uint        _length;
   uint        _survivor_length;
 
-  size_t      _last_sampled_rs_lengths;
-  size_t      _sampled_rs_lengths;
-
   void         empty_list(HeapRegion* list);
 
 public:
@@ -72,15 +67,6 @@
     return (size_t) survivor_length() * HeapRegion::GrainBytes;
   }
 
-  void rs_length_sampling_init();
-  bool rs_length_sampling_more();
-  void rs_length_sampling_next();
-
-  void reset_sampled_info() {
-    _last_sampled_rs_lengths =   0;
-  }
-  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
-
   // for development purposes
   void reset_auxilary_lists();
   void clear() { _head = NULL; _length = 0; }
@@ -97,7 +83,7 @@
 
   // debugging
   bool          check_list_well_formed();
-  bool          check_list_empty(bool check_sample = true);
+  bool          check_list_empty();
   void          print();
 };
 
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -405,7 +405,9 @@
 oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
   // If a previous card-mark was deferred, flush it now.
   flush_deferred_store_barrier(thread);
-  if (can_elide_initializing_store_barrier(new_obj)) {
+  if (can_elide_initializing_store_barrier(new_obj) ||
+      new_obj->is_typeArray()) {
+    // Arrays of non-references don't need a pre-barrier.
     // The deferred_card_mark region should be empty
     // following the flush above.
     assert(thread->deferred_card_mark().is_empty(), "Error");
--- a/hotspot/src/share/vm/gc/shared/copyFailedInfo.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/copyFailedInfo.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_SHARED_COPYFAILEDINFO_HPP
 
 #include "runtime/thread.hpp"
+#include "trace/traceMacros.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class CopyFailedInfo : public CHeapObj<mtGC> {
@@ -63,26 +64,28 @@
 };
 
 class PromotionFailedInfo : public CopyFailedInfo {
-  OSThread* _thread;
+  traceid _thread_trace_id;
 
  public:
-  PromotionFailedInfo() : CopyFailedInfo(), _thread(NULL) {}
+  PromotionFailedInfo() : CopyFailedInfo(), _thread_trace_id(0) {}
 
   void register_copy_failure(size_t size) {
     CopyFailedInfo::register_copy_failure(size);
-    if (_thread == NULL) {
-      _thread = Thread::current()->osthread();
+    if (_thread_trace_id == 0) {
+      _thread_trace_id = THREAD_TRACE_ID(Thread::current());
     } else {
-      assert(_thread == Thread::current()->osthread(), "The PromotionFailedInfo should be thread local.");
+      assert(THREAD_TRACE_ID(Thread::current()) == _thread_trace_id,
+        "The PromotionFailedInfo should be thread local.");
     }
   }
 
   void reset() {
     CopyFailedInfo::reset();
-    _thread = NULL;
+    _thread_trace_id = 0;
   }
 
-  OSThread* thread() const { return _thread; }
+  traceid thread_trace_id() const { return _thread_trace_id; }
+
 };
 
 class EvacuationFailedInfo : public CopyFailedInfo {};
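
PromotionFailedInfo now records a trace id instead of holding an OSThread*. The underlying pattern is simply "latch the id of the first reporting thread, assert that later reports come from the same thread, clear on reset"; a small sketch with an assumed integer id type (none of these names are HotSpot identifiers):

    #include <cassert>
    #include <cstdint>

    typedef uint64_t demo_traceid;           // stand-in for traceid

    class DemoFailureInfo {
      size_t       _failures;
      demo_traceid _thread_id;               // 0 means "not recorded yet"
     public:
      DemoFailureInfo() : _failures(0), _thread_id(0) {}

      void register_failure(demo_traceid current_thread) {
        _failures++;
        if (_thread_id == 0) {
          _thread_id = current_thread;       // latch on the first failure
        } else {
          assert(_thread_id == current_thread && "info must stay thread-local");
        }
      }

      void reset() { _failures = 0; _thread_id = 0; }
      demo_traceid thread_id() const { return _thread_id; }
    };
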
--- a/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -174,7 +174,7 @@
   if (e.should_commit()) {
     e.set_gcId(GCId::current());
     e.set_data(to_trace_struct(pf_info));
-    e.set_thread(pf_info.thread()->thread_id());
+    e.set_thread(pf_info.thread_trace_id());
     e.commit();
   }
 }
--- a/hotspot/src/share/vm/gc/shared/plab.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/plab.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -110,6 +110,30 @@
   }
 }
 
+void PLABStats::log_plab_allocation() {
+  log_debug(gc, plab)("%s PLAB allocation: "
+                      "allocated: " SIZE_FORMAT "B, "
+                      "wasted: " SIZE_FORMAT "B, "
+                      "unused: " SIZE_FORMAT "B, "
+                      "used: " SIZE_FORMAT "B, "
+                      "undo waste: " SIZE_FORMAT "B",
+                      _description,
+                      _allocated * HeapWordSize,
+                      _wasted * HeapWordSize,
+                      _unused * HeapWordSize,
+                      used() * HeapWordSize,
+                      _undo_wasted * HeapWordSize);
+}
+
+void PLABStats::log_sizing(size_t calculated_words, size_t net_desired_words) {
+  log_debug(gc, plab)("%s sizing: "
+                      "calculated: " SIZE_FORMAT "B, "
+                      "actual: " SIZE_FORMAT "B",
+                      _description,
+                      calculated_words * HeapWordSize,
+                      net_desired_words * HeapWordSize);
+}
+
 // Calculates plab size for current number of gc worker threads.
 size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
   return MAX2(min_size(), (size_t)align_object_size(_desired_net_plab_sz / no_of_gc_workers));
@@ -119,7 +143,13 @@
 // use. This should be called once at the end of parallel
 // scavenge; it clears the sensor accumulators.
 void PLABStats::adjust_desired_plab_sz() {
-  assert(ResizePLAB, "Not set");
+  log_plab_allocation();
+
+  if (!ResizePLAB) {
+    // Clear accumulators for next round.
+    reset();
+    return;
+  }
 
   assert(is_object_aligned(max_size()) && min_size() <= max_size(),
          "PLAB clipping computation may be incorrect");
@@ -150,8 +180,9 @@
   new_plab_sz = MIN2(max_size(), new_plab_sz);
   new_plab_sz = align_object_size(new_plab_sz);
   // Latch the result
-  log_trace(gc, plab)("plab_size = " SIZE_FORMAT " desired_net_plab_sz = " SIZE_FORMAT ") ", recent_plab_sz, new_plab_sz);
   _desired_net_plab_sz = new_plab_sz;
 
+  log_sizing(recent_plab_sz, new_plab_sz);
+
   reset();
 }
--- a/hotspot/src/share/vm/gc/shared/plab.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/gc/shared/plab.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -146,6 +146,8 @@
 // PLAB book-keeping.
 class PLABStats : public CHeapObj<mtGC> {
  protected:
+  const char* _description;   // Identifying string.
+
   size_t _allocated;          // Total allocated
   size_t _wasted;             // of which wasted (internal fragmentation)
   size_t _undo_wasted;        // of which wasted on undo (is not used for calculation of PLAB size)
@@ -160,8 +162,12 @@
     _undo_wasted = 0;
     _unused      = 0;
   }
+
+  virtual void log_plab_allocation();
+  virtual void log_sizing(size_t calculated, size_t net_desired);
  public:
-  PLABStats(size_t desired_net_plab_sz_, unsigned wt) :
+  PLABStats(const char* description, size_t desired_net_plab_sz_, unsigned wt) :
+    _description(description),
     _allocated(0),
     _wasted(0),
     _undo_wasted(0),
@@ -172,6 +178,12 @@
 
   virtual ~PLABStats() { }
 
+  size_t allocated() const { return _allocated; }
+  size_t wasted() const { return _wasted; }
+  size_t unused() const { return _unused; }
+  size_t used() const { return allocated() - (wasted() + unused()); }
+  size_t undo_wasted() const { return _undo_wasted; }
+
   static const size_t min_size() {
     return PLAB::min_size();
   }
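
The new PLABStats accessors spell out the accounting identity behind log_plab_allocation(): every word allocated to PLABs is either used by live objects, wasted to internal fragmentation, or left unused, with undo waste tracked separately. A hedged sketch of that bookkeeping (field names follow the diff, the class itself is illustrative):

    #include <cstddef>

    struct DemoPlabCounters {
      size_t allocated   = 0;  // total words handed out to PLABs
      size_t wasted      = 0;  // internal fragmentation
      size_t unused      = 0;  // left over in the PLAB at the end of the pause
      size_t undo_wasted = 0;  // wasted by undo; excluded from sizing decisions

      // Words that actually ended up holding evacuated objects.
      // Multiplying by the heap word size gives the byte values that
      // log_plab_allocation() prints.
      size_t used() const { return allocated - (wasted + unused); }
    };
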
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -762,14 +762,6 @@
   ConstantPoolCacheEntry* cp_cache_entry = cache_entry(thread);
   if (cp_cache_entry->is_resolved(bytecode)) return;
 
-  if (bytecode == Bytecodes::_invokeinterface) {
-    if (log_develop_is_enabled(Trace, itables)) {
-      ResourceMark rm(thread);
-      log_develop_trace(itables)("Resolving: klass: %s to method: %s",
-                                 info.resolved_klass()->name()->as_C_string(),
-                                 info.resolved_method()->name()->as_C_string());
-    }
-  }
 #ifdef ASSERT
   if (bytecode == Bytecodes::_invokeinterface) {
     if (info.resolved_method()->method_holder() ==
--- a/hotspot/src/share/vm/jvmci/commandLineFlagConstraintsJVMCI.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "jvmci/commandLineFlagConstraintsJVMCI.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/defaultStream.hpp"
-
-Flag::Error EnableJVMCIMustBeEnabledConstraintFunc(bool value, bool verbose) {
-  if (!EnableJVMCI) {
-    if (verbose == true) {
-      jio_fprintf(defaultStream::error_stream(), "EnableJVMCI must be enabled\n");
-    }
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error EnableJVMCIMustBeEnabledConstraintFunc(intx value, bool verbose) {
-  if (!EnableJVMCI) {
-    if (verbose == true) {
-      jio_fprintf(defaultStream::error_stream(), "EnableJVMCI must be enabled\n");
-    }
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
--- a/hotspot/src/share/vm/jvmci/commandLineFlagConstraintsJVMCI.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_JVMCI_COMMANDLINEFLAGCONSTRAINTSJVMCI_HPP
-#define SHARE_VM_JVMCI_COMMANDLINEFLAGCONSTRAINTSJVMCI_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-/*
- * Here we have JVMCI arguments constraints functions, which are called automatically
- * whenever flag's value changes. If the constraint fails the function should return
- * an appropriate error value.
- */
-
-Flag::Error EnableJVMCIMustBeEnabledConstraintFunc(bool value, bool verbose);
-Flag::Error EnableJVMCIMustBeEnabledConstraintFunc(intx value, bool verbose);
-
-#endif /* SHARE_VM_JVMCI_COMMANDLINEFLAGCONSTRAINTSJVMCI_HPP */
--- a/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -551,6 +551,14 @@
                                        compiler, _debug_recorder, _dependencies, env, id,
                                        has_unsafe_access, _has_wide_vector, installed_code, compiled_code, speculation_log);
     cb = nm;
+    if (nm != NULL && env == NULL) {
+      DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, compiler);
+      bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
+      if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
+        nm->print_nmethod(printnmethods);
+      }
+      DirectivesStack::release(directive);
+    }
   }
 
   if (cb != NULL) {
--- a/hotspot/src/share/vm/jvmci/jvmciRuntime.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciRuntime.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -293,13 +293,11 @@
     // tracing
     if (log_is_enabled(Info, exceptions)) {
       ResourceMark rm;
-      log_info(exceptions)("Exception <%s> (" INTPTR_FORMAT ") thrown in"
-                           " compiled method <%s> at PC " INTPTR_FORMAT
-                           " for thread " INTPTR_FORMAT,
-                           exception->print_value_string(),
-                           p2i((address)exception()),
-                           nm->method()->print_value_string(), p2i(pc),
-                           p2i(thread));
+      stringStream tempst;
+      tempst.print("compiled method <%s>\n"
+                   " at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT,
+                   nm->method()->print_value_string(), p2i(pc), p2i(thread));
+      Exceptions::log_exception(exception, tempst);
     }
     // for AbortVMOnException flag
     NOT_PRODUCT(Exceptions::debug_check_abort(exception));
--- a/hotspot/src/share/vm/jvmci/jvmci_globals.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmci_globals.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -24,6 +24,8 @@
 
 #include "precompiled.hpp"
 #include "jvmci/jvmci_globals.hpp"
+#include "utilities/defaultStream.hpp"
+#include "runtime/globals_extension.hpp"
 
 JVMCI_FLAGS(MATERIALIZE_DEVELOPER_FLAG, \
             MATERIALIZE_PD_DEVELOPER_FLAG, \
@@ -34,3 +36,185 @@
             MATERIALIZE_NOTPRODUCT_FLAG,
             IGNORE_RANGE, \
             IGNORE_CONSTRAINT)
+
+#define JVMCI_IGNORE_FLAG_FOUR_PARAM(type, name, value, doc)
+#define JVMCI_IGNORE_FLAG_THREE_PARAM(type, name, doc)
+
+// Return true if jvmci flags are consistent.
+bool JVMCIGlobals::check_jvmci_flags_are_consistent() {
+  if (EnableJVMCI) {
+    return true;
+  }
+
+  // "FLAG_IS_DEFAULT" fail count.
+  int fail_count = 0;
+  // Number of "FLAG_IS_DEFAULT" failures to skip before the code reports a
+  // real consistency failure.
+  int skip_fail_count;
+
+  // EnableJVMCI is false at this point, so any other changed JVMCI flag is an
+  // inconsistency. The JVMCI_FLAGS expansions below check every JVMCI flag,
+  // including EnableJVMCI itself, and that one check has to be skipped; the
+  // macro expansion cannot express that directly. Therefore, when EnableJVMCI
+  // was changed on the command line, at least two changed flags are required
+  // before a failure is reported; otherwise a single changed flag is enough.
+  // skip_fail_count is the number of failures to ignore in the macro-expanded
+  // code below: 0 if EnableJVMCI is at its default value, 1 if it was changed.
+  if (!FLAG_IS_DEFAULT(EnableJVMCI)) {
+    skip_fail_count = 1;
+  } else {
+    skip_fail_count = 0;
+  }
+
+#define EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(FLAG)  \
+  if (!FLAG_IS_DEFAULT(FLAG)) {                   \
+    fail_count++;                                 \
+    if (fail_count > skip_fail_count) {           \
+      return false;                               \
+    }                                             \
+  }
+
+#define JVMCI_DIAGNOSTIC_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc)     EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
+#define JVMCI_EXPERIMENTAL_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc)   EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
+
+  // Check consistency of diagnostic flags if UnlockDiagnosticVMOptions is true
+  // or not default. UnlockDiagnosticVMOptions is default true in debug builds.
+  if (UnlockDiagnosticVMOptions || !FLAG_IS_DEFAULT(UnlockDiagnosticVMOptions)) {
+    JVMCI_FLAGS(JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_THREE_PARAM, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_THREE_PARAM, \
+                JVMCI_DIAGNOSTIC_FLAG_VALUE_CHANGED_CHECK_CODE, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                IGNORE_RANGE, \
+                IGNORE_CONSTRAINT)
+  }
+
+  // Check consistency of experimental flags if UnlockExperimentalVMOptions is
+  // true or not default.
+  if (UnlockExperimentalVMOptions || !FLAG_IS_DEFAULT(UnlockExperimentalVMOptions)) {
+    JVMCI_FLAGS(JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_THREE_PARAM, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_THREE_PARAM, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_EXPERIMENTAL_FLAG_VALUE_CHANGED_CHECK_CODE, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                IGNORE_RANGE, \
+                IGNORE_CONSTRAINT)
+  }
+
+#ifndef PRODUCT
+#define JVMCI_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc)        EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
+#define JVMCI_PD_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, doc)            EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
+#define JVMCI_NOTPRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc)     EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
+#else
+#define JVMCI_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc)
+#define JVMCI_PD_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, doc)
+#define JVMCI_NOTPRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc)
+#endif
+
+#define JVMCI_PD_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, doc)            EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
+#define JVMCI_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc)        EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
+
+  JVMCI_FLAGS(JVMCI_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE, \
+              JVMCI_PD_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE, \
+              JVMCI_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE, \
+              JVMCI_PD_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE, \
+              JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+              JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+              JVMCI_NOTPRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE, \
+              IGNORE_RANGE, \
+              IGNORE_CONSTRAINT)
+
+#undef EMIT_FLAG_VALUE_CHANGED_CHECK_CODE
+#undef JVMCI_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE
+#undef JVMCI_PD_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE
+#undef JVMCI_NOTPRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE
+#undef JVMCI_DIAGNOSTIC_FLAG_VALUE_CHANGED_CHECK_CODE
+#undef JVMCI_PD_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE
+#undef JVMCI_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE
+#undef JVMCI_EXPERIMENTAL_FLAG_VALUE_CHANGED_CHECK_CODE
+
+  return true;
+}
+
+// Print jvmci arguments inconsistency error message.
+void JVMCIGlobals::print_jvmci_args_inconsistency_error_message() {
+  const char* error_msg = "Improperly specified VM option '%s'\n";
+  jio_fprintf(defaultStream::error_stream(), "EnableJVMCI must be enabled\n");
+
+#define EMIT_CHECK_PRINT_ERR_MSG_CODE(FLAG)                         \
+  if (!FLAG_IS_DEFAULT(FLAG)) {                                     \
+    if (strcmp(#FLAG, "EnableJVMCI")) {                             \
+      jio_fprintf(defaultStream::error_stream(), error_msg, #FLAG); \
+    }                                                               \
+  }
+
+#define JVMCI_DIAGNOSTIC_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc)     EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
+#define JVMCI_EXPERIMENTAL_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc)   EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
+
+  if (UnlockDiagnosticVMOptions || !FLAG_IS_DEFAULT(UnlockDiagnosticVMOptions)) {
+    JVMCI_FLAGS(JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_THREE_PARAM, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_THREE_PARAM, \
+                JVMCI_DIAGNOSTIC_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                IGNORE_RANGE, \
+                IGNORE_CONSTRAINT)
+  }
+
+  if (UnlockExperimentalVMOptions || !FLAG_IS_DEFAULT(UnlockExperimentalVMOptions)) {
+    JVMCI_FLAGS(JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_THREE_PARAM, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_IGNORE_FLAG_THREE_PARAM, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                JVMCI_EXPERIMENTAL_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
+                JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+                IGNORE_RANGE, \
+                IGNORE_CONSTRAINT)
+  }
+
+#ifndef PRODUCT
+#define JVMCI_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc)        EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
+#define JVMCI_PD_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, doc)            EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
+#define JVMCI_NOTPRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc)     EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
+#else
+#define JVMCI_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc)
+#define JVMCI_PD_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, doc)
+#define JVMCI_NOTPRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc)
+#endif
+
+#define JVMCI_PD_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, doc)            EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
+#define JVMCI_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc)        EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
+
+  JVMCI_FLAGS(JVMCI_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
+              JVMCI_PD_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
+              JVMCI_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
+              JVMCI_PD_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
+              JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+              JVMCI_IGNORE_FLAG_FOUR_PARAM, \
+              JVMCI_NOTPRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
+              IGNORE_RANGE, \
+              IGNORE_CONSTRAINT)
+
+#undef EMIT_CHECK_PRINT_ERR_MSG_CODE
+#undef JVMCI_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE
+#undef JVMCI_PD_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE
+#undef JVMCI_NOTPRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE
+#undef JVMCI_PD_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE
+#undef JVMCI_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE
+#undef JVMCI_DIAGNOSTIC_FLAG_CHECK_PRINT_ERR_MSG_CODE
+#undef JVMCI_EXPERIMENTAL_FLAG_CHECK_PRINT_ERR_MSG_CODE
+
+}
+
+#undef JVMCI_IGNORE_FLAG_FOUR_PARAM
+#undef JVMCI_IGNORE_FLAG_THREE_PARAM
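
check_jvmci_flags_are_consistent() and print_jvmci_args_inconsistency_error_message() both rely on the usual X-macro trick: the JVMCI_FLAGS table is expanded once per flag kind, with a small per-flag macro emitting the "was this flag changed from its default?" check. A reduced standalone sketch of the mechanism (the flag names and boolean defaults here are made up for illustration):

    #include <cstdio>

    // A stand-in flag table: name and whether it still has its default value.
    #define DEMO_FLAGS(flag)            \
      flag(EnableFeature,   true)       \
      flag(FeatureThreads,  false)      \
      flag(FeatureLogLevel, true)

    // Expand the table into one consistency check per flag.
    static bool demo_flags_are_consistent() {
      int changed = 0;
    #define CHECK_FLAG(name, is_default)  if (!(is_default)) { changed++; }
      DEMO_FLAGS(CHECK_FLAG)
    #undef CHECK_FLAG
      return changed == 0;
    }

    int main() {
      std::printf("consistent: %s\n", demo_flags_are_consistent() ? "yes" : "no");
      return 0;
    }
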
--- a/hotspot/src/share/vm/jvmci/jvmci_globals.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmci_globals.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -39,29 +39,23 @@
                                                                             \
   experimental(bool, UseJVMCICompiler, false,                               \
           "Use JVMCI as the default compiler")                              \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   experimental(bool, BootstrapJVMCI, false,                                 \
           "Bootstrap JVMCI before running Java main method")                \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   experimental(bool, PrintBootstrap, true,                                  \
           "Print JVMCI bootstrap progress and summary")                     \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   experimental(intx, JVMCIThreads, 1,                                       \
           "Force number of JVMCI compiler threads to use")                  \
           range(1, max_jint)                                                \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   experimental(intx, JVMCIHostThreads, 1,                                   \
           "Force number of compiler threads for JVMCI host compiler")       \
           range(1, max_jint)                                                \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   experimental(bool, CodeInstallSafepointChecks, true,                      \
           "Perform explicit safepoint checks while installing code")        \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   NOT_COMPILER2(product(intx, MaxVectorSize, 64,                            \
           "Max vector size in bytes, "                                      \
@@ -74,28 +68,22 @@
           "Trace level for JVMCI: "                                         \
           "1 means emit a message for each CompilerToVM call,"              \
           "levels greater than 1 provide progressively greater detail")     \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   experimental(intx, JVMCICounterSize, 0,                                   \
           "Reserved size for benchmark counters")                           \
           range(0, max_jint)                                                \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   experimental(bool, JVMCICountersExcludeCompiler, true,                    \
           "Exclude JVMCI compiler threads from benchmark counters")         \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   develop(bool, JVMCIUseFastLocking, true,                                  \
           "Use fast inlined locking code")                                  \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   experimental(intx, JVMCINMethodSizeLimit, (80*K)*wordSize,                \
           "Maximum size of a compiled method.")                             \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
                                                                             \
   develop(bool, TraceUncollectedSpeculations, false,                        \
-          "Print message when a failed speculation was not collected")      \
-          constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse)        \
+          "Print message when a failed speculation was not collected")
 
 
 // Read default values for JVMCI globals
@@ -110,4 +98,11 @@
             IGNORE_RANGE, \
             IGNORE_CONSTRAINT)
 
+class JVMCIGlobals {
+ public:
+  // Return true if jvmci flags are consistent.
+  static bool check_jvmci_flags_are_consistent();
+  // Print jvmci arguments inconsistency error message.
+  static void print_jvmci_args_inconsistency_error_message();
+};
 #endif // SHARE_VM_JVMCI_JVMCIGLOBALS_HPP
--- a/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -592,6 +592,14 @@
 #endif // TARGET_OS_FAMILY_bsd
 
 
+#ifdef TARGET_ARCH_aarch64
+
+#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
+  volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*)
+
+#endif // TARGET_ARCH_aarch64
+
+
 #ifdef TARGET_ARCH_x86
 
 #define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
--- a/hotspot/src/share/vm/logging/logPrefix.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/logging/logPrefix.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -58,6 +58,7 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, metaspace)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, start)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, task)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, plab)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, region)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, remset)) \
--- a/hotspot/src/share/vm/logging/logTag.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/logging/logTag.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -43,6 +43,7 @@
   LOG_TAG(classload) /* Trace all classes loaded */ \
   LOG_TAG(classloaderdata) /* class loader loader_data lifetime */ \
   LOG_TAG(classunload) /* Trace unloading of classes */ \
+  LOG_TAG(classpath) \
   LOG_TAG(compaction) \
   LOG_TAG(cpu) \
   LOG_TAG(cset) \
@@ -66,6 +67,7 @@
   LOG_TAG(phases) \
   LOG_TAG(plab) \
   LOG_TAG(promotion) \
+  LOG_TAG(protectiondomain) /* Trace protection domain verification */ \
   LOG_TAG(ref) \
   LOG_TAG(refine) \
   LOG_TAG(region) \
@@ -81,6 +83,7 @@
   LOG_TAG(survivor) \
   LOG_TAG(sweep) \
   LOG_TAG(task) \
+  LOG_TAG(thread) \
   LOG_TAG(tlab) \
   LOG_TAG(time) \
   LOG_TAG(verify) \
--- a/hotspot/src/share/vm/memory/filemap.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -208,9 +208,7 @@
         count ++;
         bytes += (int)entry_size;
         bytes += name_bytes;
-        if (TraceClassPaths) {
-          tty->print_cr("[Add main shared path (%s) %s]", (cpe->is_jar_file() ? "jar" : "dir"), name);
-        }
+        log_info(classpath)("add main shared path (%s) %s", (cpe->is_jar_file() ? "jar" : "dir"), name);
       } else {
         SharedClassPathEntry* ent = shared_classpath(cur_entry);
         if (cpe->is_jar_file()) {
@@ -275,9 +273,7 @@
     struct stat st;
     const char* name = ent->_name;
     bool ok = true;
-    if (TraceClassPaths) {
-      tty->print_cr("[Checking shared classpath entry: %s]", name);
-    }
+    log_info(classpath)("checking shared classpath entry: %s", name);
     if (os::stat(name, &st) != 0) {
       fail_continue("Required classpath entry does not exist: %s", name);
       ok = false;
@@ -301,9 +297,7 @@
       }
     }
     if (ok) {
-      if (TraceClassPaths) {
-        tty->print_cr("[ok]");
-      }
+      log_info(classpath)("ok");
     } else if (!PrintSharedArchiveAndExit) {
       _validating_classpath_entry_table = false;
       return false;
@@ -888,10 +882,8 @@
   char header_version[JVM_IDENT_MAX];
   get_header_version(header_version);
   if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
-    if (TraceClassPaths) {
-      tty->print_cr("Expected: %s", header_version);
-      tty->print_cr("Actual:   %s", _jvm_ident);
-    }
+    log_info(classpath)("expected: %s", header_version);
+    log_info(classpath)("actual:   %s", _jvm_ident);
     FileMapInfo::fail_continue("The shared archive file was created by a different"
                   " version or build of HotSpot");
     return false;
@@ -919,7 +911,7 @@
   if (status) {
     if (!ClassLoader::check_shared_paths_misc_info(_paths_misc_info, _header->_paths_misc_info_size)) {
       if (!PrintSharedArchiveAndExit) {
-        fail_continue("shared class paths mismatch (hint: enable -XX:+TraceClassPaths to diagnose the failure)");
+        fail_continue("shared class paths mismatch (hint: enable -Xlog:classpath=info to diagnose the failure)");
         status = false;
       }
     }
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -89,7 +89,7 @@
     set_super(Universe::is_bootstrapping() ? (Klass*)NULL : SystemDictionary::Object_klass());
     set_layout_helper(Klass::_lh_neutral_value);
     set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5)
-    TRACE_INIT_ID(this);
+    TRACE_INIT_KLASS_ID(this);
 }
 
 
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1879,7 +1879,7 @@
   return dep_context;
 }
 
-int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
+int InstanceKlass::mark_dependent_nmethods(KlassDepChange& changes) {
   return dependencies().mark_dependent_nmethods(changes);
 }
 
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@
 // forward declaration for class -- see below for definition
 class BreakpointInfo;
 class ClassFileParser;
-class DepChange;
+class KlassDepChange;
 class DependencyContext;
 class fieldDescriptor;
 class jniIdMapBase;
@@ -821,7 +821,7 @@
 
   // maintenance of deoptimization dependencies
   inline DependencyContext dependencies();
-  int  mark_dependent_nmethods(DepChange& changes);
+  int  mark_dependent_nmethods(KlassDepChange& changes);
   void add_dependent_nmethod(nmethod* nm);
   void remove_dependent_nmethod(nmethod* nm, bool delete_immediately);
 
@@ -839,7 +839,7 @@
 
   // support for stub routines
   static ByteSize init_state_offset()  { return in_ByteSize(offset_of(InstanceKlass, _init_state)); }
-  TRACE_DEFINE_OFFSET;
+  TRACE_DEFINE_KLASS_TRACE_ID_OFFSET;
   static ByteSize init_thread_offset() { return in_ByteSize(offset_of(InstanceKlass, _init_thread)); }
 
   // subclass/subinterface checks
--- a/hotspot/src/share/vm/oops/klass.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/klass.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -494,7 +494,7 @@
 }
 
 void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
-  TRACE_INIT_ID(this);
+  TRACE_INIT_KLASS_ID(this);
   // If an exception happened during CDS restore, some of these fields may already be
   // set.  We leave the class on the CLD list, even if incomplete so that we don't
   // modify the CLD list outside a safepoint.
--- a/hotspot/src/share/vm/oops/klass.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/klass.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -132,7 +132,7 @@
   jint        _modifier_flags;  // Processed access flags, for use by Class.getModifiers.
   AccessFlags _access_flags;    // Access flags. The class/interface distinction is stored here.
 
-  TRACE_DEFINE_KLASS_TRACE_ID;
+  TRACE_DEFINE_TRACE_ID_FIELD;
 
   // Biased locking implementation and statistics
   // (the 64-bit chunk goes first, to avoid some fragmentation)
@@ -569,7 +569,7 @@
   jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; }
   void  set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; }
 
-  TRACE_DEFINE_KLASS_METHODS;
+  TRACE_DEFINE_TRACE_ID_METHODS;
 
   // garbage collection support
   void oops_do(OopClosure* cl);
--- a/hotspot/src/share/vm/oops/typeArrayOop.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/typeArrayOop.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -129,7 +129,7 @@
   Metadata* metadata_at(int which) const {
     return (Metadata*)*long_at_addr(which); }
   void metadata_at_put(int which, Metadata* contents) {
-    *long_at_addr(which) = (long)contents;
+    *long_at_addr(which) = (jlong)contents;
   }
 #else
   Metadata* metadata_at(int which) const {
--- a/hotspot/src/share/vm/opto/c2compiler.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/c2compiler.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -243,14 +243,72 @@
   case vmIntrinsics::_reverseBytes_l:
     if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return false;
     break;
+
+  /* CompareAndSwap, Object: */
   case vmIntrinsics::_compareAndSwapObject:
 #ifdef _LP64
+    if ( UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapN)) return false;
     if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return false;
+#else
+    if (!Matcher::match_rule_supported(Op_CompareAndSwapP)) return false;
 #endif
     break;
+  case vmIntrinsics::_weakCompareAndSwapObject:
+  case vmIntrinsics::_weakCompareAndSwapObjectAcquire:
+  case vmIntrinsics::_weakCompareAndSwapObjectRelease:
+#ifdef _LP64
+    if ( UseCompressedOops && !Matcher::match_rule_supported(Op_WeakCompareAndSwapN)) return false;
+    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_WeakCompareAndSwapP)) return false;
+#else
+    if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapP)) return false;
+#endif
+    break;
+  /* CompareAndSwap, Long: */
   case vmIntrinsics::_compareAndSwapLong:
     if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return false;
     break;
+  case vmIntrinsics::_weakCompareAndSwapLong:
+  case vmIntrinsics::_weakCompareAndSwapLongAcquire:
+  case vmIntrinsics::_weakCompareAndSwapLongRelease:
+    if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapL)) return false;
+    break;
+
+  /* CompareAndSwap, Int: */
+  case vmIntrinsics::_compareAndSwapInt:
+    if (!Matcher::match_rule_supported(Op_CompareAndSwapI)) return false;
+    break;
+  case vmIntrinsics::_weakCompareAndSwapInt:
+  case vmIntrinsics::_weakCompareAndSwapIntAcquire:
+  case vmIntrinsics::_weakCompareAndSwapIntRelease:
+    if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapI)) return false;
+    break;
+
+  /* CompareAndExchange, Object: */
+  case vmIntrinsics::_compareAndExchangeObjectVolatile:
+  case vmIntrinsics::_compareAndExchangeObjectAcquire:
+  case vmIntrinsics::_compareAndExchangeObjectRelease:
+#ifdef _LP64
+    if ( UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndExchangeN)) return false;
+    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndExchangeP)) return false;
+#else
+    if (!Matcher::match_rule_supported(Op_CompareAndExchangeP)) return false;
+#endif
+    break;
+
+  /* CompareAndExchange, Long: */
+  case vmIntrinsics::_compareAndExchangeLongVolatile:
+  case vmIntrinsics::_compareAndExchangeLongAcquire:
+  case vmIntrinsics::_compareAndExchangeLongRelease:
+    if (!Matcher::match_rule_supported(Op_CompareAndExchangeL)) return false;
+    break;
+
+  /* CompareAndExchange, Int: */
+  case vmIntrinsics::_compareAndExchangeIntVolatile:
+  case vmIntrinsics::_compareAndExchangeIntAcquire:
+  case vmIntrinsics::_compareAndExchangeIntRelease:
+    if (!Matcher::match_rule_supported(Op_CompareAndExchangeI)) return false;
+    break;
+
   case vmIntrinsics::_getAndAddInt:
     if (!Matcher::match_rule_supported(Op_GetAndAddI)) return false;
     break;
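
The hunks above admit each new CompareAndSwap/WeakCompareAndSwap/CompareAndExchange intrinsic only after probing the platform back end for a match rule covering every ideal node the intrinsic may expand to. A minimal standalone sketch of that admission pattern, using placeholder enum values and a hard-coded capability table rather than HotSpot's real Matcher, might look like this:

// Standalone sketch of "admit the intrinsic only if the back end can match its
// nodes"; the names below are illustrative placeholders, not HotSpot symbols.
#include <cstdio>

enum Op        { Op_CmpSwapX, Op_WeakCmpSwapX, Op_CmpExchangeX };
enum Intrinsic { _cmpSwapX, _weakCmpSwapX, _cmpExchangeX };

// Stand-in for Matcher::match_rule_supported(): in HotSpot this reflects the
// platform .ad file; here it is just a fixed capability table.
static bool match_rule_supported(Op op) {
  return op != Op_CmpExchangeX;   // pretend this platform lacks compare-and-exchange
}

static bool is_intrinsic_supported(Intrinsic id) {
  switch (id) {
    case _cmpSwapX:     return match_rule_supported(Op_CmpSwapX);
    case _weakCmpSwapX: return match_rule_supported(Op_WeakCmpSwapX);
    case _cmpExchangeX: return match_rule_supported(Op_CmpExchangeX);
  }
  return false;
}

int main() {
  std::printf("weak cas supported:    %d\n", is_intrinsic_supported(_weakCmpSwapX));
  std::printf("cmpxchg val supported: %d\n", is_intrinsic_supported(_cmpExchangeX));
  return 0;
}
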
@@ -382,6 +440,42 @@
   case vmIntrinsics::_putLongVolatile:
   case vmIntrinsics::_putFloatVolatile:
   case vmIntrinsics::_putDoubleVolatile:
+  case vmIntrinsics::_getObjectAcquire:
+  case vmIntrinsics::_getBooleanAcquire:
+  case vmIntrinsics::_getByteAcquire:
+  case vmIntrinsics::_getShortAcquire:
+  case vmIntrinsics::_getCharAcquire:
+  case vmIntrinsics::_getIntAcquire:
+  case vmIntrinsics::_getLongAcquire:
+  case vmIntrinsics::_getFloatAcquire:
+  case vmIntrinsics::_getDoubleAcquire:
+  case vmIntrinsics::_putObjectRelease:
+  case vmIntrinsics::_putBooleanRelease:
+  case vmIntrinsics::_putByteRelease:
+  case vmIntrinsics::_putShortRelease:
+  case vmIntrinsics::_putCharRelease:
+  case vmIntrinsics::_putIntRelease:
+  case vmIntrinsics::_putLongRelease:
+  case vmIntrinsics::_putFloatRelease:
+  case vmIntrinsics::_putDoubleRelease:
+  case vmIntrinsics::_getObjectOpaque:
+  case vmIntrinsics::_getBooleanOpaque:
+  case vmIntrinsics::_getByteOpaque:
+  case vmIntrinsics::_getShortOpaque:
+  case vmIntrinsics::_getCharOpaque:
+  case vmIntrinsics::_getIntOpaque:
+  case vmIntrinsics::_getLongOpaque:
+  case vmIntrinsics::_getFloatOpaque:
+  case vmIntrinsics::_getDoubleOpaque:
+  case vmIntrinsics::_putObjectOpaque:
+  case vmIntrinsics::_putBooleanOpaque:
+  case vmIntrinsics::_putByteOpaque:
+  case vmIntrinsics::_putShortOpaque:
+  case vmIntrinsics::_putCharOpaque:
+  case vmIntrinsics::_putIntOpaque:
+  case vmIntrinsics::_putLongOpaque:
+  case vmIntrinsics::_putFloatOpaque:
+  case vmIntrinsics::_putDoubleOpaque:
   case vmIntrinsics::_getShortUnaligned:
   case vmIntrinsics::_getCharUnaligned:
   case vmIntrinsics::_getIntUnaligned:
@@ -390,7 +484,6 @@
   case vmIntrinsics::_putCharUnaligned:
   case vmIntrinsics::_putIntUnaligned:
   case vmIntrinsics::_putLongUnaligned:
-  case vmIntrinsics::_compareAndSwapInt:
   case vmIntrinsics::_putOrderedObject:
   case vmIntrinsics::_putOrderedInt:
   case vmIntrinsics::_putOrderedLong:
@@ -400,8 +493,6 @@
   case vmIntrinsics::_currentThread:
   case vmIntrinsics::_isInterrupted:
 #ifdef TRACE_HAVE_INTRINSICS
-  case vmIntrinsics::_classID:
-  case vmIntrinsics::_threadID:
   case vmIntrinsics::_counterTime:
 #endif
   case vmIntrinsics::_currentTimeMillis:
--- a/hotspot/src/share/vm/opto/classes.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/classes.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -85,6 +85,14 @@
 macro(CompareAndSwapL)
 macro(CompareAndSwapP)
 macro(CompareAndSwapN)
+macro(WeakCompareAndSwapI)
+macro(WeakCompareAndSwapL)
+macro(WeakCompareAndSwapP)
+macro(WeakCompareAndSwapN)
+macro(CompareAndExchangeI)
+macro(CompareAndExchangeL)
+macro(CompareAndExchangeP)
+macro(CompareAndExchangeN)
 macro(GetAndAddI)
 macro(GetAndAddL)
 macro(GetAndSetI)
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -88,7 +88,27 @@
 
 // Return the index at which m must be inserted (or already exists).
 // The sort order is by the address of the ciMethod, with is_virtual as minor key.
-int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
+class IntrinsicDescPair {
+ private:
+  ciMethod* _m;
+  bool _is_virtual;
+ public:
+  IntrinsicDescPair(ciMethod* m, bool is_virtual) : _m(m), _is_virtual(is_virtual) {}
+  static int compare(IntrinsicDescPair* const& key, CallGenerator* const& elt) {
+    ciMethod* m = elt->method();
+    ciMethod* key_m = key->_m;
+    if (key_m < m)      return -1;
+    else if (key_m > m) return 1;
+    else {
+      bool is_virtual = elt->is_virtual();
+      bool key_virtual = key->_is_virtual;
+      if (key_virtual < is_virtual)      return -1;
+      else if (key_virtual > is_virtual) return 1;
+      else                               return 0;
+    }
+  }
+};
+int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found) {
 #ifdef ASSERT
   for (int i = 1; i < _intrinsics->length(); i++) {
     CallGenerator* cg1 = _intrinsics->at(i-1);
@@ -99,63 +119,28 @@
            "compiler intrinsics list must stay sorted");
   }
 #endif
-  // Binary search sorted list, in decreasing intervals [lo, hi].
-  int lo = 0, hi = _intrinsics->length()-1;
-  while (lo <= hi) {
-    int mid = (uint)(hi + lo) / 2;
-    ciMethod* mid_m = _intrinsics->at(mid)->method();
-    if (m < mid_m) {
-      hi = mid-1;
-    } else if (m > mid_m) {
-      lo = mid+1;
-    } else {
-      // look at minor sort key
-      bool mid_virt = _intrinsics->at(mid)->is_virtual();
-      if (is_virtual < mid_virt) {
-        hi = mid-1;
-      } else if (is_virtual > mid_virt) {
-        lo = mid+1;
-      } else {
-        return mid;  // exact match
-      }
-    }
-  }
-  return lo;  // inexact match
+  IntrinsicDescPair pair(m, is_virtual);
+  return _intrinsics->find_sorted<IntrinsicDescPair*, IntrinsicDescPair::compare>(&pair, found);
 }
 
 void Compile::register_intrinsic(CallGenerator* cg) {
   if (_intrinsics == NULL) {
     _intrinsics = new (comp_arena())GrowableArray<CallGenerator*>(comp_arena(), 60, 0, NULL);
   }
-  // This code is stolen from ciObjectFactory::insert.
-  // Really, GrowableArray should have methods for
-  // insert_at, remove_at, and binary_search.
   int len = _intrinsics->length();
-  int index = intrinsic_insertion_index(cg->method(), cg->is_virtual());
-  if (index == len) {
-    _intrinsics->append(cg);
-  } else {
-#ifdef ASSERT
-    CallGenerator* oldcg = _intrinsics->at(index);
-    assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
-#endif
-    _intrinsics->append(_intrinsics->at(len-1));
-    int pos;
-    for (pos = len-2; pos >= index; pos--) {
-      _intrinsics->at_put(pos+1,_intrinsics->at(pos));
-    }
-    _intrinsics->at_put(index, cg);
-  }
+  bool found = false;
+  int index = intrinsic_insertion_index(cg->method(), cg->is_virtual(), found);
+  assert(!found, "registering twice");
+  _intrinsics->insert_before(index, cg);
   assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
 }
 
 CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
   assert(m->is_loaded(), "don't try this on unloaded methods");
   if (_intrinsics != NULL) {
-    int index = intrinsic_insertion_index(m, is_virtual);
-    if (index < _intrinsics->length()
-        && _intrinsics->at(index)->method() == m
-        && _intrinsics->at(index)->is_virtual() == is_virtual) {
+    bool found = false;
+    int index = intrinsic_insertion_index(m, is_virtual, found);
+    if (found) {
       return _intrinsics->at(index);
     }
   }
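
The open-coded binary search is replaced by GrowableArray::find_sorted() with IntrinsicDescPair::compare, keeping the same ordering: the primary key is the ciMethod pointer's address and the minor key is is_virtual. A standalone sketch of that two-key lower-bound search, with std::lower_bound standing in for find_sorted() and placeholder types rather than HotSpot's:

// Standalone illustration (not HotSpot code) of the (method address, is_virtual)
// ordering and the exact-match reporting that find_sorted() provides.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Method {};                                    // stands in for ciMethod
struct Entry { const Method* m; bool is_virtual; };  // stands in for CallGenerator

static bool less_than(const Entry& a, const Entry& b) {
  if (a.m != b.m) return a.m < b.m;   // primary key: pointer address, as in the patch
  return a.is_virtual < b.is_virtual; // minor key
}

static int insertion_index(const std::vector<Entry>& v, const Entry& key, bool& found) {
  auto it = std::lower_bound(v.begin(), v.end(), key, less_than);
  found = (it != v.end()) && !less_than(key, *it);   // neither a<b nor b<a => equal keys
  return (int)(it - v.begin());
}

int main() {
  Method m1, m2;
  std::vector<Entry> v = { { &m1, false }, { &m1, true } };
  bool found = false;
  int idx = insertion_index(v, { &m2, false }, found);
  std::printf("index=%d found=%d\n", idx, (int)found);
  return 0;
}
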
@@ -2801,6 +2786,14 @@
   case Op_CompareAndSwapL:
   case Op_CompareAndSwapP:
   case Op_CompareAndSwapN:
+  case Op_WeakCompareAndSwapI:
+  case Op_WeakCompareAndSwapL:
+  case Op_WeakCompareAndSwapP:
+  case Op_WeakCompareAndSwapN:
+  case Op_CompareAndExchangeI:
+  case Op_CompareAndExchangeL:
+  case Op_CompareAndExchangeP:
+  case Op_CompareAndExchangeN:
   case Op_GetAndAddI:
   case Op_GetAndAddL:
   case Op_GetAndSetI:
--- a/hotspot/src/share/vm/opto/compile.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1250,7 +1250,7 @@
   // Intrinsic setup.
   void           register_library_intrinsics();                            // initializer
   CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);          // constructor
-  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual);  // helper
+  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found);  // helper
   CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);             // query fn
   void           register_intrinsic(CallGenerator* cg);                    // update fn
 
--- a/hotspot/src/share/vm/opto/escape.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/escape.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -490,6 +490,8 @@
       }
       break;
     }
+    case Op_CompareAndExchangeP:
+    case Op_CompareAndExchangeN:
     case Op_GetAndSetP:
     case Op_GetAndSetN: {
       add_objload_to_connection_graph(n, delayed_worklist);
@@ -499,6 +501,8 @@
     case Op_StoreN:
     case Op_StoreNKlass:
     case Op_StorePConditional:
+    case Op_WeakCompareAndSwapP:
+    case Op_WeakCompareAndSwapN:
     case Op_CompareAndSwapP:
     case Op_CompareAndSwapN: {
       Node* adr = n->in(MemNode::Address);
@@ -698,8 +702,12 @@
     case Op_StoreN:
     case Op_StoreNKlass:
     case Op_StorePConditional:
+    case Op_CompareAndExchangeP:
+    case Op_CompareAndExchangeN:
     case Op_CompareAndSwapP:
     case Op_CompareAndSwapN:
+    case Op_WeakCompareAndSwapP:
+    case Op_WeakCompareAndSwapN:
     case Op_GetAndSetP:
     case Op_GetAndSetN: {
       Node* adr = n->in(MemNode::Address);
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1179,8 +1179,10 @@
 // Helper function to do a NULL pointer check.  Returned value is
 // the incoming address with NULL casted away.  You are allowed to use the
 // not-null value only if you are control dependent on the test.
+#ifndef PRODUCT
 extern int explicit_null_checks_inserted,
            explicit_null_checks_elided;
+#endif
 Node* GraphKit::null_check_common(Node* value, BasicType type,
                                   // optional arguments for variations:
                                   bool assert_null,
@@ -1193,7 +1195,7 @@
     value = cast_not_null(value);   // Make it appear to be non-null (4962416).
     return value;
   }
-  explicit_null_checks_inserted++;
+  NOT_PRODUCT(explicit_null_checks_inserted++);
 
   // Construct NULL check
   Node *chk = NULL;
@@ -1233,7 +1235,7 @@
         // See if the type is contained in NULL_PTR.
         // If so, then the value is already null.
         if (t->higher_equal(TypePtr::NULL_PTR)) {
-          explicit_null_checks_elided++;
+          NOT_PRODUCT(explicit_null_checks_elided++);
           return value;           // Elided null assert quickly!
         }
       } else {
@@ -1242,7 +1244,7 @@
         // type.  In other words, "value" was not-null.
         if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
           // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
-          explicit_null_checks_elided++;
+          NOT_PRODUCT(explicit_null_checks_elided++);
           return value;           // Elided null check quickly!
         }
       }
@@ -1282,7 +1284,7 @@
         set_control(cfg);
         Node *res = cast_not_null(value);
         set_control(oldcontrol);
-        explicit_null_checks_elided++;
+        NOT_PRODUCT(explicit_null_checks_elided++);
         return res;
       }
       cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
@@ -1326,15 +1328,18 @@
     IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
     Node* null_true = _gvn.transform( new IfFalseNode(iff));
     set_control(      _gvn.transform( new IfTrueNode(iff)));
-    if (null_true == top())
+#ifndef PRODUCT
+    if (null_true == top()) {
       explicit_null_checks_elided++;
+    }
+#endif
     (*null_control) = null_true;
   } else {
     BuildCutout unless(this, tst, ok_prob);
     // Check for optimizer eliding test at parse time
     if (stopped()) {
       // Failure not possible; do not bother making uncommon trap.
-      explicit_null_checks_elided++;
+      NOT_PRODUCT(explicit_null_checks_elided++);
     } else if (assert_null) {
       uncommon_trap(reason,
                     Deoptimization::Action_make_not_entrant,
@@ -3149,6 +3154,19 @@
   return membar;
 }
 
+void GraphKit::insert_store_load_for_barrier() {
+  Node* mem = reset_memory();
+  MemBarNode* mb = MemBarNode::make(C, Op_MemBarVolatile, Compile::AliasIdxBot);
+  mb->init_req(TypeFunc::Control, control());
+  mb->init_req(TypeFunc::Memory, mem);
+  Node* membar = _gvn.transform(mb);
+  set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
+  Node* newmem = _gvn.transform(new ProjNode(membar, TypeFunc::Memory));
+  set_all_memory(mem);
+  set_memory(newmem, Compile::AliasIdxRaw);
+}
+
+
 //------------------------------shared_lock------------------------------------
 // Emit locking code.
 FastLockNode* GraphKit::shared_lock(Node* obj) {
@@ -3840,7 +3858,7 @@
   BasicType bt = T_BYTE;
 
   if (UseConcMarkSweepGC && UseCondCardMark) {
-    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
+    insert_store_load_for_barrier();
     __ sync_kit(this);
   }
 
@@ -4280,8 +4298,7 @@
 
         __ if_then(card_val, BoolTest::ne, young_card); {
           sync_kit(ideal);
-          // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
-          insert_mem_bar(Op_MemBarVolatile, oop_store);
+          insert_store_load_for_barrier();
           __ sync_kit(this);
 
           Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
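
Both card-marking paths now call insert_store_load_for_barrier(), which routes only the raw (card table) memory slice through a MemBarVolatile instead of fencing every alias slice. The ordering it provides matters for conditional card marks: the reference store must become visible before the card's current value is examined, or the "already dirty" test can race with concurrent card cleaning and the new reference can be missed. A standalone std::atomic analogy of that StoreLoad requirement (not HotSpot code; the card encoding is illustrative):

#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<std::uint8_t> card{1};          // 1 = clean, 0 = dirty (illustrative)
static std::atomic<void*>        field{nullptr};   // the reference field being written

void write_ref_with_cond_card_mark(void* new_ref) {
  field.store(new_ref, std::memory_order_relaxed);       // the reference store
  std::atomic_thread_fence(std::memory_order_seq_cst);   // StoreLoad, like the inserted membar
  if (card.load(std::memory_order_relaxed) != 0) {       // conditional card mark: skip if already dirty
    card.store(0, std::memory_order_relaxed);
  }
}

int main() {
  int dummy = 0;
  write_ref_with_cond_card_mark(&dummy);
  std::printf("card=%u\n", (unsigned)card.load());
  return 0;
}
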
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -834,6 +834,7 @@
   int next_monitor();
   Node* insert_mem_bar(int opcode, Node* precedent = NULL);
   Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
+  void insert_store_load_for_barrier();
   // Optional 'precedent' is appended as an extra edge, to force ordering.
   FastLockNode* shared_lock(Node* obj);
   void shared_unlock(Node* box, Node* obj);
--- a/hotspot/src/share/vm/opto/ifnode.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/ifnode.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -40,7 +40,9 @@
 // Optimization - Graph Style
 
 
+#ifndef PRODUCT
 extern int explicit_null_checks_elided;
+#endif
 
 //=============================================================================
 //------------------------------Value------------------------------------------
@@ -1504,24 +1506,28 @@
   Node* prev_dom = this;
   int op = Opcode();
   // Search up the dominator tree for an If with an identical test
-  while( dom->Opcode() != op    ||  // Not same opcode?
+  while (dom->Opcode() != op    ||  // Not same opcode?
          dom->in(1)    != in(1) ||  // Not same input 1?
          (req() == 3 && dom->in(2) != in(2)) || // Not same input 2?
-         prev_dom->in(0) != dom ) { // One path of test does not dominate?
-    if( dist < 0 ) return NULL;
+         prev_dom->in(0) != dom) {  // One path of test does not dominate?
+    if (dist < 0) return NULL;
 
     dist--;
     prev_dom = dom;
-    dom = up_one_dom( dom );
-    if( !dom ) return NULL;
+    dom = up_one_dom(dom);
+    if (!dom) return NULL;
   }
 
   // Check that we did not follow a loop back to ourselves
-  if( this == dom )
+  if (this == dom) {
     return NULL;
+  }
 
-  if( dist > 2 )              // Add to count of NULL checks elided
+#ifndef PRODUCT
+  if (dist > 2) { // Add to count of NULL checks elided
     explicit_null_checks_elided++;
+  }
+#endif
 
   return prev_dom;
 }
--- a/hotspot/src/share/vm/opto/lcm.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -348,8 +348,10 @@
   }
 
   // ---- Found an implicit null check
+#ifndef PRODUCT
   extern int implicit_null_checks;
   implicit_null_checks++;
+#endif
 
   if( is_decoden ) {
     // Check if we need to hoist decodeHeapOop_not_null first.
--- a/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -49,7 +49,9 @@
 #include "opto/subnode.hpp"
 #include "prims/nativeLookup.hpp"
 #include "runtime/sharedRuntime.hpp"
+#ifdef TRACE_HAVE_INTRINSICS
 #include "trace/traceMacros.hpp"
+#endif
 
 class LibraryIntrinsic : public InlineCallGenerator {
   // Extend the set of intrinsics known to the runtime:
@@ -241,15 +243,14 @@
   // Generates the guards that check whether the result of
   // Unsafe.getObject should be recorded in an SATB log buffer.
   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
-  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
+
+  typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
+  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
   static bool klass_needs_init_guard(Node* kls);
   bool inline_unsafe_allocate();
   bool inline_unsafe_copyMemory();
   bool inline_native_currentThread();
-#ifdef TRACE_HAVE_INTRINSICS
-  bool inline_native_classID();
-  bool inline_native_threadID();
-#endif
+
   bool inline_native_time_funcs(address method, const char* funcName);
   bool inline_native_isInterrupted();
   bool inline_native_Class_query(vmIntrinsics::ID id);
@@ -274,9 +275,10 @@
   JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
   void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp);
 
-  typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
-  bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind);
-  bool inline_unsafe_ordered_store(BasicType type);
+  typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;
+  MemNode::MemOrd access_kind_to_memord_LS(AccessKind access_kind, bool is_store);
+  MemNode::MemOrd access_kind_to_memord(AccessKind access_kind);
+  bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind, AccessKind access_kind);
   bool inline_unsafe_fence(vmIntrinsics::ID id);
   bool inline_fp_conversions(vmIntrinsics::ID id);
   bool inline_number_methods(vmIntrinsics::ID id);
@@ -553,86 +555,147 @@
   case vmIntrinsics::_inflateStringC:
   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 
-  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile, false);
-  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile, false);
-  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
-  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
-  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
-  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, false);
-  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
-  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
-  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
-  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile, false);
-  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile, false);
-  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
-  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
-  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
-  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, false);
-  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
-  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
-  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
-
-  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
-  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
-  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
-  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile, false);
-  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
-  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
-  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
-  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile, false);
-
-  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
-  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
-  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
-  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile, false);
-  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
-  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
-  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
-  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile, false);
-
-  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile, false);
-  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile, false);
-  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile, false);
-  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile, false);
-  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile, false);
-  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile, false);
-  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile, false);
-  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile, false);
-  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile, false);
-
-  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile, false);
-  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile, false);
-  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile, false);
-  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile, false);
-  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile, false);
-  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile, false);
-  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile, false);
-  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile, false);
-  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile, false);
-
-  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, true);
-  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, true);
-  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, true);
-  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, true);
-
-  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, true);
-  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, true);
-  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, true);
-  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, true);
-
-  case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
-  case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
-  case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);
-
-  case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
-  case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
-  case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);
-
-  case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
-  case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
-  case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
-  case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
-  case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);
+  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   Relaxed, false);
+  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  Relaxed, false);
+  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     Relaxed, false);
+  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Relaxed, false);
+  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Relaxed, false);
+  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Relaxed, false);
+  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Relaxed, false);
+  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    Relaxed, false);
+  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   Relaxed, false);
+
+  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Relaxed, false);
+  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  Relaxed, false);
+  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     Relaxed, false);
+  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Relaxed, false);
+  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Relaxed, false);
+  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Relaxed, false);
+  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Relaxed, false);
+  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    Relaxed, false);
+  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   Relaxed, false);
+
+  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,     Relaxed, false);
+  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,    Relaxed, false);
+  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,     Relaxed, false);
+  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,      Relaxed, false);
+  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,     Relaxed, false);
+  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,    Relaxed, false);
+  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,   Relaxed, false);
+  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS,  Relaxed, false);
+
+  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,     Relaxed, false);
+  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,    Relaxed, false);
+  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,     Relaxed, false);
+  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,      Relaxed, false);
+  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,     Relaxed, false);
+  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,    Relaxed, false);
+  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,   Relaxed, false);
+  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS,  Relaxed, false);
+
+  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   Volatile, false);
+  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  Volatile, false);
+  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     Volatile, false);
+  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Volatile, false);
+  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Volatile, false);
+  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Volatile, false);
+  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Volatile, false);
+  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    Volatile, false);
+  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   Volatile, false);
+
+  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Volatile, false);
+  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  Volatile, false);
+  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     Volatile, false);
+  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Volatile, false);
+  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Volatile, false);
+  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Volatile, false);
+  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Volatile, false);
+  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    Volatile, false);
+  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   Volatile, false);
+
+  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Relaxed, true);
+  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Relaxed, true);
+  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Relaxed, true);
+  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Relaxed, true);
+
+  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Relaxed, true);
+  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Relaxed, true);
+  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Relaxed, true);
+  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Relaxed, true);
+
+  case vmIntrinsics::_putOrderedObject:         return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Release, false);
+  case vmIntrinsics::_putOrderedInt:            return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Release, false);
+  case vmIntrinsics::_putOrderedLong:           return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Release, false);
+
+  case vmIntrinsics::_getObjectAcquire:         return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   Acquire, false);
+  case vmIntrinsics::_getBooleanAcquire:        return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  Acquire, false);
+  case vmIntrinsics::_getByteAcquire:           return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     Acquire, false);
+  case vmIntrinsics::_getShortAcquire:          return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Acquire, false);
+  case vmIntrinsics::_getCharAcquire:           return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Acquire, false);
+  case vmIntrinsics::_getIntAcquire:            return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Acquire, false);
+  case vmIntrinsics::_getLongAcquire:           return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Acquire, false);
+  case vmIntrinsics::_getFloatAcquire:          return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    Acquire, false);
+  case vmIntrinsics::_getDoubleAcquire:         return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   Acquire, false);
+
+  case vmIntrinsics::_putObjectRelease:         return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Release, false);
+  case vmIntrinsics::_putBooleanRelease:        return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  Release, false);
+  case vmIntrinsics::_putByteRelease:           return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     Release, false);
+  case vmIntrinsics::_putShortRelease:          return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Release, false);
+  case vmIntrinsics::_putCharRelease:           return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Release, false);
+  case vmIntrinsics::_putIntRelease:            return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Release, false);
+  case vmIntrinsics::_putLongRelease:           return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Release, false);
+  case vmIntrinsics::_putFloatRelease:          return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    Release, false);
+  case vmIntrinsics::_putDoubleRelease:         return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   Release, false);
+
+  case vmIntrinsics::_getObjectOpaque:          return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   Opaque, false);
+  case vmIntrinsics::_getBooleanOpaque:         return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  Opaque, false);
+  case vmIntrinsics::_getByteOpaque:            return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     Opaque, false);
+  case vmIntrinsics::_getShortOpaque:           return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    Opaque, false);
+  case vmIntrinsics::_getCharOpaque:            return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     Opaque, false);
+  case vmIntrinsics::_getIntOpaque:             return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      Opaque, false);
+  case vmIntrinsics::_getLongOpaque:            return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     Opaque, false);
+  case vmIntrinsics::_getFloatOpaque:           return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    Opaque, false);
+  case vmIntrinsics::_getDoubleOpaque:          return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   Opaque, false);
+
+  case vmIntrinsics::_putObjectOpaque:          return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   Opaque, false);
+  case vmIntrinsics::_putBooleanOpaque:         return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  Opaque, false);
+  case vmIntrinsics::_putByteOpaque:            return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     Opaque, false);
+  case vmIntrinsics::_putShortOpaque:           return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    Opaque, false);
+  case vmIntrinsics::_putCharOpaque:            return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     Opaque, false);
+  case vmIntrinsics::_putIntOpaque:             return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      Opaque, false);
+  case vmIntrinsics::_putLongOpaque:            return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     Opaque, false);
+  case vmIntrinsics::_putFloatOpaque:           return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    Opaque, false);
+  case vmIntrinsics::_putDoubleOpaque:          return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   Opaque, false);
+
+  case vmIntrinsics::_compareAndSwapObject:             return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap,      Volatile);
+  case vmIntrinsics::_compareAndSwapInt:                return inline_unsafe_load_store(T_INT,    LS_cmp_swap,      Volatile);
+  case vmIntrinsics::_compareAndSwapLong:               return inline_unsafe_load_store(T_LONG,   LS_cmp_swap,      Volatile);
+
+  case vmIntrinsics::_weakCompareAndSwapObject:         return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
+  case vmIntrinsics::_weakCompareAndSwapObjectAcquire:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
+  case vmIntrinsics::_weakCompareAndSwapObjectRelease:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
+  case vmIntrinsics::_weakCompareAndSwapInt:            return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
+  case vmIntrinsics::_weakCompareAndSwapIntAcquire:     return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);
+  case vmIntrinsics::_weakCompareAndSwapIntRelease:     return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Release);
+  case vmIntrinsics::_weakCompareAndSwapLong:           return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Relaxed);
+  case vmIntrinsics::_weakCompareAndSwapLongAcquire:    return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Acquire);
+  case vmIntrinsics::_weakCompareAndSwapLongRelease:    return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Release);
+
+  case vmIntrinsics::_compareAndExchangeObjectVolatile: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Volatile);
+  case vmIntrinsics::_compareAndExchangeObjectAcquire:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Acquire);
+  case vmIntrinsics::_compareAndExchangeObjectRelease:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Release);
+  case vmIntrinsics::_compareAndExchangeIntVolatile:    return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Volatile);
+  case vmIntrinsics::_compareAndExchangeIntAcquire:     return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Acquire);
+  case vmIntrinsics::_compareAndExchangeIntRelease:     return inline_unsafe_load_store(T_INT,    LS_cmp_exchange,  Release);
+  case vmIntrinsics::_compareAndExchangeLongVolatile:   return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Volatile);
+  case vmIntrinsics::_compareAndExchangeLongAcquire:    return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Acquire);
+  case vmIntrinsics::_compareAndExchangeLongRelease:    return inline_unsafe_load_store(T_LONG,   LS_cmp_exchange,  Release);
+
+  case vmIntrinsics::_getAndAddInt:                     return inline_unsafe_load_store(T_INT,    LS_get_add,       Volatile);
+  case vmIntrinsics::_getAndAddLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_add,       Volatile);
+  case vmIntrinsics::_getAndSetInt:                     return inline_unsafe_load_store(T_INT,    LS_get_set,       Volatile);
+  case vmIntrinsics::_getAndSetLong:                    return inline_unsafe_load_store(T_LONG,   LS_get_set,       Volatile);
+  case vmIntrinsics::_getAndSetObject:                  return inline_unsafe_load_store(T_OBJECT, LS_get_set,       Volatile);
 
   case vmIntrinsics::_loadFence:
   case vmIntrinsics::_storeFence:
@@ -642,8 +705,6 @@
   case vmIntrinsics::_isInterrupted:            return inline_native_isInterrupted();
 
 #ifdef TRACE_HAVE_INTRINSICS
-  case vmIntrinsics::_classID:                  return inline_native_classID();
-  case vmIntrinsics::_threadID:                 return inline_native_threadID();
   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
 #endif
   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
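
The weak CAS and compareAndExchange families dispatched above differ from the existing compareAndSwap intrinsics in result and failure behavior: strong CAS returns a success flag and may not fail spuriously, weak CAS may fail spuriously and is meant for retry loops, and compareAndExchange returns the value actually witnessed at the location. A standalone std::atomic analogy of the three flavors (not HotSpot code):

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> x{41};
  int expected = 41;

  // LS_cmp_swap analogue: boolean result, no spurious failure.
  bool swapped = x.compare_exchange_strong(expected, 42);

  // LS_cmp_swap_weak analogue: boolean result, may fail spuriously,
  // so it normally sits inside a retry loop.
  expected = 42;
  while (!x.compare_exchange_weak(expected, 43)) { expected = 42; }

  // LS_cmp_exchange analogue: on failure, 'expected' is overwritten with the
  // value actually seen, which is what compareAndExchange* reports back.
  expected = 0;                                    // deliberately wrong
  bool ok = x.compare_exchange_strong(expected, 99);
  std::printf("swapped=%d ok=%d witnessed=%d final=%d\n",
              (int)swapped, (int)ok, expected, x.load());
  return 0;
}
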
@@ -1584,6 +1645,13 @@
   assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
           "sanity: byte[] and char[] scales agree");
 
+  // Bail out when getChar over constants is requested: constant folding would
+  // reject folding of a mismatched char access over byte[]. Normal inlining of the
+  // getChar Java method would constant fold nicely instead.
+  if (!is_store && value->is_Con() && index->is_Con()) {
+    return false;
+  }
+
   Node* adr = array_element_address(value, index, T_CHAR);
   if (is_store) {
     (void) store_to_memory(control(), adr, ch, T_CHAR, TypeAryPtr::BYTES, MemNode::unordered,
@@ -2277,8 +2345,10 @@
   return NULL;
 }
 
-bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) {
+bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
   if (callee()->is_static())  return false;  // caller must have the capability!
+  guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
+  guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
 
 #ifndef PRODUCT
   {
@@ -2367,7 +2437,42 @@
   // the barriers get omitted and the unsafe reference begins to "pollute"
   // the alias analysis of the rest of the graph, either Compile::can_alias
   // or Compile::must_alias will throw a diagnostic assert.)
-  bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
+  bool need_mem_bar;
+  switch (kind) {
+    case Relaxed:
+      need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
+      break;
+    case Opaque:
+      // Opaque uses CPUOrder membars for protection against code movement.
+    case Acquire:
+    case Release:
+    case Volatile:
+      need_mem_bar = true;
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+
+  // Some accesses require access atomicity for all types, notably longs and doubles.
+  // When AlwaysAtomicAccesses is enabled, all accesses are atomic.
+  bool requires_atomic_access = false;
+  switch (kind) {
+    case Relaxed:
+    case Opaque:
+      requires_atomic_access = AlwaysAtomicAccesses;
+      break;
+    case Acquire:
+    case Release:
+    case Volatile:
+      requires_atomic_access = true;
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+
+  // Figure out the memory ordering.
+  // Acquire/Release/Volatile accesses require marking the loads/stores with MemOrd
+  MemNode::MemOrd mo = access_kind_to_memord_LS(kind, is_store);
 
   // If we are reading the value of the referent field of a Reference
   // object (either by using Unsafe directly or through reflection)
@@ -2394,22 +2499,30 @@
   // and it is not possible to fully distinguish unintended nulls
   // from intended ones in this API.
 
-  if (is_volatile) {
-    // We need to emit leading and trailing CPU membars (see below) in
-    // addition to memory membars when is_volatile. This is a little
-    // too strong, but avoids the need to insert per-alias-type
-    // volatile membars (for stores; compare Parse::do_put_xxx), which
-    // we cannot do effectively here because we probably only have a
-    // rough approximation of type.
-    need_mem_bar = true;
-    // For Stores, place a memory ordering barrier now.
-    if (is_store) {
-      insert_mem_bar(Op_MemBarRelease);
-    } else {
-      if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-        insert_mem_bar(Op_MemBarVolatile);
+  // We need to emit leading and trailing CPU membars (see below) in
+  // addition to memory membars for special access modes. This is a little
+  // too strong, but avoids the need to insert per-alias-type
+  // volatile membars (for stores; compare Parse::do_put_xxx), which
+  // we cannot do effectively here because we probably only have a
+  // rough approximation of type.
+
+  switch(kind) {
+    case Relaxed:
+    case Opaque:
+    case Acquire:
+      break;
+    case Release:
+    case Volatile:
+      if (is_store) {
+        insert_mem_bar(Op_MemBarRelease);
+      } else {
+        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+          insert_mem_bar(Op_MemBarVolatile);
+        }
       }
-    }
+      break;
+    default:
+      ShouldNotReachHere();
   }
 
   // Memory barrier to prevent normal and 'unsafe' accesses from
@@ -2425,10 +2538,12 @@
   if (alias_type->element() != NULL || alias_type->field() != NULL) {
     BasicType bt;
     if (alias_type->element() != NULL) {
-      const Type* element = alias_type->element();
+      // Use address type to get the element type. Alias type doesn't provide
+      // enough information (e.g., doesn't differentiate between byte[] and boolean[]).
+      const Type* element = adr_type->is_aryptr()->elem();
       bt = element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
     } else {
-      bt = alias_type->field()->type()->basic_type();
+      bt = alias_type->field()->layout_type();
     }
     if (bt == T_ARRAY) {
       // accessing an array field with getObject is not a mismatch
@@ -2445,7 +2560,7 @@
     // Try to constant fold a load from a constant field
     ciField* field = alias_type->field();
     if (heap_base_oop != top() &&
-        field != NULL && field->is_constant() && field->layout_type() == type) {
+        field != NULL && field->is_constant() && !mismatched) {
       // final or stable field
       const Type* con_type = Type::make_constant(alias_type->field(), heap_base_oop);
       if (con_type != NULL) {
@@ -2453,10 +2568,9 @@
       }
     }
     if (p == NULL) {
-      MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
       // To be valid, unsafe loads may depend on other conditions than
       // the one that guards them: pin the Load node
-      p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
+      p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, requires_atomic_access, unaligned, mismatched);
       // load value
       switch (type) {
       case T_BOOLEAN:
@@ -2470,7 +2584,9 @@
         break;
       case T_OBJECT:
         if (need_read_barrier) {
-          insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
+          // We do not require a mem bar inside pre_barrier if need_mem_bar
+          // is set: we will emit the barriers ourselves.
+          insert_pre_barrier(heap_base_oop, offset, p, !need_mem_bar);
         }
         break;
       case T_ADDRESS:
@@ -2501,9 +2617,8 @@
       break;
     }
 
-    MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
-    if (type != T_OBJECT ) {
-      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
+    if (type != T_OBJECT) {
+      (void) store_to_memory(control(), adr, val, type, adr_type, mo, requires_atomic_access, unaligned, mismatched);
     } else {
       // Possibly an oop being stored to Java heap or native memory
       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
@@ -2524,7 +2639,7 @@
           // Update IdealKit memory.
           __ sync_kit(this);
         } __ else_(); {
-          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile, mismatched);
+          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, requires_atomic_access, mismatched);
         } __ end_if();
         // Final sync IdealKit and GraphKit.
         final_sync(ideal);
@@ -2533,14 +2648,23 @@
     }
   }
 
-  if (is_volatile) {
-    if (!is_store) {
-      insert_mem_bar(Op_MemBarAcquire);
-    } else {
-      if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
-        insert_mem_bar(Op_MemBarVolatile);
+  switch(kind) {
+    case Relaxed:
+    case Opaque:
+    case Release:
+      break;
+    case Acquire:
+    case Volatile:
+      if (!is_store) {
+        insert_mem_bar(Op_MemBarAcquire);
+      } else {
+        if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+          insert_mem_bar(Op_MemBarVolatile);
+        }
       }
-    }
+      break;
+    default:
+      ShouldNotReachHere();
   }
 
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
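
Taken together, the AccessKind switches above choose the MemOrd for the access itself and the leading/trailing membars around it. As a rough orientation only, the kinds line up with the C++11 memory orders as sketched below; this is an analogy, not code from the changeset, and Opaque's coherence guarantee is only approximated by memory_order_relaxed:

#include <atomic>
#include <cstdio>

enum AccessKind { Relaxed, Opaque, Volatile, Acquire, Release };

// Rough C++11 analogue of each access kind on the load side.
static std::memory_order load_order(AccessKind k) {
  switch (k) {
    case Relaxed:
    case Opaque:   return std::memory_order_relaxed;   // no cross-variable ordering
    case Acquire:  return std::memory_order_acquire;
    case Volatile: return std::memory_order_seq_cst;
    case Release:  break;                              // store-only kind; no load mapping
  }
  return std::memory_order_seq_cst;
}

// Rough analogue on the store side.
static std::memory_order store_order(AccessKind k) {
  switch (k) {
    case Relaxed:
    case Opaque:   return std::memory_order_relaxed;
    case Release:  return std::memory_order_release;
    case Volatile: return std::memory_order_seq_cst;
    case Acquire:  break;                              // load-only kind; no store mapping
  }
  return std::memory_order_seq_cst;
}

int main() {
  std::printf("volatile load order:  %d\n", (int)load_order(Volatile));
  std::printf("release store order:  %d\n", (int)store_order(Release));
  return 0;
}
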
@@ -2551,21 +2675,52 @@
 //----------------------------inline_unsafe_load_store----------------------------
 // This method serves a couple of different customers (depending on LoadStoreKind):
 //
-// LS_cmpxchg:
-//   public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
-//   public final native boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
-//   public final native boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
+// LS_cmp_swap:
+//
+//   boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
+//   boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
+//   boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
+//
+// LS_cmp_swap_weak:
+//
+//   boolean weakCompareAndSwapObject(       Object o, long offset, Object expected, Object x);
+//   boolean weakCompareAndSwapObjectAcquire(Object o, long offset, Object expected, Object x);
+//   boolean weakCompareAndSwapObjectRelease(Object o, long offset, Object expected, Object x);
+//
+//   boolean weakCompareAndSwapInt(          Object o, long offset, int    expected, int    x);
+//   boolean weakCompareAndSwapIntAcquire(   Object o, long offset, int    expected, int    x);
+//   boolean weakCompareAndSwapIntRelease(   Object o, long offset, int    expected, int    x);
+//
+//   boolean weakCompareAndSwapLong(         Object o, long offset, long   expected, long   x);
+//   boolean weakCompareAndSwapLongAcquire(  Object o, long offset, long   expected, long   x);
+//   boolean weakCompareAndSwapLongRelease(  Object o, long offset, long   expected, long   x);
 //
-// LS_xadd:
-//   public int  getAndAddInt( Object o, long offset, int  delta)
-//   public long getAndAddLong(Object o, long offset, long delta)
+// LS_cmp_exchange:
+//
+//   Object compareAndExchangeObjectVolatile(Object o, long offset, Object expected, Object x);
+//   Object compareAndExchangeObjectAcquire( Object o, long offset, Object expected, Object x);
+//   Object compareAndExchangeObjectRelease( Object o, long offset, Object expected, Object x);
+//
+//   int    compareAndExchangeIntVolatile(   Object o, long offset, int    expected, int    x);
+//   int    compareAndExchangeIntAcquire(    Object o, long offset, int    expected, int    x);
+//   int    compareAndExchangeIntRelease(    Object o, long offset, int    expected, int    x);
 //
-// LS_xchg:
+//   long   compareAndExchangeLongVolatile(  Object o, long offset, long   expected, long   x);
+//   long   compareAndExchangeLongAcquire(   Object o, long offset, long   expected, long   x);
+//   long   compareAndExchangeLongRelease(   Object o, long offset, long   expected, long   x);
+//
+// LS_get_add:
+//
+//   int  getAndAddInt( Object o, long offset, int  delta)
+//   long getAndAddLong(Object o, long offset, long delta)
+//
+// LS_get_set:
+//
 //   int    getAndSet(Object o, long offset, int    newValue)
 //   long   getAndSet(Object o, long offset, long   newValue)
 //   Object getAndSet(Object o, long offset, Object newValue)
 //
-bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
+bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
   // This basic scheme here is the same as inline_unsafe_access, but
   // differs in enough details that combining them would make the code
   // overly confusing.  (This is a true fact! I originally combined
@@ -2582,7 +2737,9 @@
     // Check the signatures.
     ciSignature* sig = callee()->signature();
     rtype = sig->return_type()->basic_type();
-    if (kind == LS_xadd || kind == LS_xchg) {
+    switch(kind) {
+      case LS_get_add:
+      case LS_get_set: {
       // Check the signatures.
 #ifdef ASSERT
       assert(rtype == type, "get and set must return the expected type");
@@ -2591,7 +2748,10 @@
       assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
       assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
 #endif // ASSERT
-    } else if (kind == LS_cmpxchg) {
+        break;
+      }
+      case LS_cmp_swap:
+      case LS_cmp_swap_weak: {
       // Check the signatures.
 #ifdef ASSERT
       assert(rtype == T_BOOLEAN, "CAS must return boolean");
@@ -2599,8 +2759,20 @@
       assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
       assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
 #endif // ASSERT
-    } else {
-      ShouldNotReachHere();
+        break;
+      }
+      case LS_cmp_exchange: {
+      // Check the signatures.
+#ifdef ASSERT
+      assert(rtype == type, "CAS must return the expected type");
+      assert(sig->count() == 4, "CAS has 4 arguments");
+      assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
+      assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
+#endif // ASSERT
+        break;
+      }
+      default:
+        ShouldNotReachHere();
     }
   }
 #endif //PRODUCT
@@ -2613,19 +2785,29 @@
   Node* offset   = NULL;
   Node* oldval   = NULL;
   Node* newval   = NULL;
-  if (kind == LS_cmpxchg) {
-    const bool two_slot_type = type2size[type] == 2;
-    receiver = argument(0);  // type: oop
-    base     = argument(1);  // type: oop
-    offset   = argument(2);  // type: long
-    oldval   = argument(4);  // type: oop, int, or long
-    newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
-  } else if (kind == LS_xadd || kind == LS_xchg){
-    receiver = argument(0);  // type: oop
-    base     = argument(1);  // type: oop
-    offset   = argument(2);  // type: long
-    oldval   = NULL;
-    newval   = argument(4);  // type: oop, int, or long
+  switch(kind) {
+    case LS_cmp_swap:
+    case LS_cmp_swap_weak:
+    case LS_cmp_exchange: {
+      const bool two_slot_type = type2size[type] == 2;
+      receiver = argument(0);  // type: oop
+      base     = argument(1);  // type: oop
+      offset   = argument(2);  // type: long
+      oldval   = argument(4);  // type: oop, int, or long
+      newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
+      break;
+    }
+    case LS_get_add:
+    case LS_get_set: {
+      receiver = argument(0);  // type: oop
+      base     = argument(1);  // type: oop
+      offset   = argument(2);  // type: long
+      oldval   = NULL;
+      newval   = argument(4);  // type: oop, int, or long
+      break;
+    }
+    default:
+      ShouldNotReachHere();
   }
 
   // Null check receiver.
@@ -2650,11 +2832,23 @@
   Compile::AliasType* alias_type = C->alias_type(adr_type);
   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
 
-  if (kind == LS_xchg && type == T_OBJECT) {
-    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
-    if (tjp != NULL) {
-      value_type = tjp;
+  switch (kind) {
+    case LS_get_set:
+    case LS_cmp_exchange: {
+      if (type == T_OBJECT) {
+        const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
+        if (tjp != NULL) {
+          value_type = tjp;
+        }
+      }
+      break;
     }
+    case LS_cmp_swap:
+    case LS_cmp_swap_weak:
+    case LS_get_add:
+      break;
+    default:
+      ShouldNotReachHere();
   }
 
   int alias_idx = C->get_alias_index(adr_type);
@@ -2664,9 +2858,22 @@
   // into actual barriers on most machines, but we still need rest of
   // compiler to respect ordering.
 
-  insert_mem_bar(Op_MemBarRelease);
+  switch (access_kind) {
+    case Relaxed:
+    case Acquire:
+      break;
+    case Release:
+    case Volatile:
+      insert_mem_bar(Op_MemBarRelease);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
   insert_mem_bar(Op_MemBarCPUOrder);
 
+  // Figure out the memory ordering.
+  MemNode::MemOrd mo = access_kind_to_memord(access_kind);
+
   // 4984716: MemBars must be inserted before this
   //          memory node in order to avoid a false
   //          dependency which will confuse the scheduler.
@@ -2677,25 +2884,45 @@
   Node* load_store = NULL;
   switch(type) {
   case T_INT:
-    if (kind == LS_xadd) {
-      load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
-    } else if (kind == LS_xchg) {
-      load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
-    } else if (kind == LS_cmpxchg) {
-      load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
-    } else {
-      ShouldNotReachHere();
+    switch(kind) {
+      case LS_get_add:
+        load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
+        break;
+      case LS_get_set:
+        load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
+        break;
+      case LS_cmp_swap_weak:
+        load_store = _gvn.transform(new WeakCompareAndSwapINode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_swap:
+        load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_exchange:
+        load_store = _gvn.transform(new CompareAndExchangeINode(control(), mem, adr, newval, oldval, adr_type, mo));
+        break;
+      default:
+        ShouldNotReachHere();
     }
     break;
   case T_LONG:
-    if (kind == LS_xadd) {
-      load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
-    } else if (kind == LS_xchg) {
-      load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
-    } else if (kind == LS_cmpxchg) {
-      load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
-    } else {
-      ShouldNotReachHere();
+    switch(kind) {
+      case LS_get_add:
+        load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
+        break;
+      case LS_get_set:
+        load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
+        break;
+      case LS_cmp_swap_weak:
+        load_store = _gvn.transform(new WeakCompareAndSwapLNode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_swap:
+        load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_exchange:
+        load_store = _gvn.transform(new CompareAndExchangeLNode(control(), mem, adr, newval, oldval, adr_type, mo));
+        break;
+      default:
+        ShouldNotReachHere();
     }
     break;
   case T_OBJECT:
@@ -2706,65 +2933,109 @@
       newval = _gvn.makecon(TypePtr::NULL_PTR);
 
     // Reference stores need a store barrier.
-    if (kind == LS_xchg) {
-      // If pre-barrier must execute before the oop store, old value will require do_load here.
-      if (!can_move_pre_barrier()) {
-        pre_barrier(true /* do_load*/,
-                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
-                    NULL /* pre_val*/,
+    switch(kind) {
+      case LS_get_set: {
+        // If pre-barrier must execute before the oop store, old value will require do_load here.
+        if (!can_move_pre_barrier()) {
+          pre_barrier(true /* do_load*/,
+                      control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+                      NULL /* pre_val*/,
+                      T_OBJECT);
+        } // Else move pre_barrier to use load_store value, see below.
+        break;
+      }
+      case LS_cmp_swap_weak:
+      case LS_cmp_swap:
+      case LS_cmp_exchange: {
+        // Same as for newval above:
+        if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
+          oldval = _gvn.makecon(TypePtr::NULL_PTR);
+        }
+        // The only known value which might get overwritten is oldval.
+        pre_barrier(false /* do_load */,
+                    control(), NULL, NULL, max_juint, NULL, NULL,
+                    oldval /* pre_val */,
                     T_OBJECT);
-      } // Else move pre_barrier to use load_store value, see below.
-    } else if (kind == LS_cmpxchg) {
-      // Same as for newval above:
-      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
-        oldval = _gvn.makecon(TypePtr::NULL_PTR);
+        break;
       }
-      // The only known value which might get overwritten is oldval.
-      pre_barrier(false /* do_load */,
-                  control(), NULL, NULL, max_juint, NULL, NULL,
-                  oldval /* pre_val */,
-                  T_OBJECT);
-    } else {
-      ShouldNotReachHere();
+      default:
+        ShouldNotReachHere();
     }
 
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       Node *newval_enc = _gvn.transform(new EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
-      if (kind == LS_xchg) {
-        load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr,
-                                                       newval_enc, adr_type, value_type->make_narrowoop()));
-      } else {
-        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
-        Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
-        load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr,
-                                                                newval_enc, oldval_enc));
+
+      switch(kind) {
+        case LS_get_set:
+          load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
+          break;
+        case LS_cmp_swap_weak: {
+          Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
+          load_store = _gvn.transform(new WeakCompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc, mo));
+          break;
+        }
+        case LS_cmp_swap: {
+          Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
+          load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc, mo));
+          break;
+        }
+        case LS_cmp_exchange: {
+          Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
+          load_store = _gvn.transform(new CompareAndExchangeNNode(control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
+          break;
+        }
+        default:
+          ShouldNotReachHere();
       }
     } else
 #endif
-    {
-      if (kind == LS_xchg) {
+    switch (kind) {
+      case LS_get_set:
         load_store = _gvn.transform(new GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
-      } else {
-        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
-        load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval));
-      }
+        break;
+      case LS_cmp_swap_weak:
+        load_store = _gvn.transform(new WeakCompareAndSwapPNode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_swap:
+        load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval, mo));
+        break;
+      case LS_cmp_exchange:
+        load_store = _gvn.transform(new CompareAndExchangePNode(control(), mem, adr, newval, oldval, adr_type, value_type->is_oopptr(), mo));
+        break;
+      default:
+        ShouldNotReachHere();
     }
-    if (kind == LS_cmpxchg) {
-      // Emit the post barrier only when the actual store happened.
-      // This makes sense to check only for compareAndSet that can fail to set the value.
-      // CAS success path is marked more likely since we anticipate this is a performance
-      // critical path, while CAS failure path can use the penalty for going through unlikely
-      // path as backoff. Which is still better than doing a store barrier there.
-      IdealKit ideal(this);
-      ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
-        sync_kit(ideal);
-        post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
-        ideal.sync_kit(this);
-      } ideal.end_if();
-      final_sync(ideal);
-    } else {
-      post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
+
+    // Emit the post barrier only when the actual store happened. This check only
+    // makes sense for the LS_cmp_* kinds, which can fail to set the value.
+    // LS_cmp_exchange does not produce any branches by default, so there is no
+    // boolean result to piggyback on. TODO: When we merge CompareAndSwap with
+    // CompareAndExchange and move branches here, it would make sense to conditionalize
+    // post_barriers for LS_cmp_exchange as well.
+    //
+    // CAS success path is marked more likely since we anticipate this is a performance
+    // critical path, while CAS failure path can use the penalty for going through unlikely
+    // path as backoff. Which is still better than doing a store barrier there.
+    switch (kind) {
+      case LS_get_set:
+      case LS_cmp_exchange: {
+        post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
+        break;
+      }
+      case LS_cmp_swap_weak:
+      case LS_cmp_swap: {
+        IdealKit ideal(this);
+        ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
+          sync_kit(ideal);
+          post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
+          ideal.sync_kit(this);
+        } ideal.end_if();
+        final_sync(ideal);
+        break;
+      }
+      default:
+        ShouldNotReachHere();
     }
     break;
   default:
@@ -2778,7 +3049,7 @@
   Node* proj = _gvn.transform(new SCMemProjNode(load_store));
   set_memory(proj, alias_idx);
 
-  if (type == T_OBJECT && kind == LS_xchg) {
+  if (type == T_OBJECT && (kind == LS_get_set || kind == LS_cmp_exchange)) {
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       load_store = _gvn.transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
@@ -2797,74 +3068,52 @@
 
   // Add the trailing membar surrounding the access
   insert_mem_bar(Op_MemBarCPUOrder);
-  insert_mem_bar(Op_MemBarAcquire);
+
+  switch (access_kind) {
+    case Relaxed:
+    case Release:
+      break; // do nothing
+    case Acquire:
+    case Volatile:
+      insert_mem_bar(Op_MemBarAcquire);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
 
   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
   set_result(load_store);
   return true;
 }
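Taken together, the leading and trailing barrier switches above bracket a Volatile load-store intrinsic with Release/Acquire barriers around CPU-order fences, while Relaxed drops both and Acquire/Release keep only their own side. A condensed sketch of the node sequence the method emits for a Volatile int CAS (all names as used above; nothing new is introduced):

    // Volatile LS_cmp_swap on T_INT, restated in one place.
    insert_mem_bar(Op_MemBarRelease);            // leading barrier: Release/Volatile only
    insert_mem_bar(Op_MemBarCPUOrder);
    MemNode::MemOrd mo = access_kind_to_memord(Volatile);   // MemNode::seqcst
    Node* load_store = _gvn.transform(
        new CompareAndSwapINode(control(), mem, adr, newval, oldval, mo));
    Node* proj = _gvn.transform(new SCMemProjNode(load_store));
    set_memory(proj, alias_idx);
    insert_mem_bar(Op_MemBarCPUOrder);
    insert_mem_bar(Op_MemBarAcquire);            // trailing barrier: Acquire/Volatile only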
 
-//----------------------------inline_unsafe_ordered_store----------------------
-// public native void Unsafe.putOrderedObject(Object o, long offset, Object x);
-// public native void Unsafe.putOrderedInt(Object o, long offset, int x);
-// public native void Unsafe.putOrderedLong(Object o, long offset, long x);
-bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
-  // This is another variant of inline_unsafe_access, differing in
-  // that it always issues store-store ("release") barrier and ensures
-  // store-atomicity (which only matters for "long").
-
-  if (callee()->is_static())  return false;  // caller must have the capability!
-
-#ifndef PRODUCT
-  {
-    ResourceMark rm;
-    // Check the signatures.
-    ciSignature* sig = callee()->signature();
-#ifdef ASSERT
-    BasicType rtype = sig->return_type()->basic_type();
-    assert(rtype == T_VOID, "must return void");
-    assert(sig->count() == 3, "has 3 arguments");
-    assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
-    assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
-#endif // ASSERT
-  }
-#endif //PRODUCT
-
-  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
-
-  // Get arguments:
-  Node* receiver = argument(0);  // type: oop
-  Node* base     = argument(1);  // type: oop
-  Node* offset   = argument(2);  // type: long
-  Node* val      = argument(4);  // type: oop, int, or long
-
-  // Null check receiver.
-  receiver = null_check(receiver);
-  if (stopped()) {
-    return true;
-  }
-
-  // Build field offset expression.
-  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
-  // 32-bit machines ignore the high half of long offsets
-  offset = ConvL2X(offset);
-  Node* adr = make_unsafe_address(base, offset);
-  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
-  const Type *value_type = Type::get_const_basic_type(type);
-  Compile::AliasType* alias_type = C->alias_type(adr_type);
-
-  insert_mem_bar(Op_MemBarRelease);
-  insert_mem_bar(Op_MemBarCPUOrder);
-  // Ensure that the store is atomic for longs:
-  const bool require_atomic_access = true;
-  Node* store;
-  if (type == T_OBJECT) // reference stores need a store barrier.
-    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
-  else {
-    store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
-  }
-  insert_mem_bar(Op_MemBarCPUOrder);
-  return true;
+MemNode::MemOrd LibraryCallKit::access_kind_to_memord_LS(AccessKind kind, bool is_store) {
+  MemNode::MemOrd mo = MemNode::unset;
+  switch(kind) {
+    case Opaque:
+    case Relaxed:  mo = MemNode::unordered; break;
+    case Acquire:  mo = MemNode::acquire;   break;
+    case Release:  mo = MemNode::release;   break;
+    case Volatile: mo = is_store ? MemNode::release : MemNode::acquire; break;
+    default:
+      ShouldNotReachHere();
+  }
+  guarantee(mo != MemNode::unset, "Should select memory ordering");
+  return mo;
+}
+
+MemNode::MemOrd LibraryCallKit::access_kind_to_memord(AccessKind kind) {
+  MemNode::MemOrd mo = MemNode::unset;
+  switch(kind) {
+    case Opaque:
+    case Relaxed:  mo = MemNode::unordered; break;
+    case Acquire:  mo = MemNode::acquire;   break;
+    case Release:  mo = MemNode::release;   break;
+    case Volatile: mo = MemNode::seqcst;    break;
+    default:
+      ShouldNotReachHere();
+  }
+  guarantee(mo != MemNode::unset, "Should select memory ordering");
+  return mo;
 }
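The two helpers encode the same table except for Volatile: a plain volatile load or store only needs acquire or release on its own side (the _LS variant, keyed by is_store), whereas a volatile load-store intrinsic has both a load and a store side and therefore gets seqcst. For illustration, from within LibraryCallKit:

    // The Volatile row of the two mappings above.
    MemNode::MemOrd st = access_kind_to_memord_LS(Volatile, true  /*is_store*/); // MemNode::release
    MemNode::MemOrd ld = access_kind_to_memord_LS(Volatile, false /*is_store*/); // MemNode::acquire
    MemNode::MemOrd ls = access_kind_to_memord(Volatile);                        // MemNode::seqcst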
 
 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
@@ -2932,52 +3181,6 @@
   return true;
 }
 
-#ifdef TRACE_HAVE_INTRINSICS
-/*
- * oop -> myklass
- * myklass->trace_id |= USED
- * return myklass->trace_id & ~0x3
- */
-bool LibraryCallKit::inline_native_classID() {
-  null_check_receiver();  // null-check, then ignore
-  Node* cls = null_check(argument(1), T_OBJECT);
-  Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
-  kls = null_check(kls, T_OBJECT);
-  ByteSize offset = TRACE_ID_OFFSET;
-  Node* insp = basic_plus_adr(kls, in_bytes(offset));
-  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
-  Node* bits = longcon(~0x03l); // ignore bit 0 & 1
-  Node* andl = _gvn.transform(new AndLNode(tvalue, bits));
-  Node* clsused = longcon(0x01l); // set the class bit
-  Node* orl = _gvn.transform(new OrLNode(tvalue, clsused));
-
-  const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
-  store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
-  set_result(andl);
-  return true;
-}
-
-bool LibraryCallKit::inline_native_threadID() {
-  Node* tls_ptr = NULL;
-  Node* cur_thr = generate_current_thread(tls_ptr);
-  Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
-  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
-  p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));
-
-  Node* threadid = NULL;
-  size_t thread_id_size = OSThread::thread_id_size();
-  if (thread_id_size == (size_t) BytesPerLong) {
-    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG, MemNode::unordered));
-  } else if (thread_id_size == (size_t) BytesPerInt) {
-    threadid = make_load(control(), p, TypeInt::INT, T_INT, MemNode::unordered);
-  } else {
-    ShouldNotReachHere();
-  }
-  set_result(threadid);
-  return true;
-}
-#endif
-
 //------------------------inline_native_time_funcs--------------
 // inline code for System.currentTimeMillis() and System.nanoTime()
 // these have the same type and signature
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -2417,6 +2417,14 @@
             ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
              (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
              (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
+             (bol->in(1)->Opcode() == Op_CompareAndExchangeI ) ||
+             (bol->in(1)->Opcode() == Op_CompareAndExchangeL ) ||
+             (bol->in(1)->Opcode() == Op_CompareAndExchangeP ) ||
+             (bol->in(1)->Opcode() == Op_CompareAndExchangeN ) ||
+             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapI ) ||
+             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapL ) ||
+             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapP ) ||
+             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapN ) ||
              (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
              (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
              (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
--- a/hotspot/src/share/vm/opto/loopnode.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/loopnode.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -285,20 +285,29 @@
   Node *incr() const                { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
   Node *limit() const               { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
   Node *stride() const              { Node *tmp = incr    (); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
-  Node *phi() const                 { Node *tmp = incr    (); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
   Node *init_trip() const           { Node *tmp = phi     (); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
   int stride_con() const;
   bool stride_is_con() const        { Node *tmp = stride  (); return (tmp != NULL && tmp->is_Con()); }
   BoolTest::mask test_trip() const  { return in(TestValue)->as_Bool()->_test._test; }
+  PhiNode *phi() const {
+    Node *tmp = incr();
+    if (tmp && tmp->req() == 3) {
+      Node* phi = tmp->in(1);
+      if (phi->is_Phi()) {
+        return phi->as_Phi();
+      }
+    }
+    return NULL;
+  }
   CountedLoopNode *loopnode() const {
     // The CountedLoopNode that goes with this CountedLoopEndNode may
     // have been optimized out by the IGVN so be cautious with the
     // pattern matching on the graph
-    if (phi() == NULL) {
+    PhiNode* iv_phi = phi();
+    if (iv_phi == NULL) {
       return NULL;
     }
-    assert(phi()->is_Phi(), "should be PhiNode");
-    Node *ln = phi()->in(0);
+    Node *ln = iv_phi->in(0);
     if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) {
       return (CountedLoopNode*)ln;
     }
--- a/hotspot/src/share/vm/opto/macroArrayCopy.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/macroArrayCopy.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -880,8 +880,14 @@
       Node* sptr = basic_plus_adr(src,  src_off);
       Node* dptr = basic_plus_adr(dest, dest_off);
       uint alias_idx = C->get_alias_index(adr_type);
-      Node* sval = transform_later(LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(alias_idx), sptr, adr_type, TypeInt::INT, T_INT, MemNode::unordered));
-      Node* st = transform_later(StoreNode::make(_igvn, *ctrl, (*mem)->memory_at(alias_idx), dptr, adr_type, sval, T_INT, MemNode::unordered));
+      bool is_mismatched = (basic_elem_type != T_INT);
+      Node* sval = transform_later(
+          LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(alias_idx), sptr, adr_type,
+                         TypeInt::INT, T_INT, MemNode::unordered, LoadNode::DependsOnlyOnTest,
+                         false /*unaligned*/, is_mismatched));
+      Node* st = transform_later(
+          StoreNode::make(_igvn, *ctrl, (*mem)->memory_at(alias_idx), dptr, adr_type,
+                          sval, T_INT, MemNode::unordered));
       (*mem)->set_memory_at(alias_idx, st);
       src_off += BytesPerInt;
       dest_off += BytesPerInt;
--- a/hotspot/src/share/vm/opto/matcher.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -2307,6 +2307,14 @@
       case Op_StorePConditional:
       case Op_StoreIConditional:
       case Op_StoreLConditional:
+      case Op_CompareAndExchangeI:
+      case Op_CompareAndExchangeL:
+      case Op_CompareAndExchangeP:
+      case Op_CompareAndExchangeN:
+      case Op_WeakCompareAndSwapI:
+      case Op_WeakCompareAndSwapL:
+      case Op_WeakCompareAndSwapP:
+      case Op_WeakCompareAndSwapN:
       case Op_CompareAndSwapI:
       case Op_CompareAndSwapL:
       case Op_CompareAndSwapP:
@@ -2407,8 +2415,10 @@
 
       bool push_it = false;
       if( proj->Opcode() == Op_IfTrue ) {
+#ifndef PRODUCT
         extern int all_null_checks_found;
         all_null_checks_found++;
+#endif
         if( b->_test._test == BoolTest::ne ) {
           push_it = true;
         }
@@ -2522,6 +2532,14 @@
     // that a monitor exit operation contains a serializing instruction.
 
     if (xop == Op_MemBarVolatile ||
+        xop == Op_CompareAndExchangeI ||
+        xop == Op_CompareAndExchangeL ||
+        xop == Op_CompareAndExchangeP ||
+        xop == Op_CompareAndExchangeN ||
+        xop == Op_WeakCompareAndSwapL ||
+        xop == Op_WeakCompareAndSwapP ||
+        xop == Op_WeakCompareAndSwapN ||
+        xop == Op_WeakCompareAndSwapI ||
         xop == Op_CompareAndSwapL ||
         xop == Op_CompareAndSwapP ||
         xop == Op_CompareAndSwapN ||
--- a/hotspot/src/share/vm/opto/memnode.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1582,6 +1582,21 @@
   return NULL;
 }
 
+static bool is_mismatched_access(ciConstant con, BasicType loadbt) {
+  BasicType conbt = con.basic_type();
+  switch (conbt) {
+    case T_BOOLEAN: conbt = T_BYTE;   break;
+    case T_ARRAY:   conbt = T_OBJECT; break;
+  }
+  switch (loadbt) {
+    case T_BOOLEAN:   loadbt = T_BYTE;   break;
+    case T_NARROWOOP: loadbt = T_OBJECT; break;
+    case T_ARRAY:     loadbt = T_OBJECT; break;
+    case T_ADDRESS:   loadbt = T_OBJECT; break;
+  }
+  return (conbt != loadbt);
+}
+
 // Try to constant-fold a stable array element.
 static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
   assert(ary->const_oop(), "array should be constant");
@@ -1590,10 +1605,12 @@
   // Decode the results of GraphKit::array_element_address.
   ciArray* aobj = ary->const_oop()->as_array();
   ciConstant con = aobj->element_value_by_offset(off);
-
   if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
+    bool is_mismatched = is_mismatched_access(con, loadbt);
+    assert(!is_mismatched, "conbt=%s; loadbt=%s", type2name(con.basic_type()), type2name(loadbt));
     const Type* con_type = Type::make_from_constant(con);
-    if (con_type != NULL) {
+    // Guard against erroneous constant folding.
+    if (!is_mismatched && con_type != NULL) {
       if (con_type->isa_aryptr()) {
         // Join with the array element type, in case it is also stable.
         int dim = ary->stable_dimension();
@@ -1642,7 +1659,7 @@
     const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
 
     // Try to constant-fold a stable array element.
-    if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
+    if (FoldStableValues && !is_mismatched_access() && ary->is_stable() && ary->const_oop() != NULL) {
       // Make sure the reference is not into the header and the offset is constant
       if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
         const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
--- a/hotspot/src/share/vm/opto/memnode.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -56,7 +56,9 @@
   };
   typedef enum { unordered = 0,
                  acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
-                 release        // Store has to release or be preceded by MemBarRelease.
+                 release,       // Store has to release or be preceded by MemBarRelease.
+                 seqcst,        // LoadStore has to have both acquire and release semantics.
+                 unset          // The memory ordering is not set (used for testing)
   } MemOrd;
 protected:
   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
@@ -848,34 +850,121 @@
   virtual uint ideal_reg() const { return Op_RegFlags; }
 };
 
+class CompareAndSwapNode : public LoadStoreConditionalNode {
+private:
+  const MemNode::MemOrd _mem_ord;
+public:
+  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
+  MemNode::MemOrd order() const {
+    return _mem_ord;
+  }
+};
+
+class CompareAndExchangeNode : public LoadStoreNode {
+private:
+  const MemNode::MemOrd _mem_ord;
+public:
+  enum {
+    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
+  };
+  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
+    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
+     init_req(ExpectedIn, ex );
+  }
+
+  MemNode::MemOrd order() const {
+    return _mem_ord;
+  }
+};
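The split between the two new base classes mirrors their results: CompareAndSwapNode stays a LoadStoreConditionalNode and produces the success flag, while CompareAndExchangeNode is a plain LoadStoreNode that produces the previous value, so it additionally carries the expected value as the ExpectedIn edge plus the result and address types. A construction sketch matching the uses in inline_unsafe_load_store (control(), mem, adr, newval, oldval and adr_type are placeholders from that context):

    // Sketch: the swap node yields an int success flag; the exchange node
    // yields the previous value and therefore needs the address/result types.
    Node* swap = new CompareAndSwapINode(control(), mem, adr, newval, oldval,
                                         MemNode::seqcst);
    Node* xchg = new CompareAndExchangeINode(control(), mem, adr, newval, oldval,
                                             adr_type, MemNode::seqcst);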
 
 //------------------------------CompareAndSwapLNode---------------------------
-class CompareAndSwapLNode : public LoadStoreConditionalNode {
+class CompareAndSwapLNode : public CompareAndSwapNode {
 public:
-  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
+  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
   virtual int Opcode() const;
 };
 
 
 //------------------------------CompareAndSwapINode---------------------------
-class CompareAndSwapINode : public LoadStoreConditionalNode {
+class CompareAndSwapINode : public CompareAndSwapNode {
 public:
-  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
+  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
   virtual int Opcode() const;
 };
 
 
 //------------------------------CompareAndSwapPNode---------------------------
-class CompareAndSwapPNode : public LoadStoreConditionalNode {
+class CompareAndSwapPNode : public CompareAndSwapNode {
 public:
-  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
+  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
   virtual int Opcode() const;
 };
 
 //------------------------------CompareAndSwapNNode---------------------------
-class CompareAndSwapNNode : public LoadStoreConditionalNode {
+class CompareAndSwapNNode : public CompareAndSwapNode {
+public:
+  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
+  virtual int Opcode() const;
+};
+
+
+//------------------------------WeakCompareAndSwapLNode---------------------------
+class WeakCompareAndSwapLNode : public CompareAndSwapNode {
+public:
+  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
+  virtual int Opcode() const;
+};
+
+
+//------------------------------WeakCompareAndSwapINode---------------------------
+class WeakCompareAndSwapINode : public CompareAndSwapNode {
+public:
+  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
+  virtual int Opcode() const;
+};
+
+
+//------------------------------WeakCompareAndSwapPNode---------------------------
+class WeakCompareAndSwapPNode : public CompareAndSwapNode {
 public:
-  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
+  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
+  virtual int Opcode() const;
+};
+
+//------------------------------WeakCompareAndSwapNNode---------------------------
+class WeakCompareAndSwapNNode : public CompareAndSwapNode {
+public:
+  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
+  virtual int Opcode() const;
+};
+
+//------------------------------CompareAndExchangeLNode---------------------------
+class CompareAndExchangeLNode : public CompareAndExchangeNode {
+public:
+  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
+  virtual int Opcode() const;
+};
+
+
+//------------------------------CompareAndExchangeINode---------------------------
+class CompareAndExchangeINode : public CompareAndExchangeNode {
+public:
+  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
+  virtual int Opcode() const;
+};
+
+
+//------------------------------CompareAndExchangePNode---------------------------
+class CompareAndExchangePNode : public CompareAndExchangeNode {
+public:
+  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
+  virtual int Opcode() const;
+};
+
+//------------------------------CompareAndExchangeNNode---------------------------
+class CompareAndExchangeNNode : public CompareAndExchangeNode {
+public:
+  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
   virtual int Opcode() const;
 };
 
--- a/hotspot/src/share/vm/opto/node.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/node.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -576,17 +576,11 @@
 
 //------------------------------~Node------------------------------------------
 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
-extern int reclaim_idx ;
-extern int reclaim_in  ;
-extern int reclaim_node;
 void Node::destruct() {
   // Eagerly reclaim unique Node numberings
   Compile* compile = Compile::current();
   if ((uint)_idx+1 == compile->unique()) {
     compile->set_unique(compile->unique()-1);
-#ifdef ASSERT
-    reclaim_idx++;
-#endif
   }
   // Clear debug info:
   Node_Notes* nn = compile->node_notes_at(_idx);
@@ -604,43 +598,25 @@
   int out_edge_size = _outmax*sizeof(void*);
   char *edge_end = ((char*)_in) + edge_size;
   char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
-  char *out_edge_end = out_array + out_edge_size;
   int node_size = size_of();
 
   // Free the output edge array
   if (out_edge_size > 0) {
-#ifdef ASSERT
-    if( out_edge_end == compile->node_arena()->hwm() )
-      reclaim_in  += out_edge_size;  // count reclaimed out edges with in edges
-#endif
     compile->node_arena()->Afree(out_array, out_edge_size);
   }
 
   // Free the input edge array and the node itself
   if( edge_end == (char*)this ) {
-#ifdef ASSERT
-    if( edge_end+node_size == compile->node_arena()->hwm() ) {
-      reclaim_in  += edge_size;
-      reclaim_node+= node_size;
-    }
-#else
     // It was; free the input array and object all in one hit
+#ifndef ASSERT
     compile->node_arena()->Afree(_in,edge_size+node_size);
 #endif
   } else {
-
     // Free just the input array
-#ifdef ASSERT
-    if( edge_end == compile->node_arena()->hwm() )
-      reclaim_in  += edge_size;
-#endif
     compile->node_arena()->Afree(_in,edge_size);
 
     // Free just the object
-#ifdef ASSERT
-    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
-      reclaim_node+= node_size;
-#else
+#ifndef ASSERT
     compile->node_arena()->Afree(this,node_size);
 #endif
   }
--- a/hotspot/src/share/vm/opto/node.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/node.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -60,6 +60,8 @@
 class CodeBuffer;
 class ConstraintCastNode;
 class ConNode;
+class CompareAndSwapNode;
+class CompareAndExchangeNode;
 class CountedLoopNode;
 class CountedLoopEndNode;
 class DecodeNarrowPtrNode;
@@ -679,6 +681,9 @@
       DEFINE_CLASS_ID(Store, Mem, 1)
         DEFINE_CLASS_ID(StoreVector, Store, 0)
       DEFINE_CLASS_ID(LoadStore, Mem, 2)
+        DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0)
+          DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0)
+        DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1)
 
     DEFINE_CLASS_ID(Region, Node, 5)
       DEFINE_CLASS_ID(Loop, Region, 0)
--- a/hotspot/src/share/vm/opto/parse.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/parse.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -104,13 +104,6 @@
   // For temporary (stack-allocated, stateless) ilts:
   InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);
 
-  // InlineTree enum
-  enum InlineStyle {
-    Inline_do_not_inline             =   0, //
-    Inline_cha_is_monomorphic        =   1, //
-    Inline_type_profile_monomorphic  =   2  //
-  };
-
   // See if it is OK to inline.
   // The receiver is the inline tree for the caller.
   //
@@ -349,9 +342,6 @@
   Block*            _block;     // block currently getting parsed
   ciBytecodeStream  _iter;      // stream of this method's bytecodes
 
-  int           _blocks_merged; // Progress meter: state merges from BB preds
-  int           _blocks_parsed; // Progress meter: BBs actually parsed
-
   const FastLockNode* _synch_lock; // FastLockNode for synchronized method
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/opto/parse1.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -45,6 +45,7 @@
 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
 // and eventually should be encapsulated in a proper class (gri 8/18/98).
 
+#ifndef PRODUCT
 int nodes_created              = 0;
 int methods_parsed             = 0;
 int methods_seen               = 0;
@@ -53,42 +54,42 @@
 
 int explicit_null_checks_inserted = 0;
 int explicit_null_checks_elided   = 0;
-int all_null_checks_found         = 0, implicit_null_checks              = 0;
-int implicit_null_throws          = 0;
+int all_null_checks_found         = 0;
+int implicit_null_checks          = 0;
 
-int reclaim_idx  = 0;
-int reclaim_in   = 0;
-int reclaim_node = 0;
-
-#ifndef PRODUCT
 bool Parse::BytecodeParseHistogram::_initialized = false;
 uint Parse::BytecodeParseHistogram::_bytecodes_parsed [Bytecodes::number_of_codes];
 uint Parse::BytecodeParseHistogram::_nodes_constructed[Bytecodes::number_of_codes];
 uint Parse::BytecodeParseHistogram::_nodes_transformed[Bytecodes::number_of_codes];
 uint Parse::BytecodeParseHistogram::_new_values       [Bytecodes::number_of_codes];
-#endif
 
 //------------------------------print_statistics-------------------------------
-#ifndef PRODUCT
 void Parse::print_statistics() {
   tty->print_cr("--- Compiler Statistics ---");
   tty->print("Methods seen: %d  Methods parsed: %d", methods_seen, methods_parsed);
   tty->print("  Nodes created: %d", nodes_created);
   tty->cr();
-  if (methods_seen != methods_parsed)
+  if (methods_seen != methods_parsed) {
     tty->print_cr("Reasons for parse failures (NOT cumulative):");
+  }
   tty->print_cr("Blocks parsed: %d  Blocks seen: %d", blocks_parsed, blocks_seen);
 
-  if( explicit_null_checks_inserted )
-    tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,", explicit_null_checks_inserted, explicit_null_checks_elided, (100*explicit_null_checks_elided)/explicit_null_checks_inserted, all_null_checks_found);
-  if( all_null_checks_found )
+  if (explicit_null_checks_inserted) {
+    tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,",
+                  explicit_null_checks_inserted, explicit_null_checks_elided,
+                  (100*explicit_null_checks_elided)/explicit_null_checks_inserted,
+                  all_null_checks_found);
+  }
+  if (all_null_checks_found) {
     tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks,
                   (100*implicit_null_checks)/all_null_checks_found);
-  if( implicit_null_throws )
+  }
+  if (SharedRuntime::_implicit_null_throws) {
     tty->print_cr("%d implicit null exceptions at runtime",
-                  implicit_null_throws);
+                  SharedRuntime::_implicit_null_throws);
+  }
 
-  if( PrintParseStatistics && BytecodeParseHistogram::initialized() ) {
+  if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
     BytecodeParseHistogram::print();
   }
 }
@@ -495,7 +496,7 @@
     C->dependencies()->assert_evol_method(method());
   }
 
-  methods_seen++;
+  NOT_PRODUCT(methods_seen++);
 
   // Do some special top-level things.
   if (depth() == 1 && C->is_osr_compilation()) {
@@ -530,8 +531,8 @@
   }
 #endif
 
+#ifndef PRODUCT
   methods_parsed++;
-#ifndef PRODUCT
   // add method size here to guarantee that inlined methods are added too
   if (CITime)
     _total_bytes_compiled += method()->code_size();
@@ -652,7 +653,7 @@
         continue;
       }
 
-      blocks_parsed++;
+      NOT_PRODUCT(blocks_parsed++);
 
       progress = true;
       if (block->is_loop_head() || block->is_handler() || has_irreducible && !block->is_ready()) {
@@ -712,9 +713,9 @@
     }
   }
 
+#ifndef PRODUCT
   blocks_seen += block_count();
 
-#ifndef PRODUCT
   // Make sure there are no half-processed blocks remaining.
   // Every remaining unprocessed block is dead and may be ignored now.
   for (int rpo = 0; rpo < block_count(); rpo++) {
@@ -1446,7 +1447,6 @@
 
   assert(block()->is_merged(), "must be merged before being parsed");
   block()->mark_parsed();
-  ++_blocks_parsed;
 
   // Set iterator to start of block.
   iter().reset_to_bci(block()->start());
@@ -1596,9 +1596,6 @@
       return;
     }
 
-    // Record that a new block has been merged.
-    ++_blocks_merged;
-
     // Make a region if we know there are multiple or unpredictable inputs.
     // (Also, if this is a plain fall-through, we might see another region,
     // which must not be allowed into this block's map.)
--- a/hotspot/src/share/vm/opto/parse2.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -44,8 +44,10 @@
 #include "runtime/deoptimization.hpp"
 #include "runtime/sharedRuntime.hpp"
 
+#ifndef PRODUCT
 extern int explicit_null_checks_inserted,
            explicit_null_checks_elided;
+#endif
 
 //---------------------------------array_load----------------------------------
 void Parse::array_load(BasicType elem_type) {
@@ -997,7 +999,7 @@
     return;
   }
 
-  explicit_null_checks_inserted++;
+  NOT_PRODUCT(explicit_null_checks_inserted++);
 
   // Generate real control flow
   Node   *tst = _gvn.transform( new BoolNode( c, btest ) );
@@ -1013,7 +1015,7 @@
     set_control(iftrue);
 
     if (stopped()) {            // Path is dead?
-      explicit_null_checks_elided++;
+      NOT_PRODUCT(explicit_null_checks_elided++);
       if (C->eliminate_boxing()) {
         // Mark the successor block as parsed
         branch_block->next_path_num();
@@ -1033,7 +1035,7 @@
   set_control(iffalse);
 
   if (stopped()) {              // Path is dead?
-    explicit_null_checks_elided++;
+    NOT_PRODUCT(explicit_null_checks_elided++);
     if (C->eliminate_boxing()) {
       // Mark the successor block as parsed
       next_block->next_path_num();
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1471,6 +1471,27 @@
   }
 }
 
+// Return the counted loop's induction-variable Phi if cmp, as the loop's exit
+// condition, compares that induction variable with n
+static PhiNode* countedloop_phi_from_cmp(CmpINode* cmp, Node* n) {
+  for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
+    Node* bol = cmp->fast_out(i);
+    for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
+      Node* iff = bol->fast_out(i2);
+      if (iff->is_CountedLoopEnd()) {
+        CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
+        if (cle->limit() == n) {
+          PhiNode* phi = cle->phi();
+          if (phi != NULL) {
+            return phi;
+          }
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
 void PhaseIterGVN::add_users_to_worklist( Node *n ) {
   add_users_to_worklist0(n);
 
@@ -1500,18 +1521,7 @@
         Node* bol = use->raw_out(0);
         if (bol->outcnt() > 0) {
           Node* iff = bol->raw_out(0);
-          if (use_op == Op_CmpI &&
-              iff->is_CountedLoopEnd()) {
-            CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
-            if (cle->limit() == n && cle->phi() != NULL) {
-              // If an opaque node feeds into the limit condition of a
-              // CountedLoop, we need to process the Phi node for the
-              // induction variable when the opaque node is removed:
-              // the range of values taken by the Phi is now known and
-              // so its type is also known.
-              _worklist.push(cle->phi());
-            }
-          } else if (iff->outcnt() == 2) {
+          if (iff->outcnt() == 2) {
             // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the
             // phi merging either 0 or 1 onto the worklist
             Node* ifproj0 = iff->raw_out(0);
@@ -1526,6 +1536,15 @@
         }
       }
       if (use_op == Op_CmpI) {
+        Node* phi = countedloop_phi_from_cmp((CmpINode*)use, n);
+        if (phi != NULL) {
+          // If an opaque node feeds into the limit condition of a
+          // CountedLoop, we need to process the Phi node for the
+          // induction variable when the opaque node is removed:
+          // the range of values taken by the Phi is now known and
+          // so its type is also known.
+          _worklist.push(phi);
+        }
         Node* in1 = use->in(1);
         for (uint i = 0; i < in1->outcnt(); i++) {
           if (in1->raw_out(i)->Opcode() == Op_CastII) {
@@ -1714,6 +1733,15 @@
             }
           }
         }
+        // If n is used in a counted loop exit condition then the type
+        // of the counted loop's Phi depends on the type of n. See
+        // PhiNode::Value().
+        if (m_op == Op_CmpI) {
+          PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
+          if (phi != NULL) {
+            worklist.push(phi);
+          }
+        }
       }
     }
   }
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -431,7 +431,7 @@
 // Phase for iteratively performing local, pessimistic GVN-style optimizations.
 // and ideal transformations on the graph.
 class PhaseIterGVN : public PhaseGVN {
- private:
+private:
   bool _delay_transform;  // When true simply register the node when calling transform
                           // instead of actually optimizing it
 
--- a/hotspot/src/share/vm/prims/jni.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/prims/jni.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -73,6 +73,7 @@
 #include "runtime/vm_operations.hpp"
 #include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
+#include "trace/traceMacros.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
@@ -88,7 +89,7 @@
 #include "jvmci/jvmciRuntime.hpp"
 #endif
 
-static jint CurrentVersion = JNI_VERSION_1_8;
+static jint CurrentVersion = JNI_VERSION_9;
 
 #ifdef _WIN32
 extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );
@@ -3929,7 +3930,7 @@
 
     EventThreadStart event;
     if (event.should_commit()) {
-      event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
+      event.set_thread(THREAD_TRACE_ID(thread));
       event.commit();
     }
 
@@ -4149,7 +4150,7 @@
 
   EventThreadStart event;
   if (event.should_commit()) {
-    event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
+    event.set_thread(THREAD_TRACE_ID(thread));
     event.commit();
   }
 
--- a/hotspot/src/share/vm/prims/jni.h	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/prims/jni.h	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1952,6 +1952,7 @@
 #define JNI_VERSION_1_4 0x00010004
 #define JNI_VERSION_1_6 0x00010006
 #define JNI_VERSION_1_8 0x00010008
+#define JNI_VERSION_9   0x00090000
 
 #ifdef __cplusplus
 } /* extern "C" */
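With the new constant, a native library that requires a JDK 9 VM can check for and report the JNI 9 interface version from JNI_OnLoad in the usual way; a minimal sketch using only standard JNI calls, nothing specific to this changeset:

    #include <jni.h>

    // Ask the VM for a JNI 9 environment and advertise JNI 9 support.
    JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* reserved) {
      JNIEnv* env = NULL;
      if (vm->GetEnv((void**)&env, JNI_VERSION_9) != JNI_OK) {
        return JNI_ERR;  // running on an older VM
      }
      return JNI_VERSION_9;
    }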
--- a/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -173,9 +173,7 @@
   _global_code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(50,true);
 
   // iterate over the stub code descriptors and put them in the list first.
-  int index = 0;
-  StubCodeDesc* desc;
-  while ((desc = StubCodeDesc::desc_for_index(++index)) != NULL) {
+  for (StubCodeDesc* desc = StubCodeDesc::first(); desc != NULL; desc = StubCodeDesc::next(desc)) {
     _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
   }
 
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3940,6 +3940,10 @@
   scratch_class->set_methods(_old_methods);     // To prevent potential GCing of the old methods,
                                           // and to be able to undo operation easily.
 
+  Array<int>* old_ordering = the_class->method_ordering();
+  the_class->set_method_ordering(scratch_class->method_ordering());
+  scratch_class->set_method_ordering(old_ordering);
+
   ConstantPool* old_constants = the_class->constants();
   the_class->set_constants(scratch_class->constants());
   scratch_class->set_constants(old_constants);  // See the previous comment.
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1117,6 +1117,44 @@
 
 // JSR166 ------------------------------------------------------------------
 
+UNSAFE_ENTRY(jobject, Unsafe_CompareAndExchangeObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h))
+  UnsafeWrapper("Unsafe_CompareAndExchangeObject");
+  oop x = JNIHandles::resolve(x_h);
+  oop e = JNIHandles::resolve(e_h);
+  oop p = JNIHandles::resolve(obj);
+  HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
+  oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
+  if (res == e)
+    update_barrier_set((void*)addr, x);
+  return JNIHandles::make_local(env, res);
+UNSAFE_END
+
+UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x))
+  UnsafeWrapper("Unsafe_CompareAndExchangeInt");
+  oop p = JNIHandles::resolve(obj);
+  jint* addr = (jint *) index_oop_from_field_offset_long(p, offset);
+  return (jint)(Atomic::cmpxchg(x, addr, e));
+UNSAFE_END
+
+UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x))
+  UnsafeWrapper("Unsafe_CompareAndExchangeLong");
+  Handle p (THREAD, JNIHandles::resolve(obj));
+  jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
+#ifdef SUPPORTS_NATIVE_CX8
+  return (jlong)(Atomic::cmpxchg(x, addr, e));
+#else
+  if (VM_Version::supports_cx8())
+    return (jlong)(Atomic::cmpxchg(x, addr, e));
+  else {
+    MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
+    jlong val = Atomic::load(addr);
+    if (val == e)
+      Atomic::store(x, addr);
+    return val;
+  }
+#endif
+UNSAFE_END
+
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h))
   UnsafeWrapper("Unsafe_CompareAndSwapObject");
   oop x = JNIHandles::resolve(x_h);
@@ -1384,6 +1422,10 @@
     {CC "compareAndSwapObject", CC "(" OBJ "J" OBJ "" OBJ ")Z", FN_PTR(Unsafe_CompareAndSwapObject)},
     {CC "compareAndSwapInt",  CC "(" OBJ "J""I""I"")Z",  FN_PTR(Unsafe_CompareAndSwapInt)},
     {CC "compareAndSwapLong", CC "(" OBJ "J""J""J"")Z",  FN_PTR(Unsafe_CompareAndSwapLong)},
+    {CC "compareAndExchangeObjectVolatile", CC "(" OBJ "J" OBJ "" OBJ ")" OBJ, FN_PTR(Unsafe_CompareAndExchangeObject)},
+    {CC "compareAndExchangeIntVolatile",  CC "(" OBJ "J""I""I"")I", FN_PTR(Unsafe_CompareAndExchangeInt)},
+    {CC "compareAndExchangeLongVolatile", CC "(" OBJ "J""J""J"")J", FN_PTR(Unsafe_CompareAndExchangeLong)},
+
     {CC "putOrderedObject",   CC "(" OBJ "J" OBJ ")V",   FN_PTR(Unsafe_SetOrderedObject)},
     {CC "putOrderedInt",      CC "(" OBJ "JI)V",         FN_PTR(Unsafe_SetOrderedInt)},
     {CC "putOrderedLong",     CC "(" OBJ "JJ)V",         FN_PTR(Unsafe_SetOrderedLong)},
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -34,6 +34,7 @@
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/universe.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/wbtestmethods/parserTests.hpp"
 #include "prims/whitebox.hpp"
@@ -643,12 +644,12 @@
   return (mh->queued_for_compilation() || nm != NULL);
 WB_END
 
-WB_ENTRY(jboolean, WB_ShouldPrintAssembly(JNIEnv* env, jobject o, jobject method))
+WB_ENTRY(jboolean, WB_ShouldPrintAssembly(JNIEnv* env, jobject o, jobject method, jint comp_level))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
   CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
 
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
-  DirectiveSet* directive = DirectivesStack::getMatchingDirective(mh, CompileBroker::compiler(CompLevel_simple));
+  DirectiveSet* directive = DirectivesStack::getMatchingDirective(mh, CompileBroker::compiler(comp_level));
   bool result = directive->PrintAssemblyOption;
   DirectivesStack::release(directive);
 
@@ -1305,6 +1306,38 @@
   return (jlong) ikh->constants();
 WB_END
 
+WB_ENTRY(jint, WB_GetConstantPoolCacheIndexTag(JNIEnv* env, jobject wb))
+  return ConstantPool::CPCACHE_INDEX_TAG;
+WB_END
+
+WB_ENTRY(jint, WB_GetConstantPoolCacheLength(JNIEnv* env, jobject wb, jclass klass))
+  instanceKlassHandle ikh(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
+  ConstantPool* cp = ikh->constants();
+  if (cp->cache() == NULL) {
+      return -1;
+  }
+  return cp->cache()->length();
+WB_END
+
+WB_ENTRY(jint, WB_ConstantPoolRemapInstructionOperandFromCache(JNIEnv* env, jobject wb, jclass klass, jint index))
+  instanceKlassHandle ikh(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
+  ConstantPool* cp = ikh->constants();
+  if (cp->cache() == NULL) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalStateException(), "Constant pool does not have a cache");
+  }
+  jint cpci = index;
+  jint cpciTag = ConstantPool::CPCACHE_INDEX_TAG;
+  if (cpciTag > cpci || cpci >= cp->cache()->length() + cpciTag) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Constant pool cache index is out of range");
+  }
+  jint cpi = cp->remap_instruction_operand_from_cache(cpci);
+  return cpi;
+WB_END
+
+WB_ENTRY(jint, WB_ConstantPoolEncodeIndyIndex(JNIEnv* env, jobject wb, jint index))
+  return ConstantPool::encode_invokedynamic_index(index);
+WB_END
+
 WB_ENTRY(void, WB_ClearInlineCaches(JNIEnv* env, jobject wb))
   VM_ClearICs clear_ics;
   VMThread::execute(&clear_ics);
@@ -1523,8 +1556,8 @@
 #endif // INCLUDE_NMT
   {CC"deoptimizeFrames",   CC"(Z)I",                  (void*)&WB_DeoptimizeFrames  },
   {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
-    {CC"deoptimizeMethod0",   CC"(Ljava/lang/reflect/Executable;Z)I",
-                                                        (void*)&WB_DeoptimizeMethod  },
+  {CC"deoptimizeMethod0",   CC"(Ljava/lang/reflect/Executable;Z)I",
+                                                      (void*)&WB_DeoptimizeMethod  },
   {CC"isMethodCompiled0",   CC"(Ljava/lang/reflect/Executable;Z)Z",
                                                       (void*)&WB_IsMethodCompiled  },
   {CC"isMethodCompilable0", CC"(Ljava/lang/reflect/Executable;IZ)Z",
@@ -1559,7 +1592,7 @@
       CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)I",
                                                       (void*)&WB_MatchesInline},
   {CC"shouldPrintAssembly",
-        CC"(Ljava/lang/reflect/Executable;)Z",
+        CC"(Ljava/lang/reflect/Executable;I)Z",
                                                         (void*)&WB_ShouldPrintAssembly},
 
   {CC"isConstantVMFlag",   CC"(Ljava/lang/String;)Z", (void*)&WB_IsConstantVMFlag},
@@ -1620,6 +1653,12 @@
   {CC"isMonitorInflated0", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated  },
   {CC"forceSafepoint",     CC"()V",                   (void*)&WB_ForceSafepoint     },
   {CC"getConstantPool0",   CC"(Ljava/lang/Class;)J",  (void*)&WB_GetConstantPool    },
+  {CC"getConstantPoolCacheIndexTag0", CC"()I",  (void*)&WB_GetConstantPoolCacheIndexTag},
+  {CC"getConstantPoolCacheLength0", CC"(Ljava/lang/Class;)I",  (void*)&WB_GetConstantPoolCacheLength},
+  {CC"remapInstructionOperandFromCPCache0",
+      CC"(Ljava/lang/Class;I)I",                      (void*)&WB_ConstantPoolRemapInstructionOperandFromCache},
+  {CC"encodeConstantPoolIndyIndex0",
+      CC"(I)I",                      (void*)&WB_ConstantPoolEncodeIndyIndex},
   {CC"getMethodBooleanOption",
       CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/Boolean;",
                                                       (void*)&WB_GetMethodBooleaneOption},
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -372,6 +372,7 @@
   { "PreInflateSpin",                JDK_Version::undefined(), JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "JNIDetachReleasesMonitors",     JDK_Version::undefined(), JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "UseAltSigs",                    JDK_Version::undefined(), JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "SegmentedHeapDumpThreshold",    JDK_Version::undefined(), JDK_Version::jdk(9), JDK_Version::jdk(10) },
 
 #ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
   { "dep > obs",                    JDK_Version::jdk(9), JDK_Version::jdk(8), JDK_Version::undefined() },
@@ -405,8 +406,9 @@
 
 static AliasedLoggingFlag const aliased_logging_flags[] = {
   { "TraceClassLoading",         LogLevel::Info,  true,  LogTag::_classload },
+  { "TraceClassPaths",           LogLevel::Info,  true,  LogTag::_classpath },
+  { "TraceClassResolution",      LogLevel::Info,  true,  LogTag::_classresolve },
   { "TraceClassUnloading",       LogLevel::Info,  true,  LogTag::_classunload },
-  { "TraceClassResolution",      LogLevel::Info,  true,  LogTag::_classresolve },
   { "TraceExceptions",           LogLevel::Info,  true,  LogTag::_exceptions },
   { "TraceMonitorInflation",     LogLevel::Debug, true,  LogTag::_monitorinflation },
   { "TraceBiasedLocking",        LogLevel::Info,  true,  LogTag::_biasedlocking },
@@ -2315,6 +2317,17 @@
 //===========================================================================================================
 // Parsing of main arguments
 
+#if INCLUDE_JVMCI
+// Check consistency of jvmci vm argument settings.
+bool Arguments::check_jvmci_args_consistency() {
+  if (!EnableJVMCI && !JVMCIGlobals::check_jvmci_flags_are_consistent()) {
+    JVMCIGlobals::print_jvmci_args_inconsistency_error_message();
+    return false;
+  }
+  return true;
+}
+#endif //INCLUDE_JVMCI
+
 // Check consistency of GC selection
 bool Arguments::check_gc_consistency() {
   // Ensure that the user has not selected conflicting sets
@@ -2411,6 +2424,9 @@
 #endif
   }
 #if INCLUDE_JVMCI
+
+  status = status && check_jvmci_args_consistency();
+
   if (EnableJVMCI) {
     if (!ScavengeRootsInCode) {
       warning("forcing ScavengeRootsInCode non-zero because JVMCI is enabled");
@@ -3255,7 +3271,7 @@
 
   // PrintSharedArchiveAndExit will turn on
   //   -Xshare:on
-  //   -XX:+TraceClassPaths
+  //   -Xlog:classpath=info
   if (PrintSharedArchiveAndExit) {
     if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != Flag::SUCCESS) {
       return JNI_EINVAL;
@@ -3263,9 +3279,7 @@
     if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true) != Flag::SUCCESS) {
       return JNI_EINVAL;
     }
-    if (FLAG_SET_CMDLINE(bool, TraceClassPaths, true) != Flag::SUCCESS) {
-      return JNI_EINVAL;
-    }
+    LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(classpath));
   }
 
   // Change the default value for flags  which have different default values
@@ -3318,10 +3332,6 @@
     _java_class_path->set_value(copy);
     FreeHeap(copy); // a copy was made by set_value, so don't need this anymore
   }
-
-  if (!PrintSharedArchiveAndExit) {
-    ClassLoader::trace_class_path(tty, "[classpath: ", _java_class_path->value());
-  }
 }
 
 static bool has_jar_files(const char* directory) {
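
TraceClassPaths is retired here in favor of -Xlog:classpath=info, and PrintSharedArchiveAndExit now enables that logging through LogConfiguration::configure_stdout instead of forcing the old flag. The alias table above pairs a legacy -XX trace flag with a log level and tag; a minimal standalone sketch of such a lookup (the enum names and table type are illustrative):

    // Sketch: mapping legacy -XX trace flags onto a (level, tag) pair,
    // in the spirit of the aliased_logging_flags table above.
    #include <cstring>
    #include <cstdio>

    enum class Level { Info, Debug };
    enum class Tag   { ClassLoad, ClassPath, ClassResolve, ClassUnload };

    struct Alias { const char* flag; Level level; Tag tag; };

    static const Alias kAliases[] = {
      { "TraceClassLoading",    Level::Info, Tag::ClassLoad    },
      { "TraceClassPaths",      Level::Info, Tag::ClassPath    },
      { "TraceClassResolution", Level::Info, Tag::ClassResolve },
      { "TraceClassUnloading",  Level::Info, Tag::ClassUnload  },
    };

    const Alias* find_alias(const char* flag) {
      for (const Alias& a : kAliases) {
        if (std::strcmp(a.flag, flag) == 0) return &a;
      }
      return nullptr;                 // not an aliased flag
    }

    int main() {
      const Alias* a = find_alias("TraceClassPaths");
      std::printf("aliased: %s\n", a != nullptr ? "yes" : "no");
      return a != nullptr ? 0 : 1;
    }
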
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -505,7 +505,10 @@
   static void set_gc_specific_flags();
   static inline bool gc_selected(); // whether a gc has been selected
   static void select_gc_ergonomically();
-
+#if INCLUDE_JVMCI
+  // Check consistency of jvmci vm argument settings.
+  static bool check_jvmci_args_consistency();
+#endif
   // Check for consistency in the selection of the garbage collector.
   static bool check_gc_consistency();        // Check user-selected gc
   // Check consistency or otherwise of VM argument settings
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -33,9 +33,6 @@
 #include "runtime/commandLineFlagConstraintsRuntime.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/commandLineFlagConstraintsJVMCI.hpp"
-#endif
 
 class CommandLineFlagConstraint_bool : public CommandLineFlagConstraint {
   CommandLineFlagConstraintFunc_bool _constraint;
@@ -254,17 +251,6 @@
                                      IGNORE_RANGE,
                                      EMIT_CONSTRAINT_CHECK));
 
-#if INCLUDE_JVMCI
-  emit_constraint_no(NULL JVMCI_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
-                                      EMIT_CONSTRAINT_PD_DEVELOPER_FLAG,
-                                      EMIT_CONSTRAINT_PRODUCT_FLAG,
-                                      EMIT_CONSTRAINT_PD_PRODUCT_FLAG,
-                                      EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
-                                      EMIT_CONSTRAINT_EXPERIMENTAL_FLAG,
-                                      EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
-                                      IGNORE_RANGE,
-                                      EMIT_CONSTRAINT_CHECK));
-#endif // INCLUDE_JVMCI
 
 #ifdef COMPILER1
   emit_constraint_no(NULL C1_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
--- a/hotspot/src/share/vm/runtime/fprofiler.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/fprofiler.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -836,8 +836,7 @@
     vm_thread_profiler->inc_thread_ticks();
 
     // Get a snapshot of a current VMThread pc (and leave it running!)
-    // The call may fail if, for instance the VM thread is interrupted while
-    // holding the Interrupt_lock or for other reasons.
+    // The call may fail in some circumstances
     epc = os::get_thread_pc(VMThread::vm_thread());
     if(epc.pc() != NULL) {
       if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) {
--- a/hotspot/src/share/vm/runtime/frame.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -662,14 +662,14 @@
         st->print("J %d%s %s ",
                   nm->compile_id(), (nm->is_osr_method() ? "%" : ""),
                   ((nm->compiler() != NULL) ? nm->compiler()->name() : ""));
+        st->print("%s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+" INTPTR_FORMAT "]",
+                  buf, m->code_size(), p2i(_pc), p2i(_cb->code_begin()), _pc - _cb->code_begin());
 #if INCLUDE_JVMCI
         char* jvmciName = nm->jvmci_installed_code_name(buf, buflen);
         if (jvmciName != NULL) {
           st->print(" (%s)", jvmciName);
         }
 #endif
-        st->print("%s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+" INTPTR_FORMAT "]",
-                  buf, m->code_size(), p2i(_pc), p2i(_cb->code_begin()), _pc - _cb->code_begin());
       } else {
         st->print("J  " PTR_FORMAT, p2i(pc()));
       }
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1055,10 +1055,6 @@
           "directory) of the dump file (defaults to java_pid<pid>.hprof "   \
           "in the working directory)")                                      \
                                                                             \
-  develop(size_t, SegmentedHeapDumpThreshold, 2*G,                          \
-          "Generate a segmented heap dump (JAVA PROFILE 1.0.2 format) "     \
-          "when the heap usage is larger than this")                        \
-                                                                            \
   develop(size_t, HeapDumpSegmentSize, 1*G,                                 \
           "Approximate segment size when generating a segmented heap dump") \
                                                                             \
@@ -1437,9 +1433,6 @@
   product(bool, VerifyMergedCPBytecodes, true,                              \
           "Verify bytecodes after RedefineClasses constant pool merging")   \
                                                                             \
-  develop(bool, TraceJNIHandleAllocation, false,                            \
-          "Trace allocation/deallocation of JNI handle blocks")             \
-                                                                            \
   develop(bool, TraceBytecodes, false,                                      \
           "Trace bytecode execution")                                       \
                                                                             \
@@ -1482,9 +1475,6 @@
   develop(bool, TraceCompiledIC, false,                                     \
           "Trace changes of compiled IC")                                   \
                                                                             \
-  develop(bool, TraceProtectionDomainVerification, false,                   \
-          "Trace protection domain verification")                           \
-                                                                            \
   develop(bool, TraceClearedExceptions, false,                              \
           "Print when an exception is forcibly cleared")                    \
                                                                             \
@@ -2403,9 +2393,6 @@
   product(bool, IgnoreEmptyClassPaths, false,                               \
           "Ignore empty path elements in -classpath")                       \
                                                                             \
-  product(bool, TraceClassPaths, false,                                     \
-          "Trace processing of class paths")                                \
-                                                                            \
   product(bool, TraceClassLoadingPreorder, false,                           \
           "Trace all classes loaded in order referenced (not loaded)")      \
                                                                             \
--- a/hotspot/src/share/vm/runtime/java.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/java.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -64,6 +64,7 @@
 #include "runtime/timer.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/memTracker.hpp"
+#include "trace/traceMacros.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -485,7 +486,7 @@
 
   EventThreadEnd event;
   if (event.should_commit()) {
-      event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
+      event.set_thread(THREAD_TRACE_ID(thread));
       event.commit();
   }
 
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -278,10 +278,6 @@
       // Allocate new block
       block = new JNIHandleBlock();
       _blocks_allocated++;
-      if (TraceJNIHandleAllocation) {
-        tty->print_cr("JNIHandleBlock " INTPTR_FORMAT " allocated (%d total blocks)",
-                      p2i(block), _blocks_allocated);
-      }
       if (ZapJNIHandleArea) block->zap();
       #ifndef PRODUCT
       // Link new block to list of all allocated blocks
@@ -499,10 +495,6 @@
     // Not as many free handles as we would like - compute number of new blocks to append
     _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
   }
-  if (TraceJNIHandleAllocation) {
-    tty->print_cr("Rebuild free list JNIHandleBlock " INTPTR_FORMAT " blocks=%d used=%d free=%d add=%d",
-                  p2i(this), blocks, total-free, free, _allocate_before_rebuild);
-  }
 }
 
 
--- a/hotspot/src/share/vm/runtime/mutex.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/mutex.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1320,15 +1320,12 @@
     // The rank Mutex::native  is an exception in that it is not subject
     // to the verification rules.
     // Here are some further notes relating to mutex acquisition anomalies:
-    // . under Solaris, the interrupt lock gets acquired when doing
-    //   profiling, so any lock could be held.
     // . it is also ok to acquire Safepoint_lock at the very end while we
     //   already hold Terminator_lock - may happen because of periodic safepoints
     if (this->rank() != Mutex::native &&
         this->rank() != Mutex::suspend_resume &&
         locks != NULL && locks->rank() <= this->rank() &&
         !SafepointSynchronize::is_at_safepoint() &&
-        this != Interrupt_lock && this != ProfileVM_lock &&
         !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
         SafepointSynchronize::is_synchronizing())) {
       new_owner->print_owned_locks();
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,6 @@
 Mutex*   JfieldIdCreation_lock        = NULL;
 Monitor* JNICritical_lock             = NULL;
 Mutex*   JvmtiThreadState_lock        = NULL;
-Monitor* JvmtiPendingEvent_lock       = NULL;
 Monitor* Heap_lock                    = NULL;
 Mutex*   ExpandHeap_lock              = NULL;
 Mutex*   AdapterHandlerLibrary_lock   = NULL;
@@ -73,8 +72,6 @@
 Monitor* STS_lock                     = NULL;
 Monitor* SLT_lock                     = NULL;
 Monitor* FullGCCount_lock             = NULL;
-Monitor* CMark_lock                   = NULL;
-Mutex*   CMRegionStack_lock           = NULL;
 Mutex*   SATB_Q_FL_lock               = NULL;
 Monitor* SATB_Q_CBL_mon               = NULL;
 Mutex*   Shared_SATB_Q_lock           = NULL;
@@ -94,11 +91,8 @@
 Monitor* Terminator_lock              = NULL;
 Monitor* BeforeExit_lock              = NULL;
 Monitor* Notify_lock                  = NULL;
-Monitor* Interrupt_lock               = NULL;
-Monitor* ProfileVM_lock               = NULL;
 Mutex*   ProfilePrint_lock            = NULL;
 Mutex*   ExceptionCache_lock          = NULL;
-Monitor* ObjAllocPost_lock            = NULL;
 Mutex*   OsrList_lock                 = NULL;
 
 #ifndef PRODUCT
@@ -184,8 +178,6 @@
   }
   if (UseG1GC) {
 
-    def(CMark_lock                 , Monitor, nonleaf,     true,  Monitor::_safepoint_check_never);      // coordinate concurrent mark thread
-    def(CMRegionStack_lock         , Mutex,   leaf,        true,  Monitor::_safepoint_check_never);
     def(SATB_Q_FL_lock             , Mutex  , special,     true,  Monitor::_safepoint_check_never);
     def(SATB_Q_CBL_mon             , Monitor, nonleaf,     true,  Monitor::_safepoint_check_never);
     def(Shared_SATB_Q_lock         , Mutex,   nonleaf,     true,  Monitor::_safepoint_check_never);
@@ -206,12 +198,10 @@
   def(ParGCRareEvent_lock          , Mutex  , leaf     ,   true,  Monitor::_safepoint_check_sometimes);
   def(DerivedPointerTableGC_lock   , Mutex,   leaf,        true,  Monitor::_safepoint_check_never);
   def(CodeCache_lock               , Mutex  , special,     true,  Monitor::_safepoint_check_never);
-  def(Interrupt_lock               , Monitor, special,     true,  Monitor::_safepoint_check_never);      // used for interrupt processing
   def(RawMonitor_lock              , Mutex,   special,     true,  Monitor::_safepoint_check_never);
   def(OopMapCacheAlloc_lock        , Mutex,   leaf,        true,  Monitor::_safepoint_check_always);     // used for oop_map_cache allocation.
 
   def(Patching_lock                , Mutex  , special,     true,  Monitor::_safepoint_check_never);      // used for safepointing and code patching.
-  def(ObjAllocPost_lock            , Monitor, special,     false, Monitor::_safepoint_check_never);
   def(Service_lock                 , Monitor, special,     true,  Monitor::_safepoint_check_never);      // used for service thread operations
   def(JmethodIdCreation_lock       , Mutex  , leaf,        true,  Monitor::_safepoint_check_always);     // used for creating jmethodIDs.
 
@@ -267,7 +257,6 @@
   def(MultiArray_lock              , Mutex  , nonleaf+2,   false, Monitor::_safepoint_check_always);     // locks SymbolTable_lock
 
   def(JvmtiThreadState_lock        , Mutex  , nonleaf+2,   false, Monitor::_safepoint_check_always);     // Used by JvmtiThreadState/JvmtiEventController
-  def(JvmtiPendingEvent_lock       , Monitor, nonleaf,     false, Monitor::_safepoint_check_never);      // Used by JvmtiCodeBlobEvents
   def(Management_lock              , Mutex  , nonleaf+2,   false, Monitor::_safepoint_check_always);     // used for JVM management
 
   def(Compile_lock                 , Mutex  , nonleaf+3,   true,  Monitor::_safepoint_check_sometimes);
@@ -277,7 +266,6 @@
   def(MethodCompileQueue_lock      , Monitor, nonleaf+4,   true,  Monitor::_safepoint_check_always);
   def(Debug2_lock                  , Mutex  , nonleaf+4,   true,  Monitor::_safepoint_check_never);
   def(Debug3_lock                  , Mutex  , nonleaf+4,   true,  Monitor::_safepoint_check_never);
-  def(ProfileVM_lock               , Monitor, special,     false, Monitor::_safepoint_check_never);      // used for profiling of the VMThread
   def(CompileThread_lock           , Monitor, nonleaf+5,   false, Monitor::_safepoint_check_always);
   def(PeriodicTask_lock            , Monitor, nonleaf+5,   true,  Monitor::_safepoint_check_sometimes);
   if (WhiteBoxAPI) {
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,6 @@
 extern Mutex*   JfieldIdCreation_lock;           // a lock on creating JNI static field identifiers
 extern Monitor* JNICritical_lock;                // a lock used while entering and exiting JNI critical regions, allows GC to sometimes get in
 extern Mutex*   JvmtiThreadState_lock;           // a lock on modification of JVMTI thread data
-extern Monitor* JvmtiPendingEvent_lock;          // a lock on the JVMTI pending events list
 extern Monitor* Heap_lock;                       // a lock on the heap
 extern Mutex*   ExpandHeap_lock;                 // a lock on expanding the heap
 extern Mutex*   AdapterHandlerLibrary_lock;      // a lock on the AdapterHandlerLibrary
@@ -68,8 +67,6 @@
 extern Monitor* STS_lock;                        // used for joining/leaving SuspendibleThreadSet.
 extern Monitor* SLT_lock;                        // used in CMS GC for acquiring PLL
 extern Monitor* FullGCCount_lock;                // in support of "concurrent" full gc
-extern Monitor* CMark_lock;                      // used for concurrent mark thread coordination
-extern Mutex*   CMRegionStack_lock;              // used for protecting accesses to the CM region stack
 extern Mutex*   SATB_Q_FL_lock;                  // Protects SATB Q
                                                  // buffer free list.
 extern Monitor* SATB_Q_CBL_mon;                  // Protects SATB Q
@@ -98,8 +95,6 @@
 extern Monitor* Terminator_lock;                 // a lock used to guard termination of the vm
 extern Monitor* BeforeExit_lock;                 // a lock used to guard cleanups and shutdown hooks
 extern Monitor* Notify_lock;                     // a lock used to synchronize the start-up of the vm
-extern Monitor* Interrupt_lock;                  // a lock used for condition variable mediated interrupt processing
-extern Monitor* ProfileVM_lock;                  // a lock used for profiling the VMThread
 extern Mutex*   ProfilePrint_lock;               // a lock used to serialize the printing of profiles
 extern Mutex*   ExceptionCache_lock;             // a lock used to synchronize exception cache updates
 extern Mutex*   OsrList_lock;                    // a lock used to serialize access to OSR queues
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -401,7 +401,7 @@
 
   if (event.should_commit()) {
     event.set_klass(((oop)this->object())->klass());
-    event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
+    event.set_previousOwner((TYPE_THREAD)_previous_owner_tid);
     event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
     event.commit();
   }
@@ -937,7 +937,7 @@
   // get the owner's thread id for the MonitorEnter event
   // if it is enabled and the thread isn't suspended
   if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
-    _previous_owner_tid = SharedRuntime::get_java_tid(Self);
+    _previous_owner_tid = THREAD_TRACE_ID(Self);
   }
 #endif
 
@@ -1391,11 +1391,12 @@
                                             jlong notifier_tid,
                                             jlong timeout,
                                             bool timedout) {
+  assert(event != NULL, "invariant");
   event->set_klass(((oop)this->object())->klass());
-  event->set_timeout((TYPE_ULONG)timeout);
-  event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
-  event->set_notifier((TYPE_OSTHREAD)notifier_tid);
-  event->set_timedOut((TYPE_BOOLEAN)timedout);
+  event->set_timeout(timeout);
+  event->set_address((TYPE_ADDRESS)this->object_addr());
+  event->set_notifier(notifier_tid);
+  event->set_timedOut(timedout);
   event->commit();
 }
 
@@ -1655,7 +1656,7 @@
       iterator->TState = ObjectWaiter::TS_ENTER;
     }
     iterator->_notified = 1;
-    iterator->_notifier_tid = Self->osthread()->thread_id();
+    iterator->_notifier_tid = THREAD_TRACE_ID(Self);
 
     ObjectWaiter * list = _EntryList;
     if (list != NULL) {
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1134,12 +1134,19 @@
         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
 
-      // Need to adjust invokehandle since inlining through signature-polymorphic
-      // method happened.
-      if (bc == Bytecodes::_invokehandle &&
-          !MethodHandles::is_signature_polymorphic_method(attached_method())) {
-        bc = attached_method->is_static() ? Bytecodes::_invokestatic
-                                          : Bytecodes::_invokevirtual;
+      // Adjust invocation mode according to the attached method.
+      switch (bc) {
+        case Bytecodes::_invokeinterface:
+          if (!attached_method->method_holder()->is_interface()) {
+            bc = Bytecodes::_invokevirtual;
+          }
+          break;
+        case Bytecodes::_invokehandle:
+          if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
+            bc = attached_method->is_static() ? Bytecodes::_invokestatic
+                                              : Bytecodes::_invokevirtual;
+          }
+          break;
       }
     }
   } else {
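
The rewritten block above generalizes the old invokehandle-only fix-up into a small decision table: an attached method whose holder is not an interface demotes invokeinterface to invokevirtual, and a non-signature-polymorphic attached method demotes invokehandle to invokestatic or invokevirtual depending on whether it is static. A standalone sketch of the same table, with the enum and predicates standing in for HotSpot's Bytecodes and Method queries:

    // Sketch of the invocation-mode adjustment in the hunk above; the enum
    // and the two predicates stand in for Bytecodes and the attached Method.
    enum class Bc { invokeinterface, invokevirtual, invokestatic, invokehandle };

    struct AttachedMethod {
      bool holder_is_interface;
      bool is_signature_polymorphic;
      bool is_static;
    };

    Bc adjust_invocation_mode(Bc bc, const AttachedMethod& m) {
      switch (bc) {
        case Bc::invokeinterface:
          if (!m.holder_is_interface) bc = Bc::invokevirtual;
          break;
        case Bc::invokehandle:
          if (!m.is_signature_polymorphic) {
            bc = m.is_static ? Bc::invokestatic : Bc::invokevirtual;
          }
          break;
        default:
          break;
      }
      return bc;
    }

    int main() {
      AttachedMethod m{ /*holder_is_interface*/ false,
                        /*is_signature_polymorphic*/ false,
                        /*is_static*/ false };
      return adjust_invocation_mode(Bc::invokeinterface, m) == Bc::invokevirtual ? 0 : 1;
    }
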
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -36,19 +36,13 @@
 // Implementation of StubCodeDesc
 
 StubCodeDesc* StubCodeDesc::_list = NULL;
-int           StubCodeDesc::_count = 0;
 bool          StubCodeDesc::_frozen = false;
 
 StubCodeDesc* StubCodeDesc::desc_for(address pc) {
   StubCodeDesc* p = _list;
-  while (p != NULL && !p->contains(pc)) p = p->_next;
-  // p == NULL || p->contains(pc)
-  return p;
-}
-
-StubCodeDesc* StubCodeDesc::desc_for_index(int index) {
-  StubCodeDesc* p = _list;
-  while (p != NULL && p->index() != index) p = p->_next;
+  while (p != NULL && !p->contains(pc)) {
+    p = p->_next;
+  }
   return p;
 }
 
@@ -73,43 +67,17 @@
 // Implementation of StubCodeGenerator
 
 StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, bool print_code) {
-  _masm = new MacroAssembler(code);
-  _first_stub = _last_stub = NULL;
-  _print_code = print_code;
-}
-
-extern "C" {
-  static int compare_cdesc(const void* void_a, const void* void_b) {
-    int ai = (*((StubCodeDesc**) void_a))->index();
-    int bi = (*((StubCodeDesc**) void_b))->index();
-    return ai - bi;
-  }
+  _masm = new MacroAssembler(code );
+  _print_code = PrintStubCode || print_code;
 }
 
 StubCodeGenerator::~StubCodeGenerator() {
-  if (PrintStubCode || _print_code) {
+  if (_print_code) {
     CodeBuffer* cbuf = _masm->code();
     CodeBlob*   blob = CodeCache::find_blob_unsafe(cbuf->insts()->start());
     if (blob != NULL) {
       blob->set_strings(cbuf->strings());
     }
-    bool saw_first = false;
-    StubCodeDesc* toprint[1000];
-    int toprint_len = 0;
-    for (StubCodeDesc* cdesc = _last_stub; cdesc != NULL; cdesc = cdesc->_next) {
-      toprint[toprint_len++] = cdesc;
-      if (cdesc == _first_stub) { saw_first = true; break; }
-    }
-    assert(toprint_len == 0 || saw_first, "must get both first & last");
-    // Print in reverse order:
-    qsort(toprint, toprint_len, sizeof(toprint[0]), compare_cdesc);
-    for (int i = 0; i < toprint_len; i++) {
-      StubCodeDesc* cdesc = toprint[i];
-      cdesc->print();
-      tty->cr();
-      Disassembler::decode(cdesc->begin(), cdesc->end());
-      tty->cr();
-    }
   }
 }
 
@@ -118,9 +86,12 @@
 }
 
 void StubCodeGenerator::stub_epilog(StubCodeDesc* cdesc) {
-  // default implementation - record the cdesc
-  if (_first_stub == NULL)  _first_stub = cdesc;
-  _last_stub = cdesc;
+  if (_print_code) {
+    cdesc->print();
+    tty->cr();
+    Disassembler::decode(cdesc->begin(), cdesc->end());
+    tty->cr();
+  }
 }
 
 
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -39,13 +39,11 @@
 class StubCodeDesc: public CHeapObj<mtCode> {
  private:
   static StubCodeDesc* _list;                  // the list of all descriptors
-  static int           _count;                 // length of list
   static bool          _frozen;                // determines whether _list modifications are allowed
 
   StubCodeDesc*        _next;                  // the next element in the linked list
   const char*          _group;                 // the group to which the stub code belongs
   const char*          _name;                  // the name assigned to the stub code
-  int                  _index;                 // serial number assigned to the stub
   address              _begin;                 // points to the first byte of the stub code    (included)
   address              _end;                   // points to the first byte after the stub code (excluded)
 
@@ -64,8 +62,10 @@
   friend class StubCodeGenerator;
 
  public:
+  static StubCodeDesc* first() { return _list; }
+  static StubCodeDesc* next(StubCodeDesc* desc)  { return desc->_next; }
+
   static StubCodeDesc* desc_for(address pc);     // returns the code descriptor for the code containing pc or NULL
-  static StubCodeDesc* desc_for_index(int);      // returns the code descriptor for the index or NULL
   static const char*   name_for(address pc);     // returns the name of the code containing pc or NULL
 
   StubCodeDesc(const char* group, const char* name, address begin, address end = NULL) {
@@ -74,7 +74,6 @@
     _next           = _list;
     _group          = group;
     _name           = name;
-    _index          = ++_count; // (never zero)
     _begin          = begin;
     _end            = end;
     _list           = this;
@@ -84,7 +83,6 @@
 
   const char* group() const                      { return _group; }
   const char* name() const                       { return _name; }
-  int         index() const                      { return _index; }
   address     begin() const                      { return _begin; }
   address     end() const                        { return _end; }
   int         size_in_bytes() const              { return _end - _begin; }
@@ -97,13 +95,12 @@
 // Provides utility functions.
 
 class StubCodeGenerator: public StackObj {
+ private:
+  bool _print_code;
+
  protected:
   MacroAssembler*  _masm;
 
-  StubCodeDesc* _first_stub;
-  StubCodeDesc* _last_stub;
-  bool _print_code;
-
  public:
   StubCodeGenerator(CodeBuffer* code, bool print_code = false);
   ~StubCodeGenerator();
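
With the serial index and desc_for_index() removed, StubCodeDesc is now walked through the new first()/next() accessors over the intrusive singly-linked list; each descriptor pushes itself onto the head at construction, so traversal runs newest to oldest. A minimal standalone sketch of that registration-and-walk pattern (class and field names are illustrative):

    // Sketch: intrusive singly-linked registry walked via first()/next(),
    // mirroring the StubCodeDesc accessors added above (names illustrative).
    #include <cstdio>

    class Desc {
     private:
      static Desc* _list;             // head of the global list
      Desc*        _next;
      const char*  _name;

     public:
      explicit Desc(const char* name) : _next(_list), _name(name) {
        _list = this;                 // each new descriptor becomes the head
      }
      static Desc* first()       { return _list; }
      static Desc* next(Desc* d) { return d->_next; }
      const char*  name() const  { return _name; }
    };

    Desc* Desc::_list = nullptr;

    int main() {
      Desc a("StubRoutines::a");
      Desc b("StubRoutines::b");
      // Walk newest-to-oldest, as a consumer of first()/next() would.
      for (Desc* d = Desc::first(); d != nullptr; d = Desc::next(d)) {
        std::printf("%s\n", d->name());
      }
      return 0;
    }
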
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -324,6 +324,10 @@
   // record thread's native stack, stack grows downward
   MemTracker::record_thread_stack(stack_end(), stack_size());
 #endif // INCLUDE_NMT
+  log_debug(os, thread)("Thread " UINTX_FORMAT " stack dimensions: "
+    PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "k).",
+    os::current_thread_id(), p2i(stack_base() - stack_size()),
+    p2i(stack_base()), stack_size()/1024);
 }
 
 
@@ -1690,7 +1694,7 @@
 
   EventThreadStart event;
   if (event.should_commit()) {
-    event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+    event.set_thread(THREAD_TRACE_ID(this));
     event.commit();
   }
 
@@ -1795,7 +1799,7 @@
     // from java_lang_Thread object
     EventThreadEnd event;
     if (event.should_commit()) {
-      event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+      event.set_thread(THREAD_TRACE_ID(this));
       event.commit();
     }
 
@@ -1924,6 +1928,10 @@
   }
 #endif // INCLUDE_ALL_GCS
 
+  log_info(os, thread)("JavaThread %s (tid: " UINTX_FORMAT ").",
+    exit_type == JavaThread::normal_exit ? "exiting" : "detaching",
+    os::current_thread_id());
+
   // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread
   Threads::remove(this);
 }
@@ -2491,18 +2499,25 @@
   // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);
 
   if (allocate && !os::create_stack_guard_pages((char *) low_addr, len)) {
-    warning("Attempt to allocate stack guard pages failed.");
+    log_warning(os, thread)("Attempt to allocate stack guard pages failed.");
     return;
   }
 
   if (os::guard_memory((char *) low_addr, len)) {
     _stack_guard_state = stack_guard_enabled;
   } else {
-    warning("Attempt to protect stack guard pages failed.");
+    log_warning(os, thread)("Attempt to protect stack guard pages failed ("
+      PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
     if (os::uncommit_memory((char *) low_addr, len)) {
-      warning("Attempt to deallocate stack guard pages failed.");
+      log_warning(os, thread)("Attempt to deallocate stack guard pages failed.");
     }
+    return;
   }
+
+  log_debug(os, thread)("Thread " UINTX_FORMAT " stack guard pages activated: "
+    PTR_FORMAT "-" PTR_FORMAT ".",
+    os::current_thread_id(), p2i(low_addr), p2i(low_addr + len));
+
 }
 
 void JavaThread::remove_stack_guard_pages() {
@@ -2515,16 +2530,25 @@
     if (os::remove_stack_guard_pages((char *) low_addr, len)) {
       _stack_guard_state = stack_guard_unused;
     } else {
-      warning("Attempt to deallocate stack guard pages failed.");
+      log_warning(os, thread)("Attempt to deallocate stack guard pages failed ("
+        PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
+      return;
     }
   } else {
     if (_stack_guard_state == stack_guard_unused) return;
     if (os::unguard_memory((char *) low_addr, len)) {
       _stack_guard_state = stack_guard_unused;
     } else {
-      warning("Attempt to unprotect stack guard pages failed.");
+      log_warning(os, thread)("Attempt to unprotect stack guard pages failed ("
+        PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
+      return;
     }
   }
+
+  log_debug(os, thread)("Thread " UINTX_FORMAT " stack guard pages removed: "
+    PTR_FORMAT "-" PTR_FORMAT ".",
+    os::current_thread_id(), p2i(low_addr), p2i(low_addr + len));
+
 }
 
 void JavaThread::enable_stack_reserved_zone() {
@@ -3530,6 +3554,10 @@
     return status;
   }
 
+  if (TRACE_INITIALIZE() != JNI_OK) {
+    vm_exit_during_initialization("Failed to initialize tracing backend");
+  }
+
   // Should be done after the heap is fully created
   main_thread->cache_global_variables();
 
@@ -3598,11 +3626,6 @@
 
   quicken_jni_functions();
 
-  // Must be run after init_ft which initializes ft_enabled
-  if (TRACE_INITIALIZE() != JNI_OK) {
-    vm_exit_during_initialization("Failed to initialize tracing backend");
-  }
-
   // No more stub generation allowed after that point.
   StubCodeDesc::freeze();
 
@@ -4110,6 +4133,7 @@
   if (version == JNI_VERSION_1_4) return JNI_TRUE;
   if (version == JNI_VERSION_1_6) return JNI_TRUE;
   if (version == JNI_VERSION_1_8) return JNI_TRUE;
+  if (version == JNI_VERSION_9) return JNI_TRUE;
   return JNI_FALSE;
 }
 
--- a/hotspot/src/share/vm/runtime/thread.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -448,7 +448,8 @@
   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
   inline jlong cooked_allocated_bytes();
 
-  TRACE_DATA* trace_data()              { return &_trace_data; }
+  TRACE_DEFINE_THREAD_TRACE_DATA_OFFSET;
+  TRACE_DATA* trace_data() const        { return &_trace_data; }
 
   const ThreadExt& ext() const          { return _ext; }
   ThreadExt& ext()                      { return _ext; }
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -960,7 +960,6 @@
   nonstatic_field(nmethod,                     _compile_id,                                   int)                                   \
   nonstatic_field(nmethod,                     _comp_level,                                   int)                                   \
   nonstatic_field(nmethod,                     _exception_cache,                              ExceptionCache*)                       \
-  nonstatic_field(nmethod,                     _marked_for_deoptimization,                    bool)                                  \
                                                                                                                                      \
   unchecked_c2_static_field(Deoptimization,    _trap_reason_name,                             void*)                                 \
                                                                                                                                      \
@@ -2006,10 +2005,20 @@
   declare_c2_type(LoadStoreNode, Node)                                    \
   declare_c2_type(StorePConditionalNode, LoadStoreNode)                   \
   declare_c2_type(StoreLConditionalNode, LoadStoreNode)                   \
-  declare_c2_type(CompareAndSwapLNode, LoadStoreNode)                     \
-  declare_c2_type(CompareAndSwapINode, LoadStoreNode)                     \
-  declare_c2_type(CompareAndSwapPNode, LoadStoreNode)                     \
-  declare_c2_type(CompareAndSwapNNode, LoadStoreNode)                     \
+  declare_c2_type(CompareAndSwapNode, LoadStoreConditionalNode)           \
+  declare_c2_type(CompareAndSwapLNode, CompareAndSwapNode)                \
+  declare_c2_type(CompareAndSwapINode, CompareAndSwapNode)                \
+  declare_c2_type(CompareAndSwapPNode, CompareAndSwapNode)                \
+  declare_c2_type(CompareAndSwapNNode, CompareAndSwapNode)                \
+  declare_c2_type(WeakCompareAndSwapLNode, CompareAndSwapNode)            \
+  declare_c2_type(WeakCompareAndSwapINode, CompareAndSwapNode)            \
+  declare_c2_type(WeakCompareAndSwapPNode, CompareAndSwapNode)            \
+  declare_c2_type(WeakCompareAndSwapNNode, CompareAndSwapNode)            \
+  declare_c2_type(CompareAndExchangeNode, LoadStoreNode)                  \
+  declare_c2_type(CompareAndExchangeLNode, CompareAndExchangeNode)        \
+  declare_c2_type(CompareAndExchangeINode, CompareAndExchangeNode)        \
+  declare_c2_type(CompareAndExchangePNode, CompareAndExchangeNode)        \
+  declare_c2_type(CompareAndExchangeNNode, CompareAndExchangeNode)        \
   declare_c2_type(MulNode, Node)                                          \
   declare_c2_type(MulINode, MulNode)                                      \
   declare_c2_type(MulLNode, MulNode)                                      \
--- a/hotspot/src/share/vm/runtime/vmThread.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -359,7 +359,7 @@
       // Only write caller thread information for non-concurrent vm operations.
       // For concurrent vm operations, the thread id is set to 0 indicating thread is unknown.
       // This is because the caller thread could have exited already.
-      event.set_caller(is_concurrent ? 0 : op->calling_thread()->osthread()->thread_id());
+      event.set_caller(is_concurrent ? 0 : THREAD_TRACE_ID(op->calling_thread()));
       event.commit();
     }
 
--- a/hotspot/src/share/vm/runtime/vm_operations.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vm_operations.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -485,6 +485,10 @@
   }
 }
 
+void VM_PrintCompileQueue::doit() {
+  CompileBroker::print_compile_queues(_out);
+}
+
 #if INCLUDE_SERVICES
 void VM_PrintClassHierarchy::doit() {
   KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -105,6 +105,7 @@
   template(DumpHashtable)                         \
   template(DumpTouchedMethods)                    \
   template(MarkActiveNMethods)                    \
+  template(PrintCompileQueue)                     \
   template(PrintClassHierarchy)                   \
 
 class VM_Operation: public CHeapObj<mtInternal> {
@@ -421,6 +422,17 @@
   void doit();
 };
 
+class VM_PrintCompileQueue: public VM_Operation {
+ private:
+  outputStream* _out;
+
+ public:
+  VM_PrintCompileQueue(outputStream* st) : _out(st) {}
+  VMOp_Type type() const { return VMOp_PrintCompileQueue; }
+  Mode evaluation_mode() const { return _safepoint; }
+  void doit();
+};
+
 #if INCLUDE_SERVICES
 class VM_PrintClassHierarchy: public VM_Operation {
  private:
--- a/hotspot/src/share/vm/services/diagnosticCommand.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/services/diagnosticCommand.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -893,7 +893,8 @@
 }
 
 void CompileQueueDCmd::execute(DCmdSource source, TRAPS) {
-  CompileBroker::print_compile_queues(output());
+  VM_PrintCompileQueue printCompileQueueOp(output());
+  VMThread::execute(&printCompileQueueOp);
 }
 
 void CodeListDCmd::execute(DCmdSource source, TRAPS) {
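
The jcmd Compiler.queue handler above no longer prints directly from the calling thread; it wraps the work in a VM_PrintCompileQueue operation and hands it to VMThread::execute(), so the compile queues are inspected at a safepoint. A hedged standalone sketch of that "package the work as an operation, let a central executor invoke doit()" shape; thread handoff and safepointing are omitted, only the structure is shown:

    // Sketch of the VM-operation shape used above: the request site builds
    // an operation object and a central executor runs doit(). In HotSpot the
    // executor is the VM thread, typically running the operation at a
    // safepoint; here it is a plain function call.
    #include <cstdio>

    class Operation {
     public:
      virtual ~Operation() {}
      virtual const char* name() const = 0;
      virtual void doit() = 0;
    };

    class PrintCompileQueueOp : public Operation {
     private:
      std::FILE* _out;
     public:
      explicit PrintCompileQueueOp(std::FILE* out) : _out(out) {}
      const char* name() const override { return "PrintCompileQueue"; }
      void doit() override {
        std::fprintf(_out, "compile queue: (contents would be printed here)\n");
      }
    };

    // Stand-in for VMThread::execute(): run the operation on behalf of the
    // requesting thread.
    void execute(Operation* op) {
      std::printf("executing %s\n", op->name());
      op->doit();
    }

    int main() {
      PrintCompileQueueOp op(stdout);
      execute(&op);
      return 0;
    }
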
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,8 +53,7 @@
  *   src/share/demo/jvmti/hprof/hprof_io.c
  *
  *
- *  header    "JAVA PROFILE 1.0.1" or "JAVA PROFILE 1.0.2"
- *            (0-terminated)
+ *  header    "JAVA PROFILE 1.0.2" (0-terminated)
  *
  *  u4        size of identifiers. Identifiers are used to represent
  *            UTF8 strings, objects, stack traces, etc. They usually
@@ -385,6 +384,8 @@
   size_t _size;
   size_t _pos;
 
+  jlong _dump_start;
+
   char* _error;   // error message when I/O fails
 
   void set_file_descriptor(int fd)              { _fd = fd; }
@@ -408,6 +409,10 @@
   bool is_open() const                  { return file_descriptor() >= 0; }
   void flush();
 
+  jlong dump_start() const                      { return _dump_start; }
+  void set_dump_start(jlong pos);
+  julong current_record_length();
+
   // total number of bytes written to the disk
   julong bytes_written() const          { return _bytes_written; }
 
@@ -449,6 +454,7 @@
   _pos = 0;
   _error = NULL;
   _bytes_written = 0L;
+  _dump_start = (jlong)-1;
   _fd = os::create_binary_file(path, false);    // don't replace existing file
 
   // if the open failed we record the error
@@ -476,6 +482,22 @@
   }
 }
 
+// sets the dump starting position
+void DumpWriter::set_dump_start(jlong pos) {
+  _dump_start = pos;
+}
+
+julong DumpWriter::current_record_length() {
+  if (is_open()) {
+    // calculate the size of the dump record
+    julong dump_end = bytes_written() + bytes_unwritten();
+    assert(dump_end == (size_t)current_offset(), "checking");
+    julong dump_len = dump_end - dump_start() - 4;
+    return dump_len;
+  }
+  return 0;
+}
+
 // write directly to the file
 void DumpWriter::write_internal(void* s, size_t len) {
   if (is_open()) {
@@ -645,6 +667,18 @@
   static void dump_prim_array(DumpWriter* writer, typeArrayOop array);
   // create HPROF_FRAME record for the given method and bci
   static void dump_stack_frame(DumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
+
+  // check if we need to truncate an array
+  static int calculate_array_max_length(DumpWriter* writer, arrayOop array, short header_size);
+
+  // writes a HPROF_HEAP_DUMP_SEGMENT record
+  static void write_dump_header(DumpWriter* writer);
+
+  // fixes up the length of the current dump record
+  static void write_current_dump_record_length(DumpWriter* writer);
+
+  // fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
+  static void end_of_dump(DumpWriter* writer);
 };
 
 // write a header of the given type
@@ -1005,50 +1039,102 @@
   }
 }
 
+// Hprof uses an u4 as record length field,
+// which means we need to truncate arrays that are too long.
+int DumperSupport::calculate_array_max_length(DumpWriter* writer, arrayOop array, short header_size) {
+  BasicType type = ArrayKlass::cast(array->klass())->element_type();
+  assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
+
+  int length = array->length();
+
+  int type_size;
+  if (type == T_OBJECT) {
+    type_size = sizeof(address);
+  } else {
+    type_size = type2aelembytes(type);
+  }
+
+  size_t length_in_bytes = (size_t)length * type_size;
+
+  // Create a new record if the current record is non-empty and the array can't fit.
+  julong current_record_length = writer->current_record_length();
+  if (current_record_length > 0 &&
+      (current_record_length + header_size + length_in_bytes) > max_juint) {
+    write_current_dump_record_length(writer);
+    write_dump_header(writer);
+
+    // We now have an empty record.
+    current_record_length = 0;
+  }
+
+  // Calculate max bytes we can use.
+  uint max_bytes = max_juint - (header_size + current_record_length);
+
+  // Array too long for the record?
+  // Calculate max length and return it.
+  if (length_in_bytes > max_bytes) {
+    length = max_bytes / type_size;
+    length_in_bytes = (size_t)length * type_size;
+
+    warning("cannot dump array of type %s[] with length %d; truncating to length %d",
+            type2name_tab[type], array->length(), length);
+  }
+  return length;
+}
+
 // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
 void DumperSupport::dump_object_array(DumpWriter* writer, objArrayOop array) {
+  // sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID) + sizeof(classID)
+  short header_size = 1 + 2 * 4 + 2 * sizeof(address);
+
+  int length = calculate_array_max_length(writer, array, header_size);
 
   writer->write_u1(HPROF_GC_OBJ_ARRAY_DUMP);
   writer->write_objectID(array);
   writer->write_u4(STACK_TRACE_ID);
-  writer->write_u4((u4)array->length());
+  writer->write_u4(length);
 
   // array class ID
   writer->write_classID(array->klass());
 
   // [id]* elements
-  for (int index=0; index<array->length(); index++) {
+  for (int index = 0; index < length; index++) {
     oop o = array->obj_at(index);
     writer->write_objectID(o);
   }
 }
 
-#define WRITE_ARRAY(Array, Type, Size) \
-  for (int i=0; i<Array->length(); i++) { writer->write_##Size((Size)array->Type##_at(i)); }
-
+#define WRITE_ARRAY(Array, Type, Size, Length) \
+  for (int i = 0; i < Length; i++) { writer->write_##Size((Size)Array->Type##_at(i)); }
 
 // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
 void DumperSupport::dump_prim_array(DumpWriter* writer, typeArrayOop array) {
   BasicType type = TypeArrayKlass::cast(array->klass())->element_type();
 
+  // 2 * sizeof(u1) + 2 * sizeof(u4) + sizeof(objectID)
+  short header_size = 2 * 1 + 2 * 4 + sizeof(address);
+
+  int length = calculate_array_max_length(writer, array, header_size);
+  int type_size = type2aelembytes(type);
+  u4 length_in_bytes = (u4)length * type_size;
+
   writer->write_u1(HPROF_GC_PRIM_ARRAY_DUMP);
   writer->write_objectID(array);
   writer->write_u4(STACK_TRACE_ID);
-  writer->write_u4((u4)array->length());
+  writer->write_u4(length);
   writer->write_u1(type2tag(type));
 
   // nothing to copy
-  if (array->length() == 0) {
+  if (length == 0) {
     return;
   }
 
   // If the byte ordering is big endian then we can copy most types directly
-  u4 length_in_bytes = (u4)array->length() * type2aelembytes(type);
 
   switch (type) {
     case T_INT : {
       if (Bytes::is_Java_byte_ordering_different()) {
-        WRITE_ARRAY(array, int, u4);
+        WRITE_ARRAY(array, int, u4, length);
       } else {
         writer->write_raw((void*)(array->int_at_addr(0)), length_in_bytes);
       }
@@ -1060,7 +1146,7 @@
     }
     case T_CHAR : {
       if (Bytes::is_Java_byte_ordering_different()) {
-        WRITE_ARRAY(array, char, u2);
+        WRITE_ARRAY(array, char, u2, length);
       } else {
         writer->write_raw((void*)(array->char_at_addr(0)), length_in_bytes);
       }
@@ -1068,7 +1154,7 @@
     }
     case T_SHORT : {
       if (Bytes::is_Java_byte_ordering_different()) {
-        WRITE_ARRAY(array, short, u2);
+        WRITE_ARRAY(array, short, u2, length);
       } else {
         writer->write_raw((void*)(array->short_at_addr(0)), length_in_bytes);
       }
@@ -1076,7 +1162,7 @@
     }
     case T_BOOLEAN : {
       if (Bytes::is_Java_byte_ordering_different()) {
-        WRITE_ARRAY(array, bool, u1);
+        WRITE_ARRAY(array, bool, u1, length);
       } else {
         writer->write_raw((void*)(array->bool_at_addr(0)), length_in_bytes);
       }
@@ -1084,7 +1170,7 @@
     }
     case T_LONG : {
       if (Bytes::is_Java_byte_ordering_different()) {
-        WRITE_ARRAY(array, long, u8);
+        WRITE_ARRAY(array, long, u8, length);
       } else {
         writer->write_raw((void*)(array->long_at_addr(0)), length_in_bytes);
       }
@@ -1096,14 +1182,14 @@
     // use IEEE 754.
 
     case T_FLOAT : {
-      for (int i=0; i<array->length(); i++) {
-        dump_float( writer, array->float_at(i) );
+      for (int i = 0; i < length; i++) {
+        dump_float(writer, array->float_at(i));
       }
       break;
     }
     case T_DOUBLE : {
-      for (int i=0; i<array->length(); i++) {
-        dump_double( writer, array->double_at(i) );
+      for (int i = 0; i < length; i++) {
+        dump_double(writer, array->double_at(i));
       }
       break;
     }
@@ -1320,8 +1406,6 @@
   JavaThread*           _oome_thread;
   Method*               _oome_constructor;
   bool _gc_before_heap_dump;
-  bool _is_segmented_dump;
-  jlong _dump_start;
   GrowableArray<Klass*>* _klass_map;
   ThreadStackTrace** _stack_traces;
   int _num_threads;
@@ -1340,11 +1424,6 @@
   void clear_global_dumper() { _global_dumper = NULL; }
   void clear_global_writer() { _global_writer = NULL; }
 
-  bool is_segmented_dump() const                { return _is_segmented_dump; }
-  void set_segmented_dump()                     { _is_segmented_dump = true; }
-  jlong dump_start() const                      { return _dump_start; }
-  void set_dump_start(jlong pos);
-
   bool skip_operation() const;
 
   // writes a HPROF_LOAD_CLASS record
@@ -1369,16 +1448,6 @@
   // HPROF_TRACE and HPROF_FRAME records
   void dump_stack_traces();
 
-  // writes a HPROF_HEAP_DUMP or HPROF_HEAP_DUMP_SEGMENT record
-  void write_dump_header();
-
-  // fixes up the length of the current dump record
-  void write_current_dump_record_length();
-
-  // fixes up the current dump record )and writes HPROF_HEAP_DUMP_END
-  // record in the case of a segmented heap dump)
-  void end_of_dump();
-
  public:
   VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump, bool oome) :
     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
@@ -1387,8 +1456,6 @@
                     gc_before_heap_dump) {
     _local_writer = writer;
     _gc_before_heap_dump = gc_before_heap_dump;
-    _is_segmented_dump = false;
-    _dump_start = (jlong)-1;
     _klass_map = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, true);
     _stack_traces = NULL;
     _num_threads = 0;
@@ -1428,35 +1495,23 @@
   return false;
 }
 
-// sets the dump starting position
-void VM_HeapDumper::set_dump_start(jlong pos) {
-  _dump_start = pos;
-}
-
- // writes a HPROF_HEAP_DUMP or HPROF_HEAP_DUMP_SEGMENT record
-void VM_HeapDumper::write_dump_header() {
-  if (writer()->is_open()) {
-    if (is_segmented_dump()) {
-      writer()->write_u1(HPROF_HEAP_DUMP_SEGMENT);
-    } else {
-      writer()->write_u1(HPROF_HEAP_DUMP);
-    }
-    writer()->write_u4(0); // current ticks
+ // writes a HPROF_HEAP_DUMP_SEGMENT record
+void DumperSupport::write_dump_header(DumpWriter* writer) {
+  if (writer->is_open()) {
+    writer->write_u1(HPROF_HEAP_DUMP_SEGMENT);
+    writer->write_u4(0); // current ticks
 
     // record the starting position for the dump (its length will be fixed up later)
-    set_dump_start(writer()->current_offset());
-    writer()->write_u4(0);
+    writer->set_dump_start(writer->current_offset());
+    writer->write_u4(0);
   }
 }
 
 // fixes up the length of the current dump record
-void VM_HeapDumper::write_current_dump_record_length() {
-  if (writer()->is_open()) {
-    assert(dump_start() >= 0, "no dump start recorded");
-
-    // calculate the size of the dump record
-    julong dump_end = writer()->current_offset();
-    julong dump_len = (dump_end - dump_start() - 4);
+void DumperSupport::write_current_dump_record_length(DumpWriter* writer) {
+  if (writer->is_open()) {
+    julong dump_end = writer->bytes_written() + writer->bytes_unwritten();
+    julong dump_len = writer->current_record_length();
 
     // record length must fit in a u4
     if (dump_len > max_juint) {
@@ -1464,17 +1519,18 @@
     }
 
     // seek to the dump start and fix-up the length
-    writer()->seek_to_offset(dump_start());
-    writer()->write_u4((u4)dump_len);
+    assert(writer->dump_start() >= 0, "no dump start recorded");
+    writer->seek_to_offset(writer->dump_start());
+    writer->write_u4((u4)dump_len);
 
     // adjust the total size written to keep the bytes written correct.
-    writer()->adjust_bytes_written(-((jlong) sizeof(u4)));
+    writer->adjust_bytes_written(-((jlong) sizeof(u4)));
 
     // seek to dump end so we can continue
-    writer()->seek_to_offset(dump_end);
+    writer->seek_to_offset(dump_end);
 
     // no current dump record
-    set_dump_start((jlong)-1);
+    writer->set_dump_start((jlong)-1);
   }
 }
 
@@ -1482,33 +1538,23 @@
 // new segment.
 void VM_HeapDumper::check_segment_length() {
   if (writer()->is_open()) {
-    if (is_segmented_dump()) {
-      // don't use current_offset that would be too expensive on a per record basis
-      julong dump_end = writer()->bytes_written() + writer()->bytes_unwritten();
-      assert(dump_end == (julong)writer()->current_offset(), "checking");
-      julong dump_len = (dump_end - dump_start() - 4);
-      assert(dump_len <= max_juint, "bad dump length");
+    julong dump_len = writer()->current_record_length();
 
-      if (dump_len > HeapDumpSegmentSize) {
-        write_current_dump_record_length();
-        write_dump_header();
-      }
+    if (dump_len > 2UL*G) {
+      DumperSupport::write_current_dump_record_length(writer());
+      DumperSupport::write_dump_header(writer());
     }
   }
 }
 
-// fixes up the current dump record )and writes HPROF_HEAP_DUMP_END
-// record in the case of a segmented heap dump)
-void VM_HeapDumper::end_of_dump() {
-  if (writer()->is_open()) {
-    write_current_dump_record_length();
+// fixes up the current dump record and writes HPROF_HEAP_DUMP_END record
+void DumperSupport::end_of_dump(DumpWriter* writer) {
+  if (writer->is_open()) {
+    write_current_dump_record_length(writer);
 
-    // for segmented dump we write the end record
-    if (is_segmented_dump()) {
-      writer()->write_u1(HPROF_HEAP_DUMP_END);
-      writer()->write_u4(0);
-      writer()->write_u4(0);
-    }
+    writer->write_u1(HPROF_HEAP_DUMP_END);
+    writer->write_u4(0);
+    writer->write_u4(0);
   }
 }
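
The segmented-dump handling above follows one pattern: write the HPROF_HEAP_DUMP_SEGMENT tag and a placeholder length, remember the placeholder's offset, stream the sub-records, then seek back and patch the length, rolling over to a fresh segment before the u4 length could overflow. Below is a minimal, self-contained sketch of that pattern. It is not the VM's DumpWriter API; SegmentWriter, kMaxSegment and the std::ofstream plumbing are assumptions made for the illustration only.

#include <cstdint>
#include <fstream>
#include <vector>

// Sketch of single-pass segment writing: the u4 length is unknown when the
// record starts, so a placeholder is written and patched when the segment
// is closed.
class SegmentWriter {
 public:
  explicit SegmentWriter(const char* path)
      : out_(path, std::ios::binary | std::ios::trunc) {}

  void start_segment() {
    out_.put(static_cast<char>(0x1C));   // HPROF_HEAP_DUMP_SEGMENT tag
    write_u4(0);                         // ticks
    length_pos_ = out_.tellp();          // remember where the length field is
    write_u4(0);                         // placeholder length
    segment_len_ = 0;
  }

  void write_sub_record(const std::vector<char>& bytes) {
    // Roll over to a new segment before the u4 length field would overflow
    // (the code above uses a 2G threshold for the same reason).
    const uint64_t kMaxSegment = 1ULL << 31;  // stand-in for the 2G limit
    if (segment_len_ + bytes.size() > kMaxSegment) {
      end_segment();
      start_segment();
    }
    out_.write(bytes.data(), static_cast<std::streamsize>(bytes.size()));
    segment_len_ += bytes.size();
  }

  void end_segment() {
    std::streampos end = out_.tellp();
    out_.seekp(length_pos_);
    write_u4(static_cast<uint32_t>(segment_len_));  // fix up the length
    out_.seekp(end);                                // continue after the segment
  }

 private:
  void write_u4(uint32_t v) {            // HPROF fields are big-endian
    char b[4] = { static_cast<char>(v >> 24), static_cast<char>(v >> 16),
                  static_cast<char>(v >> 8),  static_cast<char>(v) };
    out_.write(b, 4);
  }

  std::ofstream out_;
  std::streampos length_pos_;
  uint64_t segment_len_ = 0;
};

int main() {
  SegmentWriter w("segment.bin");
  w.start_segment();
  w.write_sub_record(std::vector<char>(16, 0));
  w.end_segment();
  return 0;
}
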
 
@@ -1686,16 +1732,17 @@
 //  [HPROF_LOAD_CLASS]*
 //  [[HPROF_FRAME]*|HPROF_TRACE]*
 //  [HPROF_GC_CLASS_DUMP]*
-//  HPROF_HEAP_DUMP
+//  [HPROF_HEAP_DUMP_SEGMENT]*
+//  HPROF_HEAP_DUMP_END
 //
 // The HPROF_TRACE records represent the stack traces where the heap dump
 // is generated and a "dummy trace" record which does not include
 // any frames. The dummy trace record is referenced as the
 // unknown object alloc site.
 //
-// The HPROF_HEAP_DUMP record has a length following by sub-records. To allow
-// the heap dump be generated in a single pass we remember the position of
-// the dump length and fix it up after all sub-records have been written.
+// Each HPROF_HEAP_DUMP_SEGMENT record has a length followed by sub-records.
+// To allow the heap dump to be generated in a single pass we remember the position
+// of the dump length and fix it up after all sub-records have been written.
 // To generate the sub-records we iterate over the heap, writing
 // HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP, and HPROF_GC_PRIM_ARRAY_DUMP
 // records as we go. Once that is done we write records for some of the GC
@@ -1722,15 +1769,9 @@
   set_global_dumper();
   set_global_writer();
 
-  // Write the file header - use 1.0.2 for large heaps, otherwise 1.0.1
+  // Write the file header - we always use 1.0.2
   size_t used = ch->used();
-  const char* header;
-  if (used > SegmentedHeapDumpThreshold) {
-    set_segmented_dump();
-    header = "JAVA PROFILE 1.0.2";
-  } else {
-    header = "JAVA PROFILE 1.0.1";
-  }
+  const char* header = "JAVA PROFILE 1.0.2";
 
   // header is a few bytes long - no chance to overflow int
   writer()->write_raw((void*)header, (int)strlen(header));
@@ -1750,8 +1791,8 @@
   // this must be called after _klass_map is built when iterating the classes above.
   dump_stack_traces();
 
-  // write HPROF_HEAP_DUMP or HPROF_HEAP_DUMP_SEGMENT
-  write_dump_header();
+  // write HPROF_HEAP_DUMP_SEGMENT
+  DumperSupport::write_dump_header(writer());
 
   // Writes HPROF_GC_CLASS_DUMP records
   ClassLoaderDataGraph::classes_do(&do_class_dump);
@@ -1759,9 +1800,9 @@
   check_segment_length();
 
   // writes HPROF_GC_INSTANCE_DUMP records.
-  // After each sub-record is written check_segment_length will be invoked. When
-  // generated a segmented heap dump this allows us to check if the current
-  // segment exceeds a threshold and if so, then a new segment is started.
+  // After each sub-record is written check_segment_length will be invoked
+  // to check if the current segment exceeds a threshold. If so, a new
+  // segment is started.
   // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
   // of the heap dump.
   HeapObjectDumper obj_dumper(this, writer());
@@ -1785,9 +1826,8 @@
   StickyClassDumper class_dumper(writer());
   SystemDictionary::always_strong_classes_do(&class_dumper);
 
-  // fixes up the length of the dump record. In the case of a segmented
-  // heap then the HPROF_HEAP_DUMP_END record is also written.
-  end_of_dump();
+  // fixes up the length of the dump record and writes the HPROF_HEAP_DUMP_END record.
+  DumperSupport::end_of_dump(writer());
 
   // Now we clear the global variables, so that a future dumper might run.
   clear_global_dumper();
--- a/hotspot/src/share/vm/trace/trace.xml	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/trace/trace.xml	Sat Mar 05 20:46:42 2016 -0800
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -73,12 +73,12 @@
   <events>
     <event id="ThreadStart" path="java/thread_start" label="Java Thread Start"
            has_thread="true" is_instant="true">
-      <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
+      <value type="THREAD" field="thread" label="Java Thread"/>
     </event>
 
     <event id="ThreadEnd" path="java/thread_end" label="Java Thread End"
            has_thread="true" is_instant="true">
-      <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
+      <value type="THREAD" field="thread" label="Java Thread"/>
     </event>
 
     <event id="ThreadSleep" path="java/thread_sleep" label="Java Thread Sleep"
@@ -96,14 +96,14 @@
     <event id="JavaMonitorEnter" path="java/monitor_enter" label="Java Monitor Blocked"
             has_thread="true" has_stacktrace="true" is_instant="false">
       <value type="CLASS" field="klass" label="Monitor Class"/>
-      <value type="JAVALANGTHREAD" field="previousOwner" label="Previous Monitor Owner"/>
+      <value type="THREAD" field="previousOwner" label="Previous Monitor Owner"/>
       <value type="ADDRESS" field="address" label="Monitor Address" relation="JAVA_MONITOR_ADDRESS"/>
     </event>
 
     <event id="JavaMonitorWait" path="java/monitor_wait" label="Java Monitor Wait" description="Waiting on a Java monitor"
             has_thread="true" has_stacktrace="true" is_instant="false">
       <value type="CLASS" field="klass" label="Monitor Class" description="Class of object waited on"/>
-      <value type="OSTHREAD" field="notifier" label="Notifier Thread" description="Notifying Thread"/>
+      <value type="THREAD" field="notifier" label="Notifier Thread" description="Notifying Thread"/>
       <value type="MILLIS" field="timeout" label="Timeout" description="Maximum wait time"/>
       <value type="BOOLEAN" field="timedOut" label="Timed Out" description="Wait has been timed out"/>
       <value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on" relation="JAVA_MONITOR_ADDRESS"/>
@@ -434,7 +434,7 @@
            description="Promotion of an object failed">
       <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
       <structvalue type="CopyFailed" field="data" label="Data"/>
-      <value type="OSTHREAD" field="thread" label="Running thread"/>
+      <value type="THREAD" field="thread" label="Running thread"/>
     </event>
 
     <event id="EvacuationFailed" path="vm/gc/detailed/evacuation_failed" label="Evacuation Failed" is_instant="true"
@@ -496,6 +496,11 @@
       <value type="UINT" field="allocContext" label="Allocation Context" />
     </event>
 
+    <event id="VMError" path="vm/runtime/vm_error" label="VM Error"
+           description="VM shutdown due to an error" has_stacktrace="true" has_thread="true">
+      <value type="BOOLEAN" field="out_of_java_memory" label="Java Out Of Memory"/>
+    </event>
+
     <!-- Compiler events -->
 
     <event id="Compilation" path="vm/compiler/compilation" label="Compilation"
@@ -569,7 +574,7 @@
       <value type="VMOPERATIONTYPE" field="operation" label="Operation" />
       <value type="BOOLEAN" field="safepoint" label="At Safepoint" description="If the operation occured at a safepoint."/>
       <value type="BOOLEAN" field="blocking" label="Caller Blocked" description="If the calling thread was blocked until the operation was complete."/>
-      <value type="OSTHREAD" field="caller" label="Caller" transition="FROM" description="Thread requesting operation. If non-blocking, will be set to 0 indicating thread is unknown."/>
+      <value type="THREAD" field="caller" label="Caller" transition="FROM" description="Thread requesting operation. If non-blocking, will be set to 0 indicating thread is unknown."/>
     </event>
 
     <!-- Allocation events -->
--- a/hotspot/src/share/vm/trace/traceBackend.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/trace/traceBackend.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,10 @@
 
   static void on_unloading_classes(void) {
   }
+
+  static void on_vm_error(bool) {
+  }
+
 };
 
 class TraceThreadData {
--- a/hotspot/src/share/vm/trace/traceDataTypes.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/trace/traceDataTypes.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,39 +31,32 @@
 
 enum {
   CONTENT_TYPE_NONE             = 0,
-  CONTENT_TYPE_BYTES            = 1,
-  CONTENT_TYPE_EPOCHMILLIS      = 2,
-  CONTENT_TYPE_MILLIS           = 3,
-  CONTENT_TYPE_NANOS            = 4,
-  CONTENT_TYPE_TICKS            = 5,
-  CONTENT_TYPE_ADDRESS          = 6,
+  CONTENT_TYPE_CLASS            = 20,
+  CONTENT_TYPE_UTF8             = 21,
+  CONTENT_TYPE_THREAD           = 22,
+  CONTENT_TYPE_STACKTRACE       = 23,
+  CONTENT_TYPE_BYTES            = 24,
+  CONTENT_TYPE_EPOCHMILLIS      = 25,
+  CONTENT_TYPE_MILLIS           = 26,
+  CONTENT_TYPE_NANOS            = 27,
+  CONTENT_TYPE_TICKS            = 28,
+  CONTENT_TYPE_ADDRESS          = 29,
+  CONTENT_TYPE_PERCENTAGE       = 30,
 
-  CONTENT_TYPE_OSTHREAD,
-  CONTENT_TYPE_JAVALANGTHREAD,
-  CONTENT_TYPE_STACKTRACE,
-  CONTENT_TYPE_CLASS,
-  CONTENT_TYPE_PERCENTAGE,
-
-  JVM_CONTENT_TYPES_START       = 30,
-  JVM_CONTENT_TYPES_END         = 100
+  JVM_CONTENT_TYPES_START       = 33,
+  JVM_CONTENT_TYPES_END         = 255
 };
 
 enum ReservedEvent {
-  EVENT_PRODUCERS,
+  EVENT_METADATA,
   EVENT_CHECKPOINT,
   EVENT_BUFFERLOST,
 
-  NUM_RESERVED_EVENTS
+  NUM_RESERVED_EVENTS = JVM_CONTENT_TYPES_END
 };
 
 typedef enum ReservedEvent ReservedEvent;
 
-typedef u8 classid;
-typedef u8 stacktraceid;
-typedef u8 methodid;
-typedef u8 fieldid;
-
-class TraceUnicodeString;
+class Symbol;
 
 #endif // SHARE_VM_TRACE_TRACEDATATYPES_HPP
-
--- a/hotspot/src/share/vm/trace/traceEvent.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/trace/traceEvent.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,6 @@
     _endTime = time;
   }
 
- public:
   TraceEvent(EventStartTime timing=TIMED) :
     _startTime(0),
     _endTime(0),
@@ -76,12 +75,21 @@
   {
     if (T::is_enabled()) {
       _started = true;
-      if (timing == TIMED && !T::isInstant) {
-        static_cast<T *>(this)->set_starttime(Tracing::time());
+      if (TIMED == timing && !T::isInstant) {
+        static_cast<T*>(this)->set_starttime(Tracing::time());
       }
     }
   }
 
+ public:
+  void set_starttime(const Ticks& time) {
+    _startTime = time.value();
+  }
+
+  void set_endtime(const Ticks& time) {
+    _endTime = time.value();
+  }
+
   static bool is_enabled() {
     return Tracing::is_event_enabled(T::eventId);
   }
@@ -90,67 +98,68 @@
     return _started;
   }
 
-  void ignoreCheck() {
-    DEBUG_ONLY(_ignore_check = true);
-  }
-
   void commit() {
     if (!should_commit()) {
-        cancel();
-        return;
+      DEBUG_ONLY(cancel());
+      return;
     }
-    if (_endTime == 0) {
+    assert(!_cancelled, "Committing an event that has already been cancelled");
+    if (_startTime == 0) {
+      static_cast<T*>(this)->set_starttime(Tracing::time());
+    } else if (_endTime == 0) {
       static_cast<T*>(this)->set_endtime(Tracing::time());
     }
     if (static_cast<T*>(this)->should_write()) {
       static_cast<T*>(this)->writeEvent();
     }
-    set_commited();
+    DEBUG_ONLY(set_commited());
   }
 
-  void set_starttime(const Ticks& time) {
-    _startTime = time.value();
-  }
-
-  void set_endtime(const Ticks& time) {
-    _endTime = time.value();
-  }
-
-  TraceEventId id() const {
+  static TraceEventId id() {
     return T::eventId;
   }
 
-  bool is_instant() const {
+  static bool is_instant() {
     return T::isInstant;
   }
 
-  bool is_requestable() const {
+  static bool is_requestable() {
     return T::isRequestable;
   }
 
-  bool has_thread() const {
+  static bool has_thread() {
     return T::hasThread;
   }
 
-  bool has_stacktrace() const {
+  static bool has_stacktrace() {
     return T::hasStackTrace;
   }
 
   void cancel() {
-    assert(!_committed && !_cancelled, "event was already committed/cancelled");
+    assert(!_committed && !_cancelled,
+      "event was already committed/cancelled");
     DEBUG_ONLY(_cancelled = true);
   }
 
-  void set_commited() {
-    assert(!_committed, "event has already been committed");
-    DEBUG_ONLY(_committed = true);
-  }
-
   ~TraceEvent() {
     if (_started) {
-      assert(_ignore_check || _committed || _cancelled, "event was not committed/cancelled");
+      assert(_ignore_check || _committed || _cancelled,
+        "event was not committed/cancelled");
     }
   }
+
+#ifdef ASSERT
+ protected:
+  void ignoreCheck() {
+    _ignore_check = true;
+  }
+
+ private:
+  void set_commited() {
+    assert(!_committed, "event has already been committed");
+    _committed = true;
+  }
+#endif // ASSERT
 };
 
 #endif // INCLUDE_TRACE
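
TraceEvent<T> above is a curiously recurring template pattern (CRTP) base class: it reaches the concrete event type through static_cast<T*>(this) and T's static members, so no virtual dispatch is involved. The following stand-alone sketch shows the same dispatch shape; EventBase and MyEvent are illustrative names, not part of the VM sources.

#include <iostream>

// EventBase dispatches to the derived class without virtual calls, the same
// way TraceEvent<T> calls set_starttime()/writeEvent() on its subclasses.
template <typename T>
class EventBase {
 public:
  void commit() {
    if (!T::is_enabled()) {
      return;
    }
    static_cast<T*>(this)->write_event();  // resolved at compile time
  }
};

class MyEvent : public EventBase<MyEvent> {
 public:
  static bool is_enabled() { return true; }
  void write_event() { std::cout << "MyEvent written\n"; }
};

int main() {
  MyEvent e;
  e.commit();
  return 0;
}
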
--- a/hotspot/src/share/vm/trace/traceEventClasses.xsl	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/trace/traceEventClasses.xsl	Sat Mar 05 20:46:42 2016 -0800
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -52,8 +52,8 @@
 class TraceEvent {
 public:
   TraceEvent() {}
-  void set_starttime(const Ticks&amp; time) {}
-  void set_endtime(const Ticks&amp; time) {}
+  void set_starttime(const Ticks&amp; ignore) {}
+  void set_endtime(const Ticks&amp; ignore) {}
   bool should_commit() const { return false; }
   static bool is_enabled() { return false; }
   void commit() {}
--- a/hotspot/src/share/vm/trace/traceEventIds.xsl	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/trace/traceEventIds.xsl	Sat Mar 05 20:46:42 2016 -0800
@@ -43,7 +43,7 @@
   _traceeventbase = (NUM_RESERVED_EVENTS-1), // Make sure we start at the right index.
   
   // Events -> enum entry
-<xsl:for-each select="trace/events/event">
+<xsl:for-each select="trace/events/*">
   <xsl:value-of select="concat('  Trace', @id, 'Event,', $newline)"/>
 </xsl:for-each>
   MaxTraceEventId
--- a/hotspot/src/share/vm/trace/traceMacros.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/trace/traceMacros.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,19 +25,29 @@
 #ifndef SHARE_VM_TRACE_TRACEMACROS_HPP
 #define SHARE_VM_TRACE_TRACEMACROS_HPP
 
+typedef u8 traceid;
+
 #define EVENT_THREAD_EXIT(thread)
 #define EVENT_THREAD_DESTRUCT(thread)
+#define TRACE_KLASS_CREATION(k, p, t)
 
-#define TRACE_INIT_ID(k)
+#define TRACE_INIT_KLASS_ID(k)
+#define TRACE_INIT_THREAD_ID(td)
 #define TRACE_DATA TraceThreadData
 
+#define THREAD_TRACE_ID(thread) ((traceid)thread->osthread()->thread_id())
 #define TRACE_START() JNI_OK
 #define TRACE_INITIALIZE() JNI_OK
 
-#define TRACE_DEFINE_KLASS_METHODS typedef int ___IGNORED_hs_trace_type1
-#define TRACE_DEFINE_KLASS_TRACE_ID typedef int ___IGNORED_hs_trace_type2
-#define TRACE_DEFINE_OFFSET typedef int ___IGNORED_hs_trace_type3
-#define TRACE_ID_OFFSET in_ByteSize(0); ShouldNotReachHere()
+#define TRACE_DEFINE_TRACE_ID_METHODS typedef int ___IGNORED_hs_trace_type1
+#define TRACE_DEFINE_TRACE_ID_FIELD typedef int ___IGNORED_hs_trace_type2
+#define TRACE_DEFINE_KLASS_TRACE_ID_OFFSET typedef int ___IGNORED_hs_trace_type3
+#define TRACE_KLASS_TRACE_ID_OFFSET in_ByteSize(0); ShouldNotReachHere()
+#define TRACE_DEFINE_THREAD_TRACE_DATA_OFFSET typedef int ___IGNORED_hs_trace_type4
+#define TRACE_THREAD_TRACE_DATA_OFFSET in_ByteSize(0); ShouldNotReachHere()
+#define TRACE_DEFINE_THREAD_TRACE_ID_OFFSET typedef int ___IGNORED_hs_trace_type5
+#define TRACE_THREAD_TRACE_ID_OFFSET in_ByteSize(0); ShouldNotReachHere()
+#define TRACE_DEFINE_THREAD_ID_SIZE typedef int ___IGNORED_hs_trace_type6
 #define TRACE_TEMPLATES(template)
 #define TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)
 
--- a/hotspot/src/share/vm/trace/tracetypes.xml	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/trace/tracetypes.xml	Sat Mar 05 20:46:42 2016 -0800
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -60,27 +60,16 @@
  <types>
   <content_types>
     <content_type id="Thread" hr_name="Thread"
-                  type="U4" builtin_type="OSTHREAD">
-      <value type="UTF8" field="name" label="Thread name"/>
-    </content_type>
-
-    <content_type id="VMThread" hr_name="VM Thread"
-                  type="U8" jvm_type="VMTHREAD">
-      <value type="OSTHREAD" field="thread" label="VM Thread"/>
-    </content_type>
-
-    <content_type id="JavaThread" hr_name="Java thread"
-                  type="U8" builtin_type="JAVALANGTHREAD">
-      <value type="OSTHREAD" field="thread" label="OS Thread ID"/>
-      <value type="BYTES64" field="allocInsideTla"
-             label="Allocated bytes inside TLAs"/>
-      <value type="BYTES64" field="allocOutsideTla"
-             label="Allocated bytes outside TLAs"/>
+                  type="U8" builtin_type="THREAD">
+      <value type="UTF8" field="osName" label="OS Thread Name"/>
+      <value type="LONG" field="osThreadID" label="OS Thread ID"/>
+      <value type="UTF8" field="javaName" label="Java Lang Thread Name"/>
+      <value type="LONG" field="javaThreadID" label="Java Lang Thread ID"/>
       <value type="THREADGROUP" field="group" label="Java Thread Group"/>
     </content_type>
 
     <content_type id="ThreadGroup" hr_name="Thread group"
-                  type="U4" jvm_type="THREADGROUP">
+                  type="U8" jvm_type="THREADGROUP">
       <value type="THREADGROUP" field="parent" label="Parent"/>
       <value type="UTF8" field="name" label="Name"/>
     </content_type>
@@ -107,82 +96,82 @@
     </content_type>
 
     <content_type id="ThreadState" hr_name="Java Thread State"
-                  type="U2" jvm_type="THREADSTATE">
+                  type="U8" jvm_type="THREADSTATE">
       <value type="UTF8" field="name" label="Name"/>
     </content_type>
 
     <content_type id="GCName" hr_name="GC Name"
-                  type="U1" jvm_type="GCNAME">
+                  type="U8" jvm_type="GCNAME">
       <value type="UTF8" field="name" label="name" />
     </content_type>
 
     <content_type id="GCCause" hr_name="GC Cause"
-                  type="U2" jvm_type="GCCAUSE">
+                  type="U8" jvm_type="GCCAUSE">
       <value type="UTF8" field="cause" label="cause" />
     </content_type>
 
     <content_type id="GCWhen" hr_name="GC When"
-                  type="U1" jvm_type="GCWHEN">
+                  type="U8" jvm_type="GCWHEN">
       <value type="UTF8" field="when" label="when" />
     </content_type>
 
     <content_type id="G1HeapRegionType" hr_name="G1 Heap Region Type"
-                  type="U1" jvm_type="G1HEAPREGIONTYPE">
+                  type="U8" jvm_type="G1HEAPREGIONTYPE">
       <value type="UTF8" field="type" label="type" />
     </content_type>
     
     <content_type id="G1YCType" hr_name="G1 YC Type"
-                  type="U1" jvm_type="G1YCTYPE">
+                  type="U8" jvm_type="G1YCTYPE">
       <value type="UTF8" field="type" label="type" />
     </content_type>
 
     <content_type id="GCThresholdUpdater" hr_name="GC Treshold Updater"
-                  type="U1" jvm_type="GCTHRESHOLDUPDATER">
+                  type="U8" jvm_type="GCTHRESHOLDUPDATER">
       <value type="UTF8" field="updater" label="updater" />
     </content_type>
 
     <content_type id="ReferenceType" hr_name="Reference Type"
-                  type="U1" jvm_type="REFERENCETYPE">
+                  type="U8" jvm_type="REFERENCETYPE">
       <value type="UTF8" field="type" label="type" />
     </content_type>
 
     <content_type id="MetadataType" hr_name="Metadata Type"
-                  type="U1" jvm_type="METADATATYPE">
+                  type="U8" jvm_type="METADATATYPE">
       <value type="UTF8" field="type" label="type" />
     </content_type>
 
     <content_type id="MetaspaceObjectType" hr_name="Metaspace Object Type"
-                  type="U1" jvm_type="METASPACEOBJTYPE">
+                  type="U8" jvm_type="METASPACEOBJTYPE">
       <value type="UTF8" field="type" label="type" />
     </content_type>
 
     <content_type id="NARROW_OOP_MODE" hr_name="Narrow Oop Mode"
-                  type="U1" jvm_type="NARROWOOPMODE">
+                  type="U8" jvm_type="NARROWOOPMODE">
       <value type="UTF8" field="mode" label="mode" />
     </content_type>
 
     <content_type id="VMOperationType" hr_name="VM Operation Type"
-                  type="U2" jvm_type="VMOPERATIONTYPE">
+                  type="U8" jvm_type="VMOPERATIONTYPE">
       <value type="UTF8" field="type" label="type" />
     </content_type>
 
     <content_type id="CompilerPhaseType" hr_name="Compiler Phase Type"
-                  type="U1" jvm_type="COMPILERPHASETYPE">
+                  type="U8" jvm_type="COMPILERPHASETYPE">
       <value type="UTF8" field="phase" label="phase" />
     </content_type>
 
     <content_type id="FlagValueOrigin" hr_name="Flag Value Origin"
-                  type="U1" jvm_type="FLAGVALUEORIGIN">
+                  type="U8" jvm_type="FLAGVALUEORIGIN">
       <value type="UTF8" field="origin" label="origin" />
     </content_type>
 
     <content_type id="CodeBlobType" hr_name="Code Blob Type"
-                  type="U1" jvm_type="CODEBLOBTYPE">
+                  type="U8" jvm_type="CODEBLOBTYPE">
       <value type="UTF8" field="type" label="type" />
     </content_type>
 
     <content_type id="InflateCause" hr_name="Inflation Cause"
-                  type="U1" jvm_type="INFLATECAUSE">
+                  type="U8" jvm_type="INFLATECAUSE">
       <value type="UTF8" field="cause" label="cause" />
     </content_type>
   </content_types>
@@ -245,11 +234,11 @@
                   type="bool" sizeop="1"/>
 
     <!-- 32-bit unsigned integer, SEMANTIC value BYTES -->
-    <primary_type symbol="BYTES" datatype="U4" contenttype="BYTES"
-                  type="u4" sizeop="sizeof(u4)"/>
+    <primary_type symbol="BYTES" datatype="U8" contenttype="BYTES"
+                  type="u8" sizeop="sizeof(u8)"/>
 
-    <primary_type symbol="IOBYTES" datatype="U4" contenttype="BYTES"
-                  type="u4" sizeop="sizeof(u4)"/>
+    <primary_type symbol="IOBYTES" datatype="U8" contenttype="BYTES"
+                  type="u8" sizeop="sizeof(u8)"/>
 
     <!-- 64-bit unsigned integer, SEMANTIC value BYTES -->
     <primary_type symbol="BYTES64" datatype="U8" contenttype="BYTES"
@@ -280,122 +269,109 @@
                   type="u8" sizeop="sizeof(u8)"/>
 
     <!-- 32-bit float, SEMANTIC value PERCENTAGE (0.0-1.0) -->
-    <primary_type symbol="PERCENT" datatype="FLOAT" contenttype="PERCENTAGE"
+    <primary_type symbol="PERCENTAGE" datatype="FLOAT" contenttype="PERCENTAGE"
                   type="float" sizeop="sizeof(float)"/>
 
-    <!-- UTF-encoded string, max length 64k -->
+    <!-- UTF8-encoded string, max length Integer.MAX_VALUE -->
     <primary_type symbol="UTF8" datatype="UTF8" contenttype="NONE"
-                  type="const char *" sizeop="sizeof_utf(%)"/>
-
-    <!-- UTF-16 encoded (Unicode) string, max length maxjuint -->
-    <primary_type symbol="STRING" datatype="STRING" contenttype="NONE"
-                  type="TraceUnicodeString*" sizeop="sizeof_unicode(%)"/>
+                  type="const char*" sizeop="sizeof_utf(%)"/>
 
     <!-- Symbol* constant. Note that this may currently ONLY be used by
           classes, methods and fields. This restriction might be lifted. -->
     <primary_type symbol="SYMBOL" datatype="U8" contenttype="SYMBOL"
-                  type="Symbol *" sizeop="sizeof(u8)"/>
+                  type="const Symbol*" sizeop="sizeof(u8)"/>
 
     <!-- A Klass *. The actual class is marked as "used" and will
          eventually be written into the recording constant pool -->
     <primary_type symbol="CLASS" datatype="U8" contenttype="CLASS"
-                  type="Klass *" sizeop="sizeof(u8)"/>
+                  type="const Klass*" sizeop="sizeof(u8)"/>
 
     <!-- A Method *. The method is marked as "used" and will eventually be
          written into the recording constant pool. -->
     <primary_type symbol="METHOD" datatype="U8" contenttype="METHOD"
-                  type="Method *" sizeop="sizeof(u8)"/>
+                  type="const Method*" sizeop="sizeof(u8)"/>
 
     <!--  The type for stacktraces in the recording. Should not be used by
           events explicitly -->
     <primary_type symbol="STACKTRACE" datatype="U8" contenttype="STACKTRACE"
                   type="u8" sizeop="sizeof(u8)"/>
 
-    <!-- OS Thread ID -->
-    <primary_type symbol="OSTHREAD" datatype="U4" contenttype="OSTHREAD"
-                  type="u4" sizeop="sizeof(u4)"/>
-
-    <!-- VM Thread ID Note: changed from U2 to U8 for hotspot -->
-    <primary_type symbol="VMTHREAD" datatype="U8" contenttype="VMTHREAD"
-                  type="u8"  sizeop="sizeof(u8)"/>
-
-    <!-- Java Thread ID -->
-    <primary_type symbol="JAVALANGTHREAD" datatype="LONG"
-                  contenttype="JAVALANGTHREAD" type="s8"
-                  sizeop="sizeof(s8)"/>
+    <!-- Thread ID -->
+    <primary_type symbol="THREAD" datatype="U8" contenttype="THREAD"
+                  type="u8" sizeop="sizeof(u8)"/>
 
     <!-- Threadgroup THIS TYPE MAY NOT BE USED IN NORMAL EVENTS (ATM). Only
           for thread constant pool // KK TODO: u8 should be ObjectP -->
-    <primary_type symbol="THREADGROUP" datatype="U4" contenttype="THREADGROUP"
+    <primary_type symbol="THREADGROUP" datatype="U8" contenttype="THREADGROUP"
                   type="u8"
-                  sizeop="sizeof(u4)"/>
+                  sizeop="sizeof(u8)"/>
 
     <!-- FRAMETYPE enum -->
-    <primary_type symbol="FRAMETYPE" datatype="U1" contenttype="FRAMETYPE"
-                  type="u1" sizeop="sizeof(u1)"/>
+    <primary_type symbol="FRAMETYPE" datatype="U8" contenttype="FRAMETYPE"
+                  type="u8" sizeop="sizeof(u8)"/>
 
     <!-- THREADSTATE enum -->
-    <primary_type symbol="THREADSTATE" datatype="U2" contenttype="THREADSTATE"
-                  type="u2" sizeop="sizeof(u2)"/>
+    <primary_type symbol="THREADSTATE" datatype="U8" contenttype="THREADSTATE"
+                  type="u8" sizeop="sizeof(u8)"/>
 
     <!-- GCName -->
-    <primary_type symbol="GCNAME" datatype="U1" contenttype="GCNAME"
-                  type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="GCNAME" datatype="U8" contenttype="GCNAME"
+                  type="u8" sizeop="sizeof(u8)" />
 
     <!-- GCCAUSE -->
-    <primary_type symbol="GCCAUSE" datatype="U2" contenttype="GCCAUSE"
-                  type="u2" sizeop="sizeof(u2)" />
+    <primary_type symbol="GCCAUSE" datatype="U8" contenttype="GCCAUSE"
+                  type="u8" sizeop="sizeof(u8)" />
 
     <!-- GCWHEN -->
-    <primary_type symbol="GCWHEN" datatype="U1" contenttype="GCWHEN"
-                  type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="GCWHEN" datatype="U8" contenttype="GCWHEN"
+                  type="u8" sizeop="sizeof(u8)" />
 
     <!-- G1HEAPREGIONTYPE -->
-    <primary_type symbol="G1HEAPREGIONTYPE" datatype="U1" contenttype="G1HEAPREGIONTYPE"
-                  type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="G1HEAPREGIONTYPE" datatype="U8" contenttype="G1HEAPREGIONTYPE"
+                  type="u8" sizeop="sizeof(u8)" />
 
     <!-- G1YCType -->
-    <primary_type symbol="G1YCTYPE" datatype="U1" contenttype="G1YCTYPE"
-                  type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="G1YCTYPE" datatype="U8" contenttype="G1YCTYPE"
+                  type="u8" sizeop="sizeof(u8)" />
 
     <!-- GCTHRESHOLDUPDATER -->
-    <primary_type symbol="GCTHRESHOLDUPDATER" datatype="U1" contenttype="GCTHRESHOLDUPDATER"
-                  type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="GCTHRESHOLDUPDATER" datatype="U8" contenttype="GCTHRESHOLDUPDATER"
+                  type="u8" sizeop="sizeof(u8)" />
 
     <!-- REFERENCETYPE -->
-    <primary_type symbol="REFERENCETYPE" datatype="U1"
-                  contenttype="REFERENCETYPE" type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="REFERENCETYPE" datatype="U8"
+                  contenttype="REFERENCETYPE" type="u8" sizeop="sizeof(u8)" />
 
     <!-- METADATATYPE -->
-    <primary_type symbol="METADATATYPE" datatype="U1"
-                  contenttype="METADATATYPE" type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="METADATATYPE" datatype="U8"
+                  contenttype="METADATATYPE" type="u8" sizeop="sizeof(u8)" />
 
     <!-- METADATAOBJTYPE -->
-    <primary_type symbol="METASPACEOBJTYPE" datatype="U1"
-                  contenttype="METASPACEOBJTYPE" type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="METASPACEOBJTYPE" datatype="U8"
+                  contenttype="METASPACEOBJTYPE" type="u8" sizeop="sizeof(u8)" />
 
     <!-- NARROWOOPMODE -->
-    <primary_type symbol="NARROWOOPMODE" datatype="U1"
-                  contenttype="NARROWOOPMODE" type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="NARROWOOPMODE" datatype="U8"
+                  contenttype="NARROWOOPMODE" type="u8" sizeop="sizeof(u8)" />
 
     <!-- COMPILERPHASETYPE -->
-    <primary_type symbol="COMPILERPHASETYPE" datatype="U1"
-                  contenttype="COMPILERPHASETYPE" type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="COMPILERPHASETYPE" datatype="U8"
+                  contenttype="COMPILERPHASETYPE" type="u8" sizeop="sizeof(u8)" />
 
     <!-- VMOPERATIONTYPE -->
-    <primary_type symbol="VMOPERATIONTYPE" datatype="U2" contenttype="VMOPERATIONTYPE"
-                  type="u2" sizeop="sizeof(u2)" />
+    <primary_type symbol="VMOPERATIONTYPE" datatype="U8" contenttype="VMOPERATIONTYPE"
+                  type="u8" sizeop="sizeof(u8)" />
                   
     <!-- FLAGVALUEORIGIN -->
-    <primary_type symbol="FLAGVALUEORIGIN" datatype="U1"
-                  contenttype="FLAGVALUEORIGIN" type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="FLAGVALUEORIGIN" datatype="U8"
+                  contenttype="FLAGVALUEORIGIN" type="u8" sizeop="sizeof(u8)" />
                   
     <!-- CODEBLOBTYPE -->
-    <primary_type symbol="CODEBLOBTYPE" datatype="U1"
-                  contenttype="CODEBLOBTYPE" type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="CODEBLOBTYPE" datatype="U8"
+                  contenttype="CODEBLOBTYPE" type="u8" sizeop="sizeof(u8)" />
 
     <!-- INFLATECAUSE -->
-    <primary_type symbol="INFLATECAUSE" datatype="U1"
-                  contenttype="INFLATECAUSE" type="u1" sizeop="sizeof(u1)" />
+    <primary_type symbol="INFLATECAUSE" datatype="U8"
+                  contenttype="INFLATECAUSE" type="u8" sizeop="sizeof(u8)" />
   </primary_types>
 </types>
--- a/hotspot/src/share/vm/utilities/debug.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/debug.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -51,9 +51,14 @@
 #include "services/heapDumper.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/top.hpp"
 #include "utilities/vmError.hpp"
 
+#if INCLUDE_TRACE
+#include "trace/tracing.hpp"
+#endif
+
 #ifndef ASSERT
 #  ifdef _DEBUG
    // NOTE: don't turn the lines below into a comment -- if you're getting
@@ -280,6 +285,12 @@
    exit(2);
 }
 
+static void notify_tracing() {
+#if INCLUDE_TRACE
+  Tracing::on_vm_error(true);
+#endif
+}
+
 void report_insufficient_metaspace(size_t required_size) {
   warning("\nThe MaxMetaspaceSize of " SIZE_FORMAT " bytes is not large enough.\n"
           "Either don't specify the -XX:MaxMetaspaceSize=<size>\n"
@@ -302,6 +313,8 @@
       HeapDumper::dump_heap_from_oome();
     }
 
+    notify_tracing();
+
     if (OnOutOfMemoryError && OnOutOfMemoryError[0]) {
       VMError::report_java_out_of_memory(message);
     }
--- a/hotspot/src/share/vm/utilities/growableArray.hpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/growableArray.hpp	Sat Mar 05 20:46:42 2016 -0800
@@ -396,7 +396,7 @@
     int max = length() - 1;
 
     while (max >= min) {
-      int mid = (max + min) / 2;
+      int mid = (int)(((uint)max + min) / 2);
       E value = at(mid);
       int diff = compare(key, value);
       if (diff > 0) {
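
The one-character change above replaces (max + min) / 2 because, with int bounds near INT_MAX, the signed sum can overflow (undefined behaviour), while performing the addition in unsigned arithmetic still yields the correct midpoint for the non-negative indices a GrowableArray uses. A small stand-alone check of that claim; safe_mid is an illustrative helper, not a VM function.

#include <cassert>
#include <climits>

// Valid for non-negative bounds (array indices): the unsigned sum cannot
// overflow for two values <= INT_MAX, and the result fits back into an int.
static int safe_mid(int min, int max) {
  return (int)(((unsigned int)max + min) / 2);
}

int main() {
  int min = INT_MAX - 1;
  int max = INT_MAX;
  // (max + min) in signed arithmetic would overflow; the unsigned form
  // produces the expected midpoint.
  assert(safe_mid(min, max) == INT_MAX - 1);
  return 0;
}
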
--- a/hotspot/src/share/vm/utilities/hashtable.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/hashtable.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -383,4 +383,9 @@
 template class BasicHashtable<mtSymbol>;
 template class BasicHashtable<mtCode>;
 template class BasicHashtable<mtInternal>;
+#if INCLUDE_TRACE
+template class Hashtable<Symbol*, mtTracing>;
+template class HashtableEntry<Symbol*, mtTracing>;
+template class BasicHashtable<mtTracing>;
+#endif
 template class BasicHashtable<mtCompiler>;
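
The added lines rely on explicit template instantiation: the Hashtable templates are defined in this .cpp file, so every specialization that other translation units link against must be instantiated here, and the new mtTracing lines do that for the tracing code. A minimal illustration of the mechanism follows; the class Box is made up for the sketch and stands in for Hashtable.

#include <cstdio>

template <typename T>
class Box {
 public:
  explicit Box(T v) : value_(v) {}
  T value() const { return value_; }
 private:
  T value_;
};

// Explicit instantiation: forces the compiler to emit Box<int>'s members in
// this translation unit, the way hashtable.cpp emits Hashtable<Symbol*, mtTracing>.
template class Box<int>;

int main() {
  Box<int> b(42);
  std::printf("%d\n", b.value());
  return 0;
}
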
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Sat Mar 05 20:46:42 2016 -0800
@@ -1121,6 +1121,10 @@
   if (first_error_tid == -1 &&
       Atomic::cmpxchg_ptr(mytid, &first_error_tid, -1) == -1) {
 
+    // Initialize time stamps to use the same base.
+    out.time_stamp().update_to(1);
+    log.time_stamp().update_to(1);
+
     _id = id;
     _message = message;
     _thread = thread;
--- a/hotspot/test/TEST.groups	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/TEST.groups	Sat Mar 05 20:46:42 2016 -0800
@@ -297,7 +297,8 @@
   compiler/types/ \
   compiler/uncommontrap/ \
   compiler/unsafe/ \
-  -compiler/intrinsics/bmi/verifycode \
+  -compiler/intrinsics/adler32 \
+  -compiler/intrinsics/bmi \
   -compiler/intrinsics/mathexact \
   -compiler/intrinsics/multiplytolen \
   -compiler/intrinsics/sha \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/c1/CanonicalizeArrayLength.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8150102 8150514 8150534
+ * @summary C1 crashes in Canonicalizer::do_ArrayLength() after fix for JDK-8150102
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:CompileThreshold=100 -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-BackgroundCompilation CanonicalizeArrayLength
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:CompileThreshold=100 -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-BackgroundCompilation -XX:+PatchALot CanonicalizeArrayLength
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:CompileThreshold=100 -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-BackgroundCompilation -XX:ScavengeRootsInCode=0 CanonicalizeArrayLength
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:CompileThreshold=100 -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-BackgroundCompilation -XX:ScavengeRootsInCode=1 CanonicalizeArrayLength
+ */
+public class CanonicalizeArrayLength {
+    int[] arr = new int[42];
+    int[] arrNull = null;
+
+    final int[] finalArr = new int[42];
+    final int[] finalArrNull = null;
+
+    static int[] staticArr = new int[42];
+    static int[] staticArrNull = null;
+
+    static final int[] staticFinalArr = new int[42];
+    static final int[] staticFinalArrNull = null;
+
+    public static void main(String... args) {
+        CanonicalizeArrayLength t = new CanonicalizeArrayLength();
+        for (int i = 0; i < 20000; i++) {
+            if (t.testLocal() != 42)
+                throw new IllegalStateException();
+            if (t.testLocalNull() != 42)
+                throw new IllegalStateException();
+            if (t.testField() != 42)
+                throw new IllegalStateException();
+            if (t.testFieldNull() != 42)
+                throw new IllegalStateException();
+            if (t.testFinalField() != 42)
+                throw new IllegalStateException();
+            if (t.testFinalFieldNull() != 42)
+                throw new IllegalStateException();
+            if (t.testStaticField() != 42)
+                throw new IllegalStateException();
+            if (t.testStaticFieldNull() != 42)
+                throw new IllegalStateException();
+            if (t.testStaticFinalField() != 42)
+                throw new IllegalStateException();
+            if (t.testStaticFinalFieldNull() != 42)
+                throw new IllegalStateException();
+        }
+    }
+
+    int testField() {
+        try {
+            return arr.length;
+        } catch (Throwable t) {
+            return -1;
+        }
+    }
+
+    int testFieldNull() {
+        try {
+            return arrNull.length;
+        } catch (Throwable t) {
+            return 42;
+        }
+    }
+
+    int testFinalField() {
+        try {
+            return finalArr.length;
+        } catch (Throwable t) {
+            return -1;
+        }
+    }
+
+    int testFinalFieldNull() {
+        try {
+            return finalArrNull.length;
+        } catch (Throwable t) {
+            return 42;
+        }
+    }
+
+    int testStaticField() {
+        try {
+            return staticArr.length;
+        } catch (Throwable t) {
+            return -1;
+        }
+    }
+
+    int testStaticFieldNull() {
+        try {
+            return staticArrNull.length;
+        } catch (Throwable t) {
+            return 42;
+        }
+    }
+
+    int testStaticFinalField() {
+        try {
+            return staticFinalArr.length;
+        } catch (Throwable t) {
+            return -1;
+        }
+    }
+
+    int testStaticFinalFieldNull() {
+        try {
+            return staticFinalArrNull.length;
+        } catch (Throwable t) {
+            return 42;
+        }
+    }
+
+    int testLocal() {
+        int[] arr = new int[42];
+        try {
+            return arr.length;
+        } catch (Throwable t) {
+            return -1;
+        }
+    }
+
+    int testLocalNull() {
+        int[] arrNull = null;
+        try {
+            return arrNull.length;
+        } catch (Throwable t) {
+            return 42;
+        }
+    }
+
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/c2/TestDominatingDeadCheckCast.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8149797
+ * @summary node replaced by dominating dead cast during parsing
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TypeProfileLevel=200 -XX:CompileCommand=dontinline,TestDominatingDeadCheckCast::not_inlined TestDominatingDeadCheckCast
+ *
+ */
+
+public class TestDominatingDeadCheckCast {
+
+    static class A {
+        int f;
+    }
+
+    static class B extends A {
+    }
+
+    static A not_inlined() {
+        return new A();
+    }
+
+    static void inlined(A param) {
+        param.f = 42;
+    }
+
+    static A field;
+
+    static void test(boolean flag1, boolean flag2, boolean flag3, boolean flag4, boolean flag5) {
+        // Go through memory rather than through a local to defeat C2's replace_in_map
+        field = not_inlined();
+        // Speculation adds a CheckCast on entry of this inlined
+        // method for the parameter
+        inlined(field);
+        // Walking up the dominators is depth limited, so make the CheckCast
+        // above unreachable from the last inlined call
+        if (flag1) {
+            if (flag2) {
+                if (flag3) {
+                    // Speculation adds a CheckCast on entry of this
+                    // inlined method for the parameter. This
+                    // CheckCast is replaced by the CheckCast of the
+                    // first inlined method call but the replaced
+                    // CheckCast is still around during parsing.
+                    inlined(field);
+                    // Same as above, some useless control
+                    if (flag4) {
+                        if (flag5) {
+                            // Speculation adds a CheckCast on entry
+                            // of this inlined method for the
+                            // parameter. This CheckCast is replaced
+                            // by the dead CheckCast of the previous
+                            // inlined() call.
+                            inlined(field);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        field = new A();
+        for (int i = 0; i < 20000; i++) {
+            test(true, true, true, true, true);
+        }
+    }
+}
--- a/hotspot/test/compiler/compilercontrol/TestCompilerDirectivesCompatibilityBase.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/compilercontrol/TestCompilerDirectivesCompatibilityBase.java	Sat Mar 05 20:46:42 2016 -0800
@@ -24,25 +24,26 @@
 /*
  * @test TestCompilerDirectivesCompatibilityBase
  * @bug 8137167
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/sun.misc
  *          java.compiler
  *          java.management
  * @build jdk.test.lib.*
- * @build jdk.test.lib.dcmd.*
- * @build sun.hotspot.WhiteBox
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run testng/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestCompilerDirectivesCompatibilityBase
+ *        jdk.test.lib.dcmd.*
+ *        sun.hotspot.WhiteBox
+ *        compiler.testlibrary.CompilerUtils
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run testng/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestCompilerDirectivesCompatibilityBase
  * @summary Test compiler control compatibility with compile command
  */
 
+import compiler.testlibrary.CompilerUtils;
+import compiler.whitebox.CompilerWhiteBoxTest;
 import jdk.test.lib.dcmd.CommandExecutor;
 import jdk.test.lib.dcmd.JMXExecutor;
-
 import org.testng.annotations.Test;
 import org.testng.Assert;
-
 import sun.hotspot.WhiteBox;
 
 import java.io.BufferedReader;
@@ -64,32 +65,38 @@
         method  = getMethod(TestCompilerDirectivesCompatibilityBase.class, "helper");
         nomatch = getMethod(TestCompilerDirectivesCompatibilityBase.class, "another");
 
-        testCompatibility(executor);
+        int[] levels = CompilerUtils.getAvailableCompilationLevels();
+        for (int complevel : levels) {
+            // Only test the major compilers, ignore profiling levels
+            if (complevel == CompilerWhiteBoxTest.COMP_LEVEL_SIMPLE || complevel == CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION){
+                testCompatibility(executor, complevel);
+            }
+        }
     }
 
-    public void testCompatibility(CommandExecutor executor) throws Exception {
+    public void testCompatibility(CommandExecutor executor, int comp_level) throws Exception {
 
         // Call all validation twice to catch error when overwriting a directive
         // Flag is default off
         expect(!WB.getBooleanVMFlag("PrintAssembly"));
-        expect(!WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
-        expect(!WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
 
         // load directives that turn it on
         executor.execute("Compiler.directives_add " + control_on);
-        expect(WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
-        expect(WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
 
         // remove and see that it is true again
         executor.execute("Compiler.directives_remove");
-        expect(!WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
-        expect(!WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
     }
 
     public void expect(boolean test) throws Exception {
--- a/hotspot/test/compiler/compilercontrol/TestCompilerDirectivesCompatibilityCommandOff.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/compilercontrol/TestCompilerDirectivesCompatibilityCommandOff.java	Sat Mar 05 20:46:42 2016 -0800
@@ -24,16 +24,17 @@
 /*
  * @test TestCompilerDirectivesCompatibilityCommandOff
  * @bug 8137167
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/sun.misc
  *          java.compiler
  *          java.management
  * @build jdk.test.lib.*
- * @build jdk.test.lib.dcmd.*
- * @build sun.hotspot.WhiteBox
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run testng/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *        jdk.test.lib.dcmd.*
+ *        sun.hotspot.WhiteBox
+ *        compiler.testlibrary.CompilerUtils
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run testng/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions
  *      -XX:-PrintAssembly -XX:CompileCommand=option,*.helper,bool,PrintAssembly,false
  *      -XX:+WhiteBoxAPI TestCompilerDirectivesCompatibilityCommandOff
  * @summary Test compiler control compatibility with compile command
@@ -55,27 +56,27 @@
 
 public class TestCompilerDirectivesCompatibilityCommandOff extends TestCompilerDirectivesCompatibilityBase {
 
-    public void testCompatibility(CommandExecutor executor) throws Exception {
+    public void testCompatibility(CommandExecutor executor, int comp_level) throws Exception {
 
         // Call all validation twice to catch error when overwriting a directive
         // Flag is default off
-        expect(!WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
-        expect(!WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
 
         // load directives that turn it on
         executor.execute("Compiler.directives_add " + control_on);
-        expect(WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
-        expect(WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
 
         // remove and see that it is false again
         executor.execute("Compiler.directives_remove");
-        expect(!WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
-        expect(!WB.shouldPrintAssembly(method));
-        expect(!WB.shouldPrintAssembly(nomatch));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(!WB.shouldPrintAssembly(nomatch, comp_level));
     }
 }
--- a/hotspot/test/compiler/compilercontrol/TestCompilerDirectivesCompatibilityCommandOn.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/compilercontrol/TestCompilerDirectivesCompatibilityCommandOn.java	Sat Mar 05 20:46:42 2016 -0800
@@ -24,16 +24,17 @@
 /*
  * @test TestCompilerDirectivesCompatibilityCommandOn
  * @bug 8137167
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/sun.misc
  *          java.compiler
  *          java.management
  * @build jdk.test.lib.*
- * @build jdk.test.lib.dcmd.*
- * @build sun.hotspot.WhiteBox
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run testng/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *        jdk.test.lib.dcmd.*
+ *        sun.hotspot.WhiteBox
+ *        compiler.testlibrary.CompilerUtils
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run testng/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions
  *      -XX:-PrintAssembly -XX:CompileCommand=print,*.* -XX:+WhiteBoxAPI
  *      TestCompilerDirectivesCompatibilityCommandOn
  * @summary Test compiler control compatibility with compile command
@@ -55,27 +56,27 @@
 
 public class TestCompilerDirectivesCompatibilityCommandOn extends TestCompilerDirectivesCompatibilityBase{
 
-    public void testCompatibility(CommandExecutor executor) throws Exception {
+    public void testCompatibility(CommandExecutor executor, int comp_level) throws Exception {
 
         // Call all validation twice to catch error when overwriting a directive
         // Flag is default on
-        expect(WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
-        expect(WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
 
         // load directives that turn it off
         executor.execute("Compiler.directives_add " + control_off);
-        expect(!WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
-        expect(!WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
 
         // remove and see that it is true again
         executor.execute("Compiler.directives_remove");
-        expect(WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
-        expect(WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
     }
 }
--- a/hotspot/test/compiler/compilercontrol/TestCompilerDirectivesCompatibilityFlag.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/compilercontrol/TestCompilerDirectivesCompatibilityFlag.java	Sat Mar 05 20:46:42 2016 -0800
@@ -24,16 +24,17 @@
 /*
  * @test TestCompilerDirectivesCompatibilityFlag
  * @bug 8137167
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /
  * @modules java.base/sun.misc
  *          java.compiler
  *          java.management
  * @build jdk.test.lib.*
- * @build jdk.test.lib.dcmd.*
- * @build sun.hotspot.WhiteBox
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run testng/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *        jdk.test.lib.dcmd.*
+ *        sun.hotspot.WhiteBox
+ *        compiler.testlibrary.CompilerUtils
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run testng/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions
  *      -XX:+PrintAssembly -XX:+WhiteBoxAPI TestCompilerDirectivesCompatibilityFlag
  * @summary Test compiler control compatibility with compile command
  */
@@ -54,28 +55,28 @@
 
 public class TestCompilerDirectivesCompatibilityFlag extends TestCompilerDirectivesCompatibilityBase {
 
-    public void testCompatibility(CommandExecutor executor) throws Exception {
+    public void testCompatibility(CommandExecutor executor, int comp_level) throws Exception {
 
         // Call all validation twice to catch error when overwriting a directive
         // Flag is default on
         expect(WB.getBooleanVMFlag("PrintAssembly"));
-        expect(WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
-        expect(WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
 
         // load directives that turn it off
         executor.execute("Compiler.directives_add " + control_off);
-        expect(!WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
-        expect(!WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(!WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
 
         // remove and see that it is true again
         executor.execute("Compiler.directives_remove");
-        expect(WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
-        expect(WB.shouldPrintAssembly(method));
-        expect(WB.shouldPrintAssembly(nomatch));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
+        expect(WB.shouldPrintAssembly(method, comp_level));
+        expect(WB.shouldPrintAssembly(nomatch, comp_level));
     }
 }
--- a/hotspot/test/compiler/compilercontrol/share/AbstractTestBase.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/compilercontrol/share/AbstractTestBase.java	Sat Mar 05 20:46:42 2016 -0800
@@ -51,8 +51,9 @@
         for (int i = 0; !md.isValid() && i < ATTEMPTS; i++) {
             md = METHOD_GEN.generateRandomDescriptor(exec);
         }
-        if (!md.isValid()) {
-            System.out.println("WARN: Using predefined pattern");
+        if (!md.isValid() || "any.method()".matches(md.getRegexp())) {
+            /* If we could not get a valid pattern, or it matches any method
+               (leading to timeouts), fall back to the plain standard descriptor. */
             md = MethodGenerator.commandDescriptor(exec);
         }
         return md;
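The guard added above probes the generated regexp against the literal string "any.method()": if even that matches, the descriptor would effectively match every method and the run would time out. A minimal illustration, using a hypothetical over-broad regexp of the kind such a pattern might produce:

    // Hypothetical regexp for an over-broad pattern such as "*.*"
    String regexp = ".*\\..*\\(.*\\)";
    boolean tooBroad = "any.method()".matches(regexp);   // true -> use the predefined descriptor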
--- a/hotspot/test/compiler/intrinsics/string/TestStringIntrinsics2.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/intrinsics/string/TestStringIntrinsics2.java	Sat Mar 05 20:46:42 2016 -0800
@@ -494,6 +494,29 @@
         return s.indexOf("1");
     }
 
+    static String text1UTF16 = "A" + "\u05d0" + "\u05d1" + "B";
+
+    @Test(role = Role.TEST_ENTRY)
+    public static void test_indexOf_immUTF16() {
+        assertEquals(      3, indexOf_imm1Latin1_needle(text1UTF16), "test_indexOf_immUTF16");
+        assertEquals(      1, indexOf_imm1UTF16_needle(text1UTF16), "test_indexOf_immUTF16");
+        assertEquals(      1, indexOf_immUTF16_needle(text1UTF16), "test_indexOf_immUTF16");
+    }
+
+    @Test(role = Role.TEST_HELPER, compileAt = 4, warmup = 1, warmupArgs = { "A" + "\u05d0" + "\u05d1" + "B" })
+    static int indexOf_imm1Latin1_needle(String s) {
+        return s.indexOf("B");
+    }
+
+    @Test(role = Role.TEST_HELPER, compileAt = 4, warmup = 1, warmupArgs = { "A" + "\u05d0" + "\u05d1" + "B" })
+    static int indexOf_imm1UTF16_needle(String s) {
+        return s.indexOf("\u05d0");
+    }
+
+    @Test(role = Role.TEST_HELPER, compileAt = 4, warmup = 1, warmupArgs = { "A" + "\u05d0" + "\u05d1" + "B" })
+    static int indexOf_immUTF16_needle(String s) {
+        return s.indexOf("\u05d0" + "\u05d1");
+    }
 
     @Test(role = Role.TEST_HELPER, compileAt = 4, warmup = 1, warmupArgs = { "abc", "abcd" })
     public static int asmStringCompareTo(String a, String b) {
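The constants asserted in test_indexOf_immUTF16 above follow directly from the character positions in the four-character UTF-16 haystack; a standalone check:

    String text = "A" + "\u05d0" + "\u05d1" + "B";          // 'A', U+05D0, U+05D1, 'B'
    System.out.println(text.indexOf("B"));                   // 3: Latin1 needle in a UTF-16 haystack
    System.out.println(text.indexOf("\u05d0"));              // 1: single-char UTF-16 needle
    System.out.println(text.indexOf("\u05d0" + "\u05d1"));   // 1: two-char UTF-16 needle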
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jsr292/ContinuousCallSiteTargetChange.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @library /testlibrary
+ * @run main ContinuousCallSiteTargetChange
+ */
+import java.lang.invoke.*;
+import jdk.test.lib.*;
+
+public class ContinuousCallSiteTargetChange {
+    static void testServer() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:+IgnoreUnrecognizedVMOptions",
+                "-server", "-XX:-TieredCompilation", "-Xbatch",
+                "-XX:PerBytecodeRecompilationCutoff=10", "-XX:PerMethodRecompilationCutoff=10",
+                "-XX:+PrintCompilation", "-XX:+UnlockDiagnosticVMOptions", "-XX:+PrintInlining",
+                "ContinuousCallSiteTargetChange$Test", "100");
+
+        OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+
+        analyzer.shouldHaveExitValue(0);
+
+        analyzer.shouldNotContain("made not compilable");
+        analyzer.shouldNotContain("decompile_count > PerMethodRecompilationCutoff");
+    }
+
+    static void testClient() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:+IgnoreUnrecognizedVMOptions",
+                "-client", "-XX:+TieredCompilation", "-XX:TieredStopAtLevel=1", "-Xbatch",
+                "-XX:PerBytecodeRecompilationCutoff=10", "-XX:PerMethodRecompilationCutoff=10",
+                "-XX:+PrintCompilation", "-XX:+UnlockDiagnosticVMOptions", "-XX:+PrintInlining",
+                "ContinuousCallSiteTargetChange$Test", "100");
+
+        OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+
+        analyzer.shouldHaveExitValue(0);
+
+        analyzer.shouldNotContain("made not compilable");
+        analyzer.shouldNotContain("decompile_count > PerMethodRecompilationCutoff");
+    }
+
+    public static void main(String[] args) throws Exception {
+        testServer();
+        testClient();
+    }
+
+    static class Test {
+        static final MethodType mt = MethodType.methodType(void.class);
+        static final CallSite cs = new MutableCallSite(mt);
+
+        static final MethodHandle mh = cs.dynamicInvoker();
+
+        static void f() {
+        }
+
+        static void test1() throws Throwable {
+            mh.invokeExact();
+        }
+
+        static void test2() throws Throwable {
+            cs.getTarget().invokeExact();
+        }
+
+        static void iteration() throws Throwable {
+            MethodHandle mh1 = MethodHandles.lookup().findStatic(ContinuousCallSiteTargetChange.Test.class, "f", mt);
+            cs.setTarget(mh1);
+            for (int i = 0; i < 20_000; i++) {
+                test1();
+                test2();
+            }
+        }
+
+        public static void main(String[] args) throws Throwable {
+            int iterations = Integer.parseInt(args[0]);
+            for (int i = 0; i < iterations; i++) {
+                iteration();
+            }
+        }
+    }
+}
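The server-mode subprocess launched by testServer() corresponds, roughly, to running the nested Test class directly; assuming the test classes are on the class path, the equivalent command line would be:

    java -XX:+IgnoreUnrecognizedVMOptions -server -XX:-TieredCompilation -Xbatch \
         -XX:PerBytecodeRecompilationCutoff=10 -XX:PerMethodRecompilationCutoff=10 \
         -XX:+PrintCompilation -XX:+UnlockDiagnosticVMOptions -XX:+PrintInlining \
         'ContinuousCallSiteTargetChange$Test' 100

The harness then scans the child's output for "made not compilable" and the PerMethodRecompilationCutoff message, either of which would indicate that the repeated call-site retargeting pushed test1/test2 past the recompilation cutoffs.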
--- a/hotspot/test/compiler/jsr292/NonInlinedCall/InvokeTest.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/jsr292/NonInlinedCall/InvokeTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -180,7 +180,10 @@
     static void testInterface() {
         System.out.println("linkToInterface");
 
-        // Monomorphic case (optimized virtual call)
+        // Monomorphic case (optimized virtual call), concrete target method
+        run(() -> linkToInterface(new P1(), P1.class));
+
+        // Monomorphic case (optimized virtual call), default target method
         run(() -> linkToInterface(new T(), I.class));
 
         // Megamorphic case (virtual call)
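The two monomorphic cases added above differ only in where the resolved target lives. I, T and P1 are defined elsewhere in InvokeTest.java; their shape is roughly the following (hypothetical sketch, method name illustrative):

    interface I           { default void m() { } }        // default target method
    class T implements I  { }                             // inherits the default method
    class P1 implements I { public void m() { } }         // concrete target method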
--- a/hotspot/test/compiler/jvmci/common/testcases/MultipleAbstractImplementer.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/jvmci/common/testcases/MultipleAbstractImplementer.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,13 +23,93 @@
 
 package compiler.jvmci.common.testcases;
 
+import java.util.HashMap;
+import java.util.Map;
+
 public abstract class MultipleAbstractImplementer
         implements MultipleImplementersInterface {
 
+    // Different access levels on the fields of this class are used on purpose.
+    // This is needed to verify the constant-pool-related methods of
+    // jdk.vm.ci.CompilerToVM, e.g. resolveFieldInPool.
+
+    private static int intStaticField = INT_CONSTANT;
+    final static long longStaticField = LONG_CONSTANT;
+    volatile static float floatStaticField = FLOAT_CONSTANT;
+    static double doubleStaticField = DOUBLE_CONSTANT;
+    public static String stringStaticField = STRING_CONSTANT;
+    protected static Object objectStaticField = OBJECT_CONSTANT;
+
+    public int intField = INT_CONSTANT;
+    private long longField = LONG_CONSTANT;
+    protected float floatField = FLOAT_CONSTANT;
+    transient double doubleField = DOUBLE_CONSTANT;
+    volatile String stringField = STRING_CONSTANT;
+    String stringFieldEmpty = "";
+    final Object objectField;
+
+    public MultipleAbstractImplementer() {
+        intField = Integer.MAX_VALUE;
+        longField = Long.MAX_VALUE;
+        floatField = Float.MAX_VALUE;
+        doubleField = Double.MAX_VALUE;
+        stringField = "Message";
+        objectField = new Object();
+    }
+
     public abstract void abstractMethod();
 
     @Override
     public void finalize() throws Throwable {
         super.finalize();
     }
+
+    public void lambdaUsingMethod2() {
+        Thread t = new Thread(this::testMethod);
+        t.start();
+    }
+
+    /**
+     * This method is needed to have "getstatic" and "getfield" instructions
+     * in the class. These instructions are needed to test the
+     * resolveFieldInPool method, because it takes a bytecode as one of its arguments.
+     */
+    public void printFields() {
+        System.out.println(intStaticField);
+        System.out.println(longStaticField);
+        System.out.println(floatStaticField);
+        System.out.println(doubleStaticField);
+        System.out.println(stringStaticField);
+        System.out.println(objectStaticField);
+        System.out.println(intField);
+        System.out.println(longField);
+        System.out.println(floatField);
+        System.out.println(doubleField);
+        System.out.println(stringField);
+        System.out.println(stringFieldEmpty);
+        System.out.println(objectField);
+    }
+
+    public static void staticMethod() {
+        System.getProperties(); // calling some static method
+        Map map = new HashMap(); // calling some constructor
+        map.put(OBJECT_CONSTANT, OBJECT_CONSTANT); // calling some interface method
+        map.remove(OBJECT_CONSTANT); // calling some default interface method
+    }
+
+    @Override
+    public void instanceMethod() {
+        toString(); // calling some virtual method
+        super.toString(); // calling some special method
+    }
+
+    @Override
+    public void anonClassMethod() {
+        new Runnable() {
+            @Override
+            public void run() {
+                System.out.println("Running");
+            }
+        }.run();
+    }
 }
--- a/hotspot/test/compiler/jvmci/common/testcases/MultipleImplementer2.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/jvmci/common/testcases/MultipleImplementer2.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,18 @@
 
 package compiler.jvmci.common.testcases;
 
+import java.util.HashMap;
+import java.util.Map;
+
 public class MultipleImplementer2 implements MultipleImplementersInterface {
 
+    // Different access levels on the fields of this class are used on purpose.
+    // This is needed to verify the constant-pool-related methods of
+    // jdk.vm.ci.CompilerToVM, e.g. resolveFieldInPool.
+
     private static int intStaticField = INT_CONSTANT;
-    static long longStaticField = LONG_CONSTANT;
-    static float floatStaticField = FLOAT_CONSTANT;
+    final static long longStaticField = LONG_CONSTANT;
+    volatile static float floatStaticField = FLOAT_CONSTANT;
     static double doubleStaticField = DOUBLE_CONSTANT;
     public static String stringStaticField = STRING_CONSTANT;
     protected static Object objectStaticField = OBJECT_CONSTANT;
@@ -35,9 +42,10 @@
     public int intField = INT_CONSTANT;
     private long longField = LONG_CONSTANT;
     protected float floatField = FLOAT_CONSTANT;
-    double doubleField = DOUBLE_CONSTANT;
-    String stringField = STRING_CONSTANT;
-    Object objectField = OBJECT_CONSTANT;
+    transient double doubleField = DOUBLE_CONSTANT;
+    volatile String stringField = STRING_CONSTANT;
+    String stringFieldEmpty = "";
+    final Object objectField;
 
     public MultipleImplementer2() {
         intField = Integer.MAX_VALUE;
@@ -58,12 +66,52 @@
         super.finalize();
     }
 
-    public void interfaceMethodReferral2(MultipleImplementersInterface obj) {
-        obj.interfaceMethodReferral(obj);
-    }
-
     public void lambdaUsingMethod2() {
         Thread t = new Thread(this::testMethod);
         t.start();
     }
+
+    /**
+     * This method is needed to have "getstatic" and "getfield" instructions
+     * in the class. These instructions are needed to test the
+     * resolveFieldInPool method, because it takes a bytecode as one of its arguments.
+     */
+    public void printFields() {
+        System.out.println(intStaticField);
+        System.out.println(longStaticField);
+        System.out.println(floatStaticField);
+        System.out.println(doubleStaticField);
+        System.out.println(stringStaticField);
+        System.out.println(objectStaticField);
+        System.out.println(intField);
+        System.out.println(longField);
+        System.out.println(floatField);
+        System.out.println(doubleField);
+        System.out.println(stringField);
+        System.out.println(stringFieldEmpty);
+        System.out.println(objectField);
+    }
+
+    public static void staticMethod() {
+        System.getProperties(); // calling some static method
+        Map map = new HashMap(); // calling some constructor
+        map.put(OBJECT_CONSTANT, OBJECT_CONSTANT); // calling some interface method
+        map.remove(OBJECT_CONSTANT); // calling some default interface method
+    }
+
+    @Override
+    public void instanceMethod() {
+        toString(); // calling some virtual method
+        super.toString(); // calling some special method
+    }
+
+    @Override
+    public void anonClassMethod() {
+        new Runnable() {
+            @Override
+            public void run() {
+                System.out.println("Running");
+            }
+        }.run();
+    }
 }
--- a/hotspot/test/compiler/jvmci/common/testcases/MultipleImplementersInterface.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/jvmci/common/testcases/MultipleImplementersInterface.java	Sat Mar 05 20:46:42 2016 -0800
@@ -23,6 +23,9 @@
 
 package compiler.jvmci.common.testcases;
 
+import java.util.HashMap;
+import java.util.Map;
+
 public interface MultipleImplementersInterface {
 
     int INT_CONSTANT = Integer.MAX_VALUE;
@@ -42,12 +45,34 @@
         // empty
     }
 
-    default void interfaceMethodReferral(MultipleImplementersInterface obj) {
-        obj.defaultMethod();
-    }
-
     default void lambdaUsingMethod() {
         Thread t = new Thread(this::defaultMethod);
         t.start();
     }
+
+    default void printFields() {
+        System.out.println(OBJECT_CONSTANT);
+        String s = "";
+        System.out.println(s);
+    }
+
+    static void staticMethod() {
+        System.getProperties(); // calling some static method
+        Map map = new HashMap(); // calling some constructor
+        map.put(OBJECT_CONSTANT, OBJECT_CONSTANT); // calling some interface method
+        map.remove(OBJECT_CONSTANT); // calling some default interface method
+    }
+
+    default void instanceMethod() {
+        toString(); // calling some virtual method
+    }
+
+    default void anonClassMethod() {
+        new Runnable() {
+            @Override
+            public void run() {
+                System.out.println("Running");
+            }
+        }.run();
+    }
 }
--- a/hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestCase.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestCase.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,118 +27,214 @@
 import java.util.HashMap;
 import java.util.Map;
 import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
-import jdk.internal.misc.SharedSecrets;
+import sun.hotspot.WhiteBox;
 import sun.reflect.ConstantPool;
+import sun.reflect.ConstantPool.Tag;
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
 
 /**
  * Common class for jdk.vm.ci.hotspot.CompilerToVM constant pool tests
  */
 public class ConstantPoolTestCase {
-    private final Map<ConstantPoolTestsHelper.ConstantTypes, Validator> typeTests;
+
+    private static final Map<Tag, ConstantTypes> TAG_TO_TYPE_MAP;
+    static {
+        TAG_TO_TYPE_MAP = new HashMap<>();
+        TAG_TO_TYPE_MAP.put(Tag.CLASS, CONSTANT_CLASS);
+        TAG_TO_TYPE_MAP.put(Tag.FIELDREF, CONSTANT_FIELDREF);
+        TAG_TO_TYPE_MAP.put(Tag.METHODREF, CONSTANT_METHODREF);
+        TAG_TO_TYPE_MAP.put(Tag.INTERFACEMETHODREF, CONSTANT_INTERFACEMETHODREF);
+        TAG_TO_TYPE_MAP.put(Tag.STRING, CONSTANT_STRING);
+        TAG_TO_TYPE_MAP.put(Tag.INTEGER, CONSTANT_INTEGER);
+        TAG_TO_TYPE_MAP.put(Tag.FLOAT, CONSTANT_FLOAT);
+        TAG_TO_TYPE_MAP.put(Tag.LONG, CONSTANT_LONG);
+        TAG_TO_TYPE_MAP.put(Tag.DOUBLE, CONSTANT_DOUBLE);
+        TAG_TO_TYPE_MAP.put(Tag.NAMEANDTYPE, CONSTANT_NAMEANDTYPE);
+        TAG_TO_TYPE_MAP.put(Tag.UTF8, CONSTANT_UTF8);
+        TAG_TO_TYPE_MAP.put(Tag.METHODHANDLE, CONSTANT_METHODHANDLE);
+        TAG_TO_TYPE_MAP.put(Tag.METHODTYPE, CONSTANT_METHODTYPE);
+        TAG_TO_TYPE_MAP.put(Tag.INVOKEDYNAMIC, CONSTANT_INVOKEDYNAMIC);
+        TAG_TO_TYPE_MAP.put(Tag.INVALID, CONSTANT_INVALID);
+    }
+    private static final WhiteBox WB = WhiteBox.getWhiteBox();
+    private final Map<ConstantTypes, Validator> typeTests;
+
+    public static enum ConstantTypes {
+        CONSTANT_CLASS {
+            @Override
+            public TestedCPEntry getTestedCPEntry(DummyClasses dummyClass, int index) {
+                ConstantPool constantPoolSS = dummyClass.constantPoolSS;
+                checkIndex(constantPoolSS, index);
+                Class<?> klass = constantPoolSS.getClassAt(index);
+                String klassName = klass.getName();
+                TestedCPEntry[] testedEntries = dummyClass.testedCP.get(this);
+                for (TestedCPEntry entry : testedEntries) {
+                    if (entry.klass.replaceAll("/", "\\.").equals(klassName)) {
+                        return entry;
+                    }
+                }
+                return null;
+            }
+        },
+        CONSTANT_FIELDREF {
+            @Override
+            public TestedCPEntry getTestedCPEntry(DummyClasses dummyClass, int index) {
+                return this.getTestedCPEntryForMethodAndField(dummyClass, index);
+            }
+        },
+        CONSTANT_METHODREF {
+            @Override
+            public TestedCPEntry getTestedCPEntry(DummyClasses dummyClass, int index) {
+                return this.getTestedCPEntryForMethodAndField(dummyClass, index);
+            }
+        },
+        CONSTANT_INTERFACEMETHODREF {
+            @Override
+            public TestedCPEntry getTestedCPEntry(DummyClasses dummyClass, int index) {
+                return this.getTestedCPEntryForMethodAndField(dummyClass, index);
+            }
+        },
+        CONSTANT_STRING {
+            @Override
+            public TestedCPEntry getTestedCPEntry(DummyClasses dummyClass, int index) {
+                ConstantPool constantPoolSS = dummyClass.constantPoolSS;
+                checkIndex(constantPoolSS, index);
+                String value = constantPoolSS.getStringAt(index);
+                TestedCPEntry[] testedEntries = dummyClass.testedCP.get(this);
+                for (TestedCPEntry entry : testedEntries) {
+                    if (entry.name.equals(value)) {
+                        return entry;
+                    }
+                }
+                return null;
+            }
+        },
+        CONSTANT_INTEGER,
+        CONSTANT_FLOAT,
+        CONSTANT_LONG,
+        CONSTANT_DOUBLE,
+        CONSTANT_NAMEANDTYPE,
+        CONSTANT_UTF8,
+        CONSTANT_METHODHANDLE,
+        CONSTANT_METHODTYPE,
+        CONSTANT_INVOKEDYNAMIC {
+            @Override
+            public TestedCPEntry getTestedCPEntry(DummyClasses dummyClass, int index) {
+                ConstantPool constantPoolSS = dummyClass.constantPoolSS;
+                checkIndex(constantPoolSS, index);
+                int nameAndTypeIndex = constantPoolSS.getNameAndTypeRefIndexAt(index);
+                String[] info = constantPoolSS.getNameAndTypeRefInfoAt(nameAndTypeIndex);
+                TestedCPEntry[] testedEntries = dummyClass.testedCP.get(this);
+                for (TestedCPEntry entry : testedEntries) {
+                    if (info[0].equals(entry.name) && info[1].equals(entry.type)) {
+                        return entry;
+                    }
+                }
+                return null;
+            }
+        },
+        CONSTANT_INVALID;
+
+        public TestedCPEntry getTestedCPEntry(DummyClasses dummyClass, int index) {
+            return null; // returning null by default
+        }
+
+        public TestedCPEntry[] getAllCPEntriesForType(DummyClasses dummyClass) {
+            TestedCPEntry[] toReturn = dummyClass.testedCP.get(this);
+            if (toReturn == null) {
+                return new TestedCPEntry[0];
+            }
+            return toReturn;
+        }
+
+        protected TestedCPEntry getTestedCPEntryForMethodAndField(DummyClasses dummyClass, int index) {
+            ConstantPool constantPoolSS = dummyClass.constantPoolSS;
+            checkIndex(constantPoolSS, index);
+            String[] info = constantPoolSS.getMemberRefInfoAt(index);
+            TestedCPEntry[] testedEntries = dummyClass.testedCP.get(this);
+            for (TestedCPEntry entry : testedEntries) {
+                if (info[0].equals(entry.klass) && info[1].equals(entry.name) && info[2].equals(entry.type)) {
+                    return entry;
+                }
+            }
+            return null;
+        }
+
+        protected void checkIndex(ConstantPool constantPoolSS, int index) {
+            ConstantPool.Tag tag = constantPoolSS.getTagAt(index);
+            ConstantTypes type = mapTagToCPType(tag);
+            if (!this.equals(type)) {
+                String msg = String.format("TESTBUG: CP tag should be a %s, but is %s",
+                                           this.name(),
+                                           type.name());
+                throw new Error(msg);
+            }
+        }
+    }
 
     public static interface Validator {
         void validate(jdk.vm.ci.meta.ConstantPool constantPoolCTVM,
-                ConstantPool constantPoolSS,
-            ConstantPoolTestsHelper.DummyClasses dummyClass, int index);
+                      ConstantTypes cpType,
+                      DummyClasses dummyClass,
+                      int index);
     }
 
-    public ConstantPoolTestCase(Map<ConstantPoolTestsHelper.ConstantTypes,Validator> typeTests) {
+    public static class TestedCPEntry {
+        public final String klass;
+        public final String name;
+        public final String type;
+        public final byte[] opcodes;
+        public final long accFlags;
+
+        public TestedCPEntry(String klass, String name, String type, byte[] opcodes, long accFlags) {
+            this.klass = klass;
+            this.name = name;
+            this.type = type;
+            if (opcodes != null) {
+                this.opcodes = new byte[opcodes.length];
+                System.arraycopy(opcodes, 0, this.opcodes, 0, opcodes.length);
+            } else {
+                this.opcodes = null;
+            }
+            this.accFlags = accFlags;
+        }
+
+        public TestedCPEntry(String klass, String name, String type, byte[] opcodes) {
+            this(klass, name, type, opcodes, 0);
+        }
+
+        public TestedCPEntry(String klass, String name, String type) {
+            this(klass, name, type, null, 0);
+        }
+    }
+
+    public static ConstantTypes mapTagToCPType(Tag tag) {
+        return TAG_TO_TYPE_MAP.get(tag);
+    }
+
+    public ConstantPoolTestCase(Map<ConstantTypes, Validator> typeTests) {
         this.typeTests = new HashMap<>();
         this.typeTests.putAll(typeTests);
     }
 
-    private void messageOnFail(Throwable t,
-            ConstantPoolTestsHelper.ConstantTypes cpType,
-            ConstantPoolTestsHelper.DummyClasses dummyClass, int index) {
-        ConstantPool constantPoolSS = SharedSecrets.getJavaLangAccess().
-                        getConstantPool(dummyClass.klass);
-        String msg = String.format("Test for %s constant pool entry of"
-                        + " type %s",
-                        dummyClass.klass, cpType.name());
-        switch (cpType) {
-            case CONSTANT_CLASS:
-            case CONSTANT_STRING:
-            case CONSTANT_METHODTYPE:
-                String utf8 = constantPoolSS
-                        .getUTF8At((int) dummyClass.cp.get(index).value);
-                msg = String.format("%s (%s) failed with %s", msg, utf8, t);
-                break;
-            case CONSTANT_INTEGER:
-                int intValue = constantPoolSS.getIntAt(index);
-                msg = String.format("%s (%d) failed with %s", msg, intValue, t);
-                break;
-            case CONSTANT_LONG:
-                long longValue = constantPoolSS.getLongAt(index);
-                msg = String.format("%s (%d) failed with %s", msg, longValue, t);
-                break;
-            case CONSTANT_FLOAT:
-                float floatValue = constantPoolSS.getFloatAt(index);
-                msg = String.format("%s (%E) failed with %s", msg, floatValue, t);
-                break;
-            case CONSTANT_DOUBLE:
-                double doubleValue = constantPoolSS.getDoubleAt(index);
-                msg = String.format("%s (%E) failed with %s", msg, doubleValue, t);
-                break;
-            case CONSTANT_UTF8:
-                String utf8Value = constantPoolSS.getUTF8At(index);
-                msg = String.format("%s (%s) failed with %s", msg, utf8Value, t);
-                break;
-            case CONSTANT_INVOKEDYNAMIC:
-                index = ((int[]) dummyClass.cp.get(index).value)[1];
-            case CONSTANT_NAMEANDTYPE:
-                String name = constantPoolSS
-                        .getUTF8At(((int[]) dummyClass.cp.get(index).value)[0]);
-                String type = constantPoolSS
-                        .getUTF8At(((int[]) dummyClass.cp.get(index).value)[1]);
-                msg = String.format("%s (%s:%s) failed with %s",
-                        msg, name, type, t);
-                break;
-            case CONSTANT_METHODHANDLE:
-                index = ((int[]) dummyClass.cp.get(index).value)[1];
-            case CONSTANT_METHODREF:
-            case CONSTANT_INTERFACEMETHODREF:
-            case CONSTANT_FIELDREF:
-                int classIndex = ((int[]) dummyClass.cp.get(index).value)[0];
-                int nameAndTypeIndex = ((int[]) dummyClass.cp.get(index).value)[1];
-                String cName = constantPoolSS
-                        .getUTF8At((int) dummyClass.cp.get(classIndex).value);
-                String mName = constantPoolSS
-                        .getUTF8At(((int[]) dummyClass.cp.get(nameAndTypeIndex).value)[0]);
-                String mType = constantPoolSS
-                        .getUTF8At(((int[]) dummyClass.cp.get(nameAndTypeIndex).value)[1]);
-                msg = String.format("%s (%s.%s:%s) failed with %s ",
-                        msg, cName, mName, mType, t);
-                break;
-            default:
-                msg = String.format("Test bug: unknown constant type %s ", cpType);
-        }
-        throw new Error(msg + t.getMessage(), t);
-    }
-
     public void test() {
-        for (ConstantPoolTestsHelper.DummyClasses dummyClass
-                : ConstantPoolTestsHelper.DummyClasses.values()) {
-            System.out.printf("%nTesting dummy %s%n", dummyClass.klass);
-            HotSpotResolvedObjectType holder = HotSpotResolvedObjectType
-                    .fromObjectClass(dummyClass.klass);
-            jdk.vm.ci.meta.ConstantPool constantPoolCTVM
-                    = holder.getConstantPool();
-            ConstantPool constantPoolSS = SharedSecrets.getJavaLangAccess().
-                        getConstantPool(dummyClass.klass);
-            for (Integer i : dummyClass.cp.keySet()) {
-                ConstantPoolTestsHelper.ConstantTypes cpType
-                        = dummyClass.cp.get(i).type;
+        for (DummyClasses dummyClass : DummyClasses.values()) {
+            boolean isCPCached = WB.getConstantPoolCacheLength(dummyClass.klass) > -1;
+            System.out.printf("Testing dummy %s with constant pool cached = %b%n",
+                              dummyClass.klass,
+                              isCPCached);
+            HotSpotResolvedObjectType holder = HotSpotResolvedObjectType.fromObjectClass(dummyClass.klass);
+            jdk.vm.ci.meta.ConstantPool constantPoolCTVM = holder.getConstantPool();
+            ConstantPool constantPoolSS = dummyClass.constantPoolSS;
+            for (int i = 0; i < constantPoolSS.getSize(); i++) {
+                Tag tag = constantPoolSS.getTagAt(i);
+                ConstantTypes cpType = mapTagToCPType(tag);
                 if (!typeTests.keySet().contains(cpType)) {
                     continue;
                 }
-                try {
-                    typeTests.get(cpType).validate(constantPoolCTVM,
-                            constantPoolSS, dummyClass, i);
-                } catch (Throwable t) {
-                    messageOnFail(t, cpType, dummyClass, i);
-                }
+                typeTests.get(cpType).validate(constantPoolCTVM, cpType, dummyClass, i);
             }
         }
     }
 }
-
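After this rewrite the driver iterates every constant pool entry of each dummy class, maps the tag to a ConstantTypes value, and dispatches to the supplied validator, so a concrete test only has to provide a map of validators. A minimal sketch of that wiring, assuming a class in the same compiler.jvmci.compilerToVM package and a purely hypothetical validator body (the real tests assert against the JVMCI constant pool):

    package compiler.jvmci.compilerToVM;

    import java.util.HashMap;
    import java.util.Map;

    import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
    import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
    import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;

    public class ExampleConstantPoolDriver {
        public static void main(String[] args) {
            Map<ConstantTypes, Validator> tests = new HashMap<>();
            // Validate only CONSTANT_CLASS entries; test() skips tags without a validator.
            tests.put(ConstantTypes.CONSTANT_CLASS,
                      (constantPoolCTVM, cpType, dummyClass, index) -> {
                          TestedCPEntry expected = cpType.getTestedCPEntry(dummyClass, index);
                          if (expected == null) {
                              throw new AssertionError("unexpected CONSTANT_CLASS at index " + index);
                          }
                      });
            new ConstantPoolTestCase(tests).test();
        }
    }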
--- a/hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestsHelper.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestsHelper.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,19 @@
  */
 package compiler.jvmci.compilerToVM;
 
+import compiler.jvmci.common.testcases.MultipleAbstractImplementer;
 import compiler.jvmci.common.testcases.MultipleImplementer2;
 import compiler.jvmci.common.testcases.MultipleImplementersInterface;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
 import java.util.HashMap;
 import java.util.Map;
+import jdk.internal.misc.SharedSecrets;
+import jdk.internal.org.objectweb.asm.Opcodes;
+import sun.hotspot.WhiteBox;
+import sun.reflect.ConstantPool;
+import sun.reflect.ConstantPool.Tag;
 
 /**
  * Class contains hard-coded constant pool tables for dummy classes used for
@@ -34,104 +43,437 @@
  */
 public class ConstantPoolTestsHelper {
 
-    public enum ConstantTypes {
-        CONSTANT_CLASS,
-        CONSTANT_FIELDREF,
-        CONSTANT_METHODREF,
-        CONSTANT_INTERFACEMETHODREF,
-        CONSTANT_STRING,
-        CONSTANT_INTEGER,
-        CONSTANT_FLOAT,
-        CONSTANT_LONG,
-        CONSTANT_DOUBLE,
-        CONSTANT_NAMEANDTYPE,
-        CONSTANT_UTF8,
-        CONSTANT_METHODHANDLE,
-        CONSTANT_METHODTYPE,
-        CONSTANT_INVOKEDYNAMIC;
-    }
+    public static final int NO_CP_CACHE_PRESENT = Integer.MAX_VALUE;
 
     public enum DummyClasses {
         DUMMY_CLASS(MultipleImplementer2.class, CP_MAP_FOR_CLASS),
+        DUMMY_ABS_CLASS(MultipleAbstractImplementer.class, CP_MAP_FOR_ABS_CLASS),
         DUMMY_INTERFACE(MultipleImplementersInterface.class, CP_MAP_FOR_INTERFACE);
 
+        private static final WhiteBox WB = WhiteBox.getWhiteBox();
         public final Class<?> klass;
-        public final Map<Integer, ConstantPoolEntry> cp;
+        public final ConstantPool constantPoolSS;
+        public final Map<ConstantTypes, TestedCPEntry[]> testedCP;
 
-        DummyClasses(Class<?> klass, Map<Integer, ConstantPoolEntry> cp) {
+        DummyClasses(Class<?> klass, Map<ConstantTypes, TestedCPEntry[]> testedCP) {
             this.klass = klass;
-            this.cp = cp;
+            this.constantPoolSS = SharedSecrets.getJavaLangAccess().getConstantPool(klass);
+            this.testedCP = testedCP;
         }
-    }
 
-    public static class ConstantPoolEntry {
-
-        public final ConstantTypes type;
-        public final Object value;
-
-        public ConstantPoolEntry(ConstantTypes type, Object value) {
-            this.type = type;
-            this.value = value;
+        public int getCPCacheIndex(int cpi) {
+            int cacheLength = WB.getConstantPoolCacheLength(this.klass);
+            int indexTag = WB.getConstantPoolCacheIndexTag();
+            for (int cpci = indexTag; cpci < cacheLength + indexTag; cpci++) {
+                if (WB.remapInstructionOperandFromCPCache(this.klass, cpci) == cpi) {
+                    if (constantPoolSS.getTagAt(cpi).equals(Tag.INVOKEDYNAMIC)) {
+                        return WB.encodeConstantPoolIndyIndex(cpci) + indexTag;
+                    }
+                    return cpci;
+                }
+            }
+            return NO_CP_CACHE_PRESENT;
         }
     }
 
-    private static final Map<Integer, ConstantPoolEntry> CP_MAP_FOR_CLASS
-            = new HashMap<>();
+    private static final Map<ConstantTypes, TestedCPEntry[]> CP_MAP_FOR_CLASS = new HashMap<>();
     static {
-        CP_MAP_FOR_CLASS.put(1, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODREF, new int[]{22, 68}));
-        CP_MAP_FOR_CLASS.put(2, new ConstantPoolEntry(ConstantTypes.CONSTANT_CLASS, 69));
-        CP_MAP_FOR_CLASS.put(3, new ConstantPoolEntry(ConstantTypes.CONSTANT_INTEGER, 2147483647));
-        CP_MAP_FOR_CLASS.put(4, new ConstantPoolEntry(ConstantTypes.CONSTANT_FIELDREF, new int[]{35, 70}));
-        CP_MAP_FOR_CLASS.put(5, new ConstantPoolEntry(ConstantTypes.CONSTANT_LONG, 9223372036854775807L));
-        CP_MAP_FOR_CLASS.put(8, new ConstantPoolEntry(ConstantTypes.CONSTANT_FLOAT, 3.4028235E38F));
-        CP_MAP_FOR_CLASS.put(10, new ConstantPoolEntry(ConstantTypes.CONSTANT_DOUBLE, 1.7976931348623157E308D));
-        CP_MAP_FOR_CLASS.put(13, new ConstantPoolEntry(ConstantTypes.CONSTANT_STRING, 74));
-        CP_MAP_FOR_CLASS.put(22, new ConstantPoolEntry(ConstantTypes.CONSTANT_CLASS, 83));
-        CP_MAP_FOR_CLASS.put(23, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODREF, new int[]{22, 84}));
-        CP_MAP_FOR_CLASS.put(24, new ConstantPoolEntry(ConstantTypes.CONSTANT_INTERFACEMETHODREF, new int[]{2, 85}));
-        CP_MAP_FOR_CLASS.put(26, new ConstantPoolEntry(ConstantTypes.CONSTANT_INVOKEDYNAMIC, new int[]{0, 91}));
-        CP_MAP_FOR_CLASS.put(29, new ConstantPoolEntry(ConstantTypes.CONSTANT_FIELDREF, new int[]{35, 94}));
-        CP_MAP_FOR_CLASS.put(35, new ConstantPoolEntry(ConstantTypes.CONSTANT_CLASS, 100));
-        CP_MAP_FOR_CLASS.put(68, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{54, 55}));
-        CP_MAP_FOR_CLASS.put(70, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{48, 37}));
-        CP_MAP_FOR_CLASS.put(84, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{59, 55}));
-        CP_MAP_FOR_CLASS.put(85, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{103, 63}));
-        CP_MAP_FOR_CLASS.put(91, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{106, 107}));
-        CP_MAP_FOR_CLASS.put(94, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{36, 37}));
-        CP_MAP_FOR_CLASS.put(104, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODREF, new int[]{110, 111}));
-        CP_MAP_FOR_CLASS.put(105, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODREF, new int[]{35, 112}));
-        CP_MAP_FOR_CLASS.put(110, new ConstantPoolEntry(ConstantTypes.CONSTANT_CLASS, 113));
-        CP_MAP_FOR_CLASS.put(111, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{114, 118}));
-        CP_MAP_FOR_CLASS.put(112, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{58, 55}));
+        CP_MAP_FOR_CLASS.put(CONSTANT_CLASS,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementersInterface", null, null),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2", null, null),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2$1", null, null),
+                    new TestedCPEntry("java/lang/invoke/MethodHandles$Lookup", null, null),
+                }
+        );
+        CP_MAP_FOR_CLASS.put(CONSTANT_FIELDREF,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "intStaticField",
+                                      "I",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_PRIVATE | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "longStaticField",
+                                      "J",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_FINAL | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "floatStaticField",
+                                      "F",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_VOLATILE | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "doubleStaticField",
+                                      "D",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "stringStaticField",
+                                      "Ljava/lang/String;",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "objectStaticField",
+                                      "Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_PROTECTED | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "intField",
+                                      "I",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_PUBLIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "longField",
+                                      "J",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_PRIVATE),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "floatField",
+                                      "F",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_PROTECTED),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "doubleField",
+                                      "D",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_TRANSIENT),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "objectField",
+                                      "Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_FINAL),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "stringField",
+                                      "Ljava/lang/String;",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_VOLATILE),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "stringFieldEmpty",
+                                      "Ljava/lang/String;",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      0L),
+                }
+        );
+        CP_MAP_FOR_CLASS.put(CONSTANT_METHODREF,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("java/lang/System",
+                                      "getProperties",
+                                      "()Ljava/util/Properties;",
+                                      new byte[] {(byte) Opcodes.INVOKESTATIC}),
+                    new TestedCPEntry("java/util/HashMap",
+                                      "<init>",
+                                      "()V",
+                                      new byte[] {(byte) Opcodes.INVOKESPECIAL}),
+                    new TestedCPEntry("java/lang/Object",
+                                      "toString",
+                                      "()Ljava/lang/String;",
+                                      new byte[] {(byte) Opcodes.INVOKESPECIAL,
+                                                  (byte) Opcodes.INVOKEVIRTUAL}),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2$1",
+                                      "<init>",
+                                      "(Lcompiler/jvmci/common/testcases/MultipleImplementer2;)V",
+                                      new byte[0]),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer$1",
+                                      "run",
+                                      "()V",
+                                      new byte[0]),
+                }
+        );
+        CP_MAP_FOR_CLASS.put(CONSTANT_INTERFACEMETHODREF,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("java/util/Map",
+                                      "put",
+                                      "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.INVOKEINTERFACE}),
+                    new TestedCPEntry("java/util/Map",
+                                      "remove",
+                                      "(Ljava/lang/Object;)Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.INVOKEINTERFACE}),
+                }
+        );
+        CP_MAP_FOR_CLASS.put(CONSTANT_STRING,
+                new TestedCPEntry[] {
+                    new TestedCPEntry(null, "Message", null),
+                    new TestedCPEntry(null, "", null),
+                }
+        );
+        CP_MAP_FOR_CLASS.put(CONSTANT_METHODHANDLE,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("java/lang/invoke/LambdaMetafactory",
+                                      "metafactory",
+                                      "(Ljava/lang/invoke/MethodHandles$Lookup;"
+                                              + "Ljava/lang/String;"
+                                              + "Ljava/lang/invoke/MethodType;"
+                                              + "Ljava/lang/invoke/MethodType;"
+                                              + "Ljava/lang/invoke/MethodHandle;"
+                                              + "Ljava/lang/invoke/MethodType;)"
+                                              + "Ljava/lang/invoke/CallSite;",
+                                      null),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementer2",
+                                      "testMethod",
+                                      "()V"),
+                }
+        );
+        CP_MAP_FOR_CLASS.put(CONSTANT_METHODTYPE,
+                new TestedCPEntry[] {
+                    new TestedCPEntry(null, null, "()V"),
+                }
+        );
+        CP_MAP_FOR_CLASS.put(CONSTANT_INVOKEDYNAMIC,
+                new TestedCPEntry[] {
+                    new TestedCPEntry(null,
+                                     "run",
+                                     "(Lcompiler/jvmci/common/testcases/MultipleImplementer2;)"
+                                             + "Ljava/lang/Runnable;"),
+                }
+        );
     }
 
-    private static final Map<Integer, ConstantPoolEntry> CP_MAP_FOR_INTERFACE
+    private static final Map<ConstantTypes, TestedCPEntry[]> CP_MAP_FOR_ABS_CLASS
             = new HashMap<>();
     static {
-        CP_MAP_FOR_INTERFACE.put(1, new ConstantPoolEntry(ConstantTypes.CONSTANT_CLASS, 48));
-        CP_MAP_FOR_INTERFACE.put(5, new ConstantPoolEntry(ConstantTypes.CONSTANT_INTERFACEMETHODREF, new int[]{13, 52}));
-        CP_MAP_FOR_INTERFACE.put(6, new ConstantPoolEntry(ConstantTypes.CONSTANT_CLASS, 53));
-        CP_MAP_FOR_INTERFACE.put(7, new ConstantPoolEntry(ConstantTypes.CONSTANT_INVOKEDYNAMIC, new int[]{0, 58}));
-        CP_MAP_FOR_INTERFACE.put(8, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODREF, new int[]{6, 59}));
-        CP_MAP_FOR_INTERFACE.put(9, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODREF, new int[]{6, 60}));
-        CP_MAP_FOR_INTERFACE.put(12, new ConstantPoolEntry(ConstantTypes.CONSTANT_FIELDREF, new int[]{13, 63}));
-        CP_MAP_FOR_INTERFACE.put(13, new ConstantPoolEntry(ConstantTypes.CONSTANT_CLASS, 64));
-        CP_MAP_FOR_INTERFACE.put(17, new ConstantPoolEntry(ConstantTypes.CONSTANT_INTEGER, 2147483647));
-        CP_MAP_FOR_INTERFACE.put(20, new ConstantPoolEntry(ConstantTypes.CONSTANT_LONG, 9223372036854775807l));
-        CP_MAP_FOR_INTERFACE.put(24, new ConstantPoolEntry(ConstantTypes.CONSTANT_FLOAT, 3.4028235E38f));
-        CP_MAP_FOR_INTERFACE.put(27, new ConstantPoolEntry(ConstantTypes.CONSTANT_DOUBLE, 1.7976931348623157E308d));
-        CP_MAP_FOR_INTERFACE.put(31, new ConstantPoolEntry(ConstantTypes.CONSTANT_STRING, 65));
-        CP_MAP_FOR_INTERFACE.put(52, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{34, 35}));
-        CP_MAP_FOR_INTERFACE.put(55, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODHANDLE, new int[]{6, 67}));
-        CP_MAP_FOR_INTERFACE.put(56, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODTYPE, 35));
-        CP_MAP_FOR_INTERFACE.put(57, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODHANDLE, new int[]{9, 5}));
-        CP_MAP_FOR_INTERFACE.put(58, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{68, 69}));
-        CP_MAP_FOR_INTERFACE.put(59, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{70, 71}));
-        CP_MAP_FOR_INTERFACE.put(60, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{72, 35}));
-        CP_MAP_FOR_INTERFACE.put(63, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{32, 33}));
-        CP_MAP_FOR_INTERFACE.put(67, new ConstantPoolEntry(ConstantTypes.CONSTANT_METHODREF, new int[]{73, 74}));
-        CP_MAP_FOR_INTERFACE.put(73, new ConstantPoolEntry(ConstantTypes.CONSTANT_CLASS, 75));
-        CP_MAP_FOR_INTERFACE.put(74, new ConstantPoolEntry(ConstantTypes.CONSTANT_NAMEANDTYPE, new int[]{76, 80}));
-        CP_MAP_FOR_INTERFACE.put(77, new ConstantPoolEntry(ConstantTypes.CONSTANT_CLASS, 82));
+        CP_MAP_FOR_ABS_CLASS.put(CONSTANT_CLASS,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementersInterface", null, null),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer", null, null),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer$1", null, null),
+                    new TestedCPEntry("java/lang/invoke/MethodHandles$Lookup", null, null),
+                }
+        );
+        CP_MAP_FOR_ABS_CLASS.put(CONSTANT_FIELDREF,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "intStaticField",
+                                      "I",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_PRIVATE | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "longStaticField",
+                                      "J",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_FINAL | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "floatStaticField",
+                                      "F",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_VOLATILE | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "doubleStaticField",
+                                      "D",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "stringStaticField",
+                                      "Ljava/lang/String;",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_PUBLIC | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "objectStaticField",
+                                      "Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_PROTECTED | Opcodes.ACC_STATIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "intField",
+                                      "I",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_PUBLIC),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "longField",
+                                      "J",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_PRIVATE),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "floatField",
+                                      "F",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_PROTECTED),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "doubleField",
+                                      "D",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_TRANSIENT),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "objectField",
+                                      "Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_FINAL),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "stringField",
+                                      "Ljava/lang/String;",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      Opcodes.ACC_VOLATILE),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "stringFieldEmpty",
+                                      "Ljava/lang/String;",
+                                      new byte[] {(byte) Opcodes.PUTFIELD, (byte) Opcodes.GETFIELD},
+                                      0L),
+                }
+        );
+        CP_MAP_FOR_ABS_CLASS.put(CONSTANT_METHODREF,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("java/lang/System",
+                                      "getProperties",
+                                      "()Ljava/util/Properties;",
+                                      new byte[] {(byte) Opcodes.INVOKESTATIC}),
+                    new TestedCPEntry("java/util/HashMap",
+                                      "<init>",
+                                      "()V",
+                                      new byte[] {(byte) Opcodes.INVOKESPECIAL}),
+                    new TestedCPEntry("java/lang/Object",
+                                      "toString",
+                                      "()Ljava/lang/String;",
+                                      new byte[] {(byte) Opcodes.INVOKESPECIAL,
+                                      (byte) Opcodes.INVOKEVIRTUAL}),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer$1",
+                                      "<init>",
+                                      "(Lcompiler/jvmci/common/testcases/MultipleAbstractImplementer;)V",
+                                      new byte[0]),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer$1",
+                                      "run",
+                                      "()V",
+                                      new byte[0]),
+                }
+        );
+        CP_MAP_FOR_ABS_CLASS.put(CONSTANT_INTERFACEMETHODREF,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("java/util/Map",
+                                      "put",
+                                      "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.INVOKEINTERFACE}),
+                    new TestedCPEntry("java/util/Map",
+                                      "remove",
+                                      "(Ljava/lang/Object;)Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.INVOKEINTERFACE}),
+                }
+        );
+        CP_MAP_FOR_ABS_CLASS.put(CONSTANT_STRING,
+                new TestedCPEntry[] {
+                    new TestedCPEntry(null, "Message", null),
+                    new TestedCPEntry(null, "", null),
+                }
+        );
+        CP_MAP_FOR_ABS_CLASS.put(CONSTANT_METHODHANDLE,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("java/lang/invoke/LambdaMetafactory",
+                                      "metafactory",
+                                      "(Ljava/lang/invoke/MethodHandles$Lookup;"
+                                              + "Ljava/lang/String;"
+                                              + "Ljava/lang/invoke/MethodType;"
+                                              + "Ljava/lang/invoke/MethodType;"
+                                              + "Ljava/lang/invoke/MethodHandle;"
+                                              + "Ljava/lang/invoke/MethodType;)"
+                                              + "Ljava/lang/invoke/CallSite;",
+                                      null),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer",
+                                      "testMethod",
+                                      "()V"),
+                }
+        );
+        CP_MAP_FOR_ABS_CLASS.put(CONSTANT_METHODTYPE,
+                new TestedCPEntry[] {
+                    new TestedCPEntry(null, null, "()V"),
+                }
+        );
+        CP_MAP_FOR_ABS_CLASS.put(CONSTANT_INVOKEDYNAMIC,
+                new TestedCPEntry[] {
+                    new TestedCPEntry(null,
+                                      "run",
+                                      "(Lcompiler/jvmci/common/testcases/MultipleAbstractImplementer;)"
+                                              + "Ljava/lang/Runnable;"),
+                }
+        );
+    }
+
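+    // Expected constant pool entries for the MultipleImplementersInterface test class,
+    // keyed by constant pool constant type.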
+    private static final Map<ConstantTypes, TestedCPEntry[]> CP_MAP_FOR_INTERFACE
+            = new HashMap<>();
+    static {
+        CP_MAP_FOR_INTERFACE.put(CONSTANT_CLASS,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementersInterface", null, null),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementersInterface$1", null, null),
+                    new TestedCPEntry("java/lang/Object", null, null),
+                    new TestedCPEntry("java/lang/invoke/MethodHandles$Lookup", null, null),
+                }
+        );
+        CP_MAP_FOR_INTERFACE.put(CONSTANT_FIELDREF,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementersInterface",
+                                      "OBJECT_CONSTANT",
+                                      "Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.PUTSTATIC, (byte) Opcodes.GETSTATIC},
+                                      Opcodes.ACC_STATIC | Opcodes.ACC_FINAL | Opcodes.ACC_PUBLIC),
+                }
+        );
+        CP_MAP_FOR_INTERFACE.put(CONSTANT_METHODREF,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("java/lang/System",
+                                      "getProperties",
+                                      "()Ljava/util/Properties;",
+                                      new byte[] {(byte) Opcodes.INVOKESTATIC}),
+                    new TestedCPEntry("java/util/HashMap",
+                                      "<init>",
+                                      "()V",
+                                      new byte[] {(byte) Opcodes.INVOKESPECIAL}),
+                    new TestedCPEntry("java/lang/Object",
+                                      "toString",
+                                      "()Ljava/lang/String;",
+                                      new byte[] {(byte) Opcodes.INVOKEVIRTUAL}),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer$1",
+                                      "<init>",
+                                      "(Lcompiler/jvmci/common/testcases/MultipleAbstractImplementer;)V",
+                                      new byte[0]),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleAbstractImplementer$1",
+                                      "run",
+                                      "()V",
+                                      new byte[0]),
+                }
+        );
+        CP_MAP_FOR_INTERFACE.put(CONSTANT_INTERFACEMETHODREF,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("java/util/Map",
+                                      "put",
+                                      "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.INVOKEINTERFACE}),
+                    new TestedCPEntry("java/util/Map",
+                                      "remove",
+                                      "(Ljava/lang/Object;)Ljava/lang/Object;",
+                                      new byte[] {(byte) Opcodes.INVOKEINTERFACE}),
+                }
+        );
+        CP_MAP_FOR_INTERFACE.put(CONSTANT_STRING,
+                new TestedCPEntry[] {
+                    new TestedCPEntry(null, "Hello", null),
+                    new TestedCPEntry(null, "", null),
+                }
+        );
+        CP_MAP_FOR_INTERFACE.put(CONSTANT_METHODHANDLE,
+                new TestedCPEntry[] {
+                    new TestedCPEntry("java/lang/invoke/LambdaMetafactory",
+                                      "metafactory",
+                                      "(Ljava/lang/invoke/MethodHandles$Lookup;"
+                                              + "Ljava/lang/String;Ljava/lang/invoke/MethodType;"
+                                              + "Ljava/lang/invoke/MethodType;"
+                                              + "Ljava/lang/invoke/MethodHandle;"
+                                              + "Ljava/lang/invoke/MethodType;)"
+                                              + "Ljava/lang/invoke/CallSite;"),
+                    new TestedCPEntry("compiler/jvmci/common/testcases/MultipleImplementersInterface",
+                                      "defaultMethod",
+                                      "()V"),
+                }
+        );
+        CP_MAP_FOR_INTERFACE.put(CONSTANT_METHODTYPE,
+                new TestedCPEntry[] {
+                    new TestedCPEntry(null, null, "()V"),
+                }
+        );
+        CP_MAP_FOR_INTERFACE.put(CONSTANT_INVOKEDYNAMIC,
+                new TestedCPEntry[] {
+                    new TestedCPEntry(null,
+                                      "run",
+                                      "(Lcompiler/jvmci/common/testcases/MultipleImplementersInterface;)"
+                                              + "Ljava/lang/Runnable;"),
+                }
+        );
     }
 }
--- a/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassInPoolTest.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,58 +29,73 @@
  * @summary Testing compiler.jvmci.CompilerToVM.lookupKlassInPool method
  * @library /testlibrary /test/lib /
  * @compile ../common/CompilerToVMHelper.java
- * @build compiler.jvmci.common.testcases.MultipleImplementersInterface
- *        compiler.jvmci.common.testcases.MultipleImplementer2
- *        compiler.jvmci.compilerToVM.ConstantPoolTestsHelper
- *        compiler.jvmci.compilerToVM.ConstantPoolTestCase
+ * @build sun.hotspot.WhiteBox
  *        compiler.jvmci.compilerToVM.LookupKlassInPoolTest
- * @run main ClassFileInstaller jdk.vm.ci.hotspot.CompilerToVMHelper
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockExperimentalVMOptions
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions
  *                   -XX:+EnableJVMCI compiler.jvmci.compilerToVM.LookupKlassInPoolTest
  */
 
 package compiler.jvmci.compilerToVM;
 
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
 import java.util.HashMap;
 import java.util.Map;
 import jdk.vm.ci.hotspot.CompilerToVMHelper;
 import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
-import sun.reflect.ConstantPool;
+import jdk.vm.ci.meta.ConstantPool;
 
 /**
- * Test for {@code compiler.jvmci.CompilerToVM.lookupKlassInPool} method
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.lookupKlassInPool} method
  */
 public class LookupKlassInPoolTest {
 
-    public static void main(String[] args)  {
-        Map<ConstantPoolTestsHelper.ConstantTypes,
-                ConstantPoolTestCase.Validator> typeTests = new HashMap<>(1);
-        typeTests.put(ConstantPoolTestsHelper.ConstantTypes.CONSTANT_CLASS,
-                LookupKlassInPoolTest::validate);
+    public static void main(String[] args) throws Exception  {
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_CLASS, LookupKlassInPoolTest::validate);
         ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
         testCase.test();
+        // The next "Class.forName" and repeating "testCase.test()"
+        // are here for the following reason.
+        // The first test run is without dummy class initialization,
+        // which means no constant pool cache exists.
+        // The second run is with the dummy classes initialized (with the constant pool cache available).
+        // Some CompilerToVM methods require different input
+        // depending on whether CP cache exists or not.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
     }
 
-    public static void validate(jdk.vm.ci.meta.ConstantPool constantPoolCTVM,
-            ConstantPool constantPoolSS,
-            ConstantPoolTestsHelper.DummyClasses dummyClass, int i) {
-        Object classToVerify = CompilerToVMHelper
-                .lookupKlassInPool(constantPoolCTVM, i);
-        if (!(classToVerify instanceof HotSpotResolvedObjectType)
-                && !(classToVerify instanceof String)) {
-            String msg = String.format("Output of method"
-                    + " CTVM.lookupKlassInPool is neither"
-                    + " a HotSpotResolvedObjectType, nor a String");
+    public static void validate(ConstantPool constantPoolCTVM,
+                                ConstantTypes cpType,
+                                DummyClasses dummyClass,
+                                int i) {
+        TestedCPEntry entry = cpType.getTestedCPEntry(dummyClass, i);
+        if (entry == null) {
+            return;
+        }
+        Object classToVerify = CompilerToVMHelper.lookupKlassInPool(constantPoolCTVM, i);
+        if (!(classToVerify instanceof HotSpotResolvedObjectType) && !(classToVerify instanceof String)) {
+            String msg = String.format("Output of method CTVM.lookupKlassInPool is neither"
+                                               + " a HotSpotResolvedObjectType, nor a String");
             throw new AssertionError(msg);
         }
-        int classNameIndex = (int) dummyClass.cp.get(i).value;
-        String classNameToRefer
-                = constantPoolSS.getUTF8At(classNameIndex);
+        String classNameToRefer = entry.klass;
         String outputToVerify = classToVerify.toString();
         if (!outputToVerify.contains(classNameToRefer)) {
-            String msg = String.format("Wrong class accessed by constant"
-                    + " pool index %d: %s, but should be %s",
-                    i, outputToVerify, classNameToRefer);
+            String msg = String.format("Wrong class accessed by constant pool index %d: %s, but should be %s",
+                                       i,
+                                       outputToVerify,
+                                       classNameToRefer);
             throw new AssertionError(msg);
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassRefIndexInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8138708
+ * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64")
+ * @library /testlibrary /test/lib /
+ * @compile ../common/CompilerToVMHelper.java
+ * @build sun.hotspot.WhiteBox
+ *        compiler.jvmci.compilerToVM.LookupKlassRefIndexInPoolTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
+ *                   compiler.jvmci.compilerToVM.LookupKlassRefIndexInPoolTest
+ */
+
+package compiler.jvmci.compilerToVM;
+
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
+import java.util.HashMap;
+import java.util.Map;
+import jdk.test.lib.Asserts;
+import jdk.vm.ci.hotspot.CompilerToVMHelper;
+import jdk.vm.ci.meta.ConstantPool;
+
+/**
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.lookupKlassRefIndexInPool} method
+ */
+public class LookupKlassRefIndexInPoolTest {
+
+    public static void main(String[] args) throws Exception {
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_METHODREF, LookupKlassRefIndexInPoolTest::validate);
+        typeTests.put(CONSTANT_INTERFACEMETHODREF, LookupKlassRefIndexInPoolTest::validate);
+        typeTests.put(CONSTANT_FIELDREF, LookupKlassRefIndexInPoolTest::validate);
+        ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
+        testCase.test();
+        // The next "Class.forName" and repeating "testCase.test()"
+        // are here for the following reason.
+        // The first test run is without dummy class initialization,
+        // which means no constant pool cache exists.
+        // The second run is with the dummy classes initialized (with the constant pool cache available).
+        // Some CompilerToVM methods require different input
+        // depending on whether CP cache exists or not.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
+    }
+
+    private static void validate(ConstantPool constantPoolCTVM,
+                                 ConstantTypes cpType,
+                                 DummyClasses dummyClass,
+                                 int cpi) {
+        TestedCPEntry entry = cpType.getTestedCPEntry(dummyClass, cpi);
+        if (entry == null) {
+            return;
+        }
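+        // Once the dummy class has been initialized, the constant pool cache exists and the
+        // cached index (rather than the raw constant pool index) is passed to the CompilerToVM method.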
+        int index = cpi;
+        String cached = "";
+        int cpci = dummyClass.getCPCacheIndex(cpi);
+        if (cpci != ConstantPoolTestsHelper.NO_CP_CACHE_PRESENT) {
+            index = cpci;
+            cached = "cached ";
+        }
+        int indexToVerify = CompilerToVMHelper.lookupKlassRefIndexInPool(constantPoolCTVM, index);
+        int indexToRefer = dummyClass.constantPoolSS.getClassRefIndexAt(cpi);
+        String msg = String.format("Wrong class index returned by lookupKlassRefIndexInPool method "
+                                           + "applied to %sconstant pool index %d",
+                                   cached,
+                                   index);
+        Asserts.assertEQ(indexToRefer, indexToVerify, msg);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupMethodInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8138708
+ * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64")
+ * @library /testlibrary /test/lib /
+ * @compile ../common/CompilerToVMHelper.java
+ * @build sun.hotspot.WhiteBox
+ *        compiler.jvmci.compilerToVM.LookupMethodInPoolTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
+ *                   compiler.jvmci.compilerToVM.LookupMethodInPoolTest
+ */
+
+package compiler.jvmci.compilerToVM;
+
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
+import java.util.HashMap;
+import java.util.Map;
+import jdk.test.lib.Asserts;
+import jdk.vm.ci.hotspot.CompilerToVMHelper;
+import jdk.vm.ci.hotspot.HotSpotResolvedJavaMethod;
+import jdk.vm.ci.meta.ConstantPool;
+
+/**
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.lookupMethodInPool} method
+ */
+public class LookupMethodInPoolTest {
+
+    public static void main(String[] args) throws Exception {
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_METHODREF, LookupMethodInPoolTest::validate);
+        typeTests.put(CONSTANT_INTERFACEMETHODREF, LookupMethodInPoolTest::validate);
+        ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
+        testCase.test();
+        // The next "Class.forName" and repeating "testCase.test()"
+        // are here for the following reason.
+        // The first test run is without dummy class initialization,
+        // which means no constant pool cache exists.
+        // The second run is with the dummy classes initialized (with the constant pool cache available).
+        // Some CompilerToVM methods require different input
+        // depending on whether CP cache exists or not.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
+    }
+
+    private static void validate(ConstantPool constantPoolCTVM,
+                                 ConstantTypes cpType,
+                                 DummyClasses dummyClass,
+                                 int cpi) {
+        TestedCPEntry entry = cpType.getTestedCPEntry(dummyClass, cpi);
+        if (entry == null) {
+            return;
+        }
+        int index = cpi;
+        String cached = "";
+        int cpci = dummyClass.getCPCacheIndex(cpi);
+        if (cpci != ConstantPoolTestsHelper.NO_CP_CACHE_PRESENT) {
+            index = cpci;
+            cached = "cached ";
+        }
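+        // The tested entry may be referenced by several opcodes; look the method up once per expected opcode.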
+        for (int j = 0; j < entry.opcodes.length; j++) {
+            HotSpotResolvedJavaMethod methodToVerify = CompilerToVMHelper
+                    .lookupMethodInPool(constantPoolCTVM, index, entry.opcodes[j]);
+            String msg = String.format("Object returned by lookupMethodInPool method"
+                                               + " for %sindex %d should not be null",
+                                       cached,
+                                       index);
+            Asserts.assertNotNull(methodToVerify, msg);
+            String[] classNameSplit = entry.klass.split("/");
+            String classNameToRefer = classNameSplit[classNameSplit.length - 1];
+            String methodNameToRefer = entry.name;
+            String methodToVerifyToString = methodToVerify.toString();
+            if (!methodToVerifyToString.contains(classNameToRefer)
+                    || !methodToVerifyToString.contains(methodNameToRefer)) {
+                msg = String.format("String representation \"%s\" of the object"
+                                            + " returned by lookupMethodInPool method"
+                                            + " for index %d does not contain a method's class name"
+                                            + " or method's name, should contain %s.%s",
+                                    methodToVerifyToString,
+                                    index,
+                                    classNameToRefer,
+                                    methodNameToRefer);
+                throw new AssertionError(msg);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupNameAndTypeRefIndexInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8138708
+ * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64")
+ * @library /testlibrary /test/lib /
+ * @compile ../common/CompilerToVMHelper.java
+ * @build sun.hotspot.WhiteBox
+ *        compiler.jvmci.compilerToVM.LookupNameAndTypeRefIndexInPoolTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
+ *                   compiler.jvmci.compilerToVM.LookupNameAndTypeRefIndexInPoolTest
+ */
+
+package compiler.jvmci.compilerToVM;
+
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
+import java.util.HashMap;
+import java.util.Map;
+import jdk.test.lib.Asserts;
+import jdk.vm.ci.hotspot.CompilerToVMHelper;
+import jdk.vm.ci.meta.ConstantPool;
+
+/**
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.lookupNameAndTypeRefIndexInPool} method
+ */
+public class LookupNameAndTypeRefIndexInPoolTest {
+
+    public static void main(String[] args) throws Exception {
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_METHODREF, LookupNameAndTypeRefIndexInPoolTest::validate);
+        typeTests.put(CONSTANT_INTERFACEMETHODREF, LookupNameAndTypeRefIndexInPoolTest::validate);
+        typeTests.put(CONSTANT_FIELDREF, LookupNameAndTypeRefIndexInPoolTest::validate);
+        typeTests.put(CONSTANT_INVOKEDYNAMIC, LookupNameAndTypeRefIndexInPoolTest::validate);
+        ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
+        testCase.test();
+        // The next "Class.forName" and repeating "testCase.test()"
+        // are here for the following reason.
+        // The first test run is without dummy class initialization,
+        // which means no constant pool cache exists.
+        // The second run is with the dummy classes initialized (with the constant pool cache available).
+        // Some CompilerToVM methods require different input
+        // depending on whether CP cache exists or not.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
+    }
+
+    private static void validate(ConstantPool constantPoolCTVM,
+                                 ConstantTypes cpType,
+                                 DummyClasses dummyClass,
+                                 int cpi) {
+        TestedCPEntry entry = cpType.getTestedCPEntry(dummyClass, cpi);
+        if (entry == null) {
+            return;
+        }
+        int index = cpi;
+        String cached = "";
+        int cpci = dummyClass.getCPCacheIndex(cpi);
+        if (cpci != ConstantPoolTestsHelper.NO_CP_CACHE_PRESENT) {
+            index = cpci;
+            cached = "cached ";
+        }
+        int indexToVerify = CompilerToVMHelper.lookupNameAndTypeRefIndexInPool(constantPoolCTVM, index);
+        int indexToRefer = dummyClass.constantPoolSS.getNameAndTypeRefIndexAt(cpi);
+        String msg = String.format("Wrong nameAndType index returned by lookupNameAndTypeRefIndexInPool"
+                                           + " method applied to %sconstant pool index %d",
+                                   cached,
+                                   index);
+        Asserts.assertEQ(indexToRefer, indexToVerify, msg);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupNameInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8138708
+ * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64")
+ * @library /testlibrary /test/lib /
+ * @compile ../common/CompilerToVMHelper.java
+ * @build sun.hotspot.WhiteBox
+ *        compiler.jvmci.compilerToVM.LookupNameInPoolTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
+ *                   compiler.jvmci.compilerToVM.LookupNameInPoolTest
+ */
+
+package compiler.jvmci.compilerToVM;
+
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
+import java.util.HashMap;
+import java.util.Map;
+import jdk.vm.ci.hotspot.CompilerToVMHelper;
+import jdk.vm.ci.meta.ConstantPool;
+import jdk.test.lib.Asserts;
+
+/**
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.lookupNameInPool} method
+ */
+public class LookupNameInPoolTest {
+
+    public static void main(String[] args) throws Exception {
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_METHODREF, LookupNameInPoolTest::validate);
+        typeTests.put(CONSTANT_INTERFACEMETHODREF, LookupNameInPoolTest::validate);
+        typeTests.put(CONSTANT_FIELDREF, LookupNameInPoolTest::validate);
+        typeTests.put(CONSTANT_INVOKEDYNAMIC, LookupNameInPoolTest::validate);
+        ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
+        testCase.test();
+        // The next "Class.forName" and repeating "testCase.test()"
+        // are here for the following reason.
+        // The first test run is without dummy class initialization,
+        // which means no constant pool cache exists.
+        // The second run is with the dummy classes initialized (with the constant pool cache available).
+        // Some CompilerToVM methods require different input
+        // depending on whether CP cache exists or not.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
+    }
+
+    private static void validate(ConstantPool constantPoolCTVM,
+                                 ConstantTypes cpType,
+                                 DummyClasses dummyClass,
+                                 int cpi) {
+        TestedCPEntry entry = cpType.getTestedCPEntry(dummyClass, cpi);
+        if (entry == null) {
+            return;
+        }
+        int index = cpi;
+        String cached = "";
+        int cpci = dummyClass.getCPCacheIndex(cpi);
+        if (cpci != ConstantPoolTestsHelper.NO_CP_CACHE_PRESENT) {
+            index = cpci;
+            cached = "cached ";
+        }
+        String nameToVerify = CompilerToVMHelper.lookupNameInPool(constantPoolCTVM, index);
+        String nameToRefer = entry.name;
+        String msg = String.format("Wrong name accessed by %sconstant pool index %d", cached, index);
+        Asserts.assertEQ(nameToVerify, nameToRefer, msg);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupSignatureInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8138708
+ * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64")
+ * @library /testlibrary /test/lib /
+ * @compile ../common/CompilerToVMHelper.java
+ * @build sun.hotspot.WhiteBox
+ *        compiler.jvmci.compilerToVM.LookupSignatureInPoolTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
+ *                   compiler.jvmci.compilerToVM.LookupSignatureInPoolTest
+ */
+
+package compiler.jvmci.compilerToVM;
+
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
+import java.util.HashMap;
+import java.util.Map;
+import jdk.test.lib.Asserts;
+import jdk.vm.ci.hotspot.CompilerToVMHelper;
+import jdk.vm.ci.meta.ConstantPool;
+
+/**
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.lookupSignatureInPool} method
+ */
+public class LookupSignatureInPoolTest {
+
+    public static void main(String[] args) throws Exception {
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_METHODREF, LookupSignatureInPoolTest::validate);
+        typeTests.put(CONSTANT_INTERFACEMETHODREF, LookupSignatureInPoolTest::validate);
+        typeTests.put(CONSTANT_FIELDREF, LookupSignatureInPoolTest::validate);
+        typeTests.put(CONSTANT_INVOKEDYNAMIC, LookupSignatureInPoolTest::validate);
+        ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
+        testCase.test();
+        // The next "Class.forName" and repeating "testCase.test()"
+        // are here for the following reason.
+        // The first test run is without dummy class initialization,
+        // which means no constant pool cache exists.
+        // The second run is with the dummy classes initialized (with the constant pool cache available).
+        // Some CompilerToVM methods require different input
+        // depending on whether CP cache exists or not.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
+    }
+
+    private static void validate(ConstantPool constantPoolCTVM,
+                                 ConstantTypes cpType,
+                                 DummyClasses dummyClass,
+                                 int cpi) {
+        TestedCPEntry entry = cpType.getTestedCPEntry(dummyClass, cpi);
+        if (entry == null) {
+            return;
+        }
+        int index = cpi;
+        String cached = "";
+        int cpci = dummyClass.getCPCacheIndex(cpi);
+        if (cpci != ConstantPoolTestsHelper.NO_CP_CACHE_PRESENT) {
+            index = cpci;
+            cached = "cached ";
+        }
+        String sigToVerify = CompilerToVMHelper.lookupSignatureInPool(constantPoolCTVM, index);
+        String sigToRefer = entry.type;
+        String msg = String.format("Wrong signature accessed by %sconstant pool index %d",
+                                   cached,
+                                   index);
+        Asserts.assertEQ(sigToVerify, sigToRefer, msg);
+    }
+}
--- a/hotspot/test/compiler/jvmci/compilerToVM/ResolveConstantInPoolTest.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveConstantInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,66 +28,86 @@
  * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64")
  * @library /testlibrary /test/lib /
  * @compile ../common/CompilerToVMHelper.java
- * @build compiler.jvmci.compilerToVM.ResolveConstantInPoolTest
- * @run main ClassFileInstaller jdk.vm.ci.hotspot.CompilerToVMHelper
- * @run main/othervm -Xbootclasspath/a:.
- *                   -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
+ * @build sun.hotspot.WhiteBox
+ *        compiler.jvmci.compilerToVM.ResolveConstantInPoolTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
  *                   compiler.jvmci.compilerToVM.ResolveConstantInPoolTest
  */
 
 package compiler.jvmci.compilerToVM;
 
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
 import java.lang.invoke.MethodHandle;
 import java.lang.invoke.MethodType;
 import java.util.HashMap;
 import java.util.Map;
+import jdk.test.lib.Asserts;
 import jdk.vm.ci.hotspot.CompilerToVMHelper;
-import jdk.test.lib.Asserts;
-import sun.reflect.ConstantPool;
+import jdk.vm.ci.meta.ConstantPool;
 
 /**
- * Test for {@code compiler.jvmci.CompilerToVM.resolveConstantInPool} method
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.resolveConstantInPool} method
  */
 public class ResolveConstantInPoolTest {
 
+    private static final String NOT_NULL_MSG
+            = "Object returned by resolveConstantInPool method should not be null";
+
     public static void main(String[] args) throws Exception {
-        Map<ConstantPoolTestsHelper.ConstantTypes,
-                ConstantPoolTestCase.Validator> typeTests = new HashMap<>(2);
-        typeTests.put(ConstantPoolTestsHelper.ConstantTypes.CONSTANT_METHODHANDLE,
-                ResolveConstantInPoolTest::validateMethodHandle);
-        typeTests.put(ConstantPoolTestsHelper.ConstantTypes.CONSTANT_METHODTYPE,
-                ResolveConstantInPoolTest::validateMethodType);
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_METHODHANDLE, ResolveConstantInPoolTest::validateMethodHandle);
+        typeTests.put(CONSTANT_METHODTYPE, ResolveConstantInPoolTest::validateMethodType);
         ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
         testCase.test();
+        // The next "Class.forName" and repeating "testCase.test()"
+        // are here for the following reason.
+        // The first test run is without dummy class initialization,
+        // which means no constant pool cache exists.
+        // The second run is with the dummy classes initialized (with the constant pool cache available).
+        // Some CompilerToVM methods require different input
+        // depending on whether CP cache exists or not.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
     }
 
-    private static void validateMethodHandle(
-            jdk.vm.ci.meta.ConstantPool constantPoolCTVM,
-            ConstantPool constantPoolSS,
-            ConstantPoolTestsHelper.DummyClasses dummyClass, int index) {
-        Object constantInPool = CompilerToVMHelper
-                .resolveConstantInPool(constantPoolCTVM, index);
+    private static void validateMethodHandle(ConstantPool constantPoolCTVM,
+                                             ConstantTypes cpType,
+                                             DummyClasses dummyClass,
+                                             int index) {
+        Object constantInPool = CompilerToVMHelper.resolveConstantInPool(constantPoolCTVM, index);
+        String msg = String.format("%s for index %d", NOT_NULL_MSG, index);
+        Asserts.assertNotNull(constantInPool, msg);
         if (!(constantInPool instanceof MethodHandle)) {
-            String msg = String.format(
-                    "Wrong constant pool entry accessed by index"
-                            + " %d: %s, but should be subclass of %s",
-                    index + 1, constantInPool.getClass(),
-                    MethodHandle.class.getName());
+            msg = String.format("Wrong constant pool entry accessed by index"
+                                        + " %d: %s, but should be subclass of %s",
+                                index,
+                                constantInPool.getClass(),
+                                MethodHandle.class.getName());
             throw new AssertionError(msg);
         }
     }
 
-    private static void validateMethodType(
-            jdk.vm.ci.meta.ConstantPool constantPoolCTVM,
-            ConstantPool constantPoolSS,
-            ConstantPoolTestsHelper.DummyClasses dummyClass, int index) {
-        Object constantInPool = CompilerToVMHelper
-                .resolveConstantInPool(constantPoolCTVM, index);
+    private static void validateMethodType(ConstantPool constantPoolCTVM,
+                                           ConstantTypes cpType,
+                                           DummyClasses dummyClass,
+                                           int index) {
+        Object constantInPool = CompilerToVMHelper.resolveConstantInPool(constantPoolCTVM, index);
+        String msg = String.format("%s for index %d", NOT_NULL_MSG, index);
+        Asserts.assertNotNull(constantInPool, msg);
         Class mtToVerify = constantInPool.getClass();
         Class mtToRefer = MethodType.class;
-        String msg = String.format("Wrong %s accessed by constant pool index"
-                            + " %d: %s, but should be %s", "method type class",
-                            index, mtToVerify, mtToRefer);
+        msg = String.format("Wrong method type class accessed by"
+                                    + " constant pool index %d",
+                            index);
         Asserts.assertEQ(mtToRefer, mtToVerify, msg);
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveFieldInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8138708
+ * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64")
+ * @library /testlibrary /test/lib /
+ * @compile ../common/CompilerToVMHelper.java
+ * @build sun.hotspot.WhiteBox
+ *        compiler.jvmci.compilerToVM.ResolveFieldInPoolTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
+ *                   compiler.jvmci.compilerToVM.ResolveFieldInPoolTest
+ */
+
+package compiler.jvmci.compilerToVM;
+
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+import jdk.internal.org.objectweb.asm.Opcodes;
+import jdk.vm.ci.hotspot.CompilerToVMHelper;
+import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
+import jdk.vm.ci.meta.ConstantPool;
+import jdk.test.lib.Asserts;
+import jdk.test.lib.Utils;
+import sun.misc.Unsafe;
+
+/**
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.resolveFieldInPool} method
+ */
+public class ResolveFieldInPoolTest {
+
+    private static final Unsafe UNSAFE = Utils.getUnsafe();
+
+    public static void main(String[] args) throws Exception {
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_FIELDREF, ResolveFieldInPoolTest::validate);
+        ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
+        testCase.test();
+        // The next "Class.forName" and repeating "testCase.test()"
+        // are here for the following reason.
+        // The first test run is without dummy class initialization,
+        // which means no constant pool cache exists.
+        // The second run is with the dummy classes initialized (with the constant pool cache available).
+        // Some CompilerToVM methods require different input
+        // depending on whether CP cache exists or not.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
+    }
+
+    private static void validate(ConstantPool constantPoolCTVM,
+                                 ConstantTypes cpType,
+                                 DummyClasses dummyClass,
+                                 int cpi) {
+        TestedCPEntry entry = cpType.getTestedCPEntry(dummyClass, cpi);
+        if (entry == null) {
+            return;
+        }
+        int index = cpi;
+        String cached = "";
+        int cpci = dummyClass.getCPCacheIndex(cpi);
+        if (cpci != ConstantPoolTestsHelper.NO_CP_CACHE_PRESENT) {
+            index = cpci;
+            cached = "cached ";
+        }
+        for (int j = 0; j < entry.opcodes.length; j++) {
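+            // resolveFieldInPool fills info[0] with the field's access flags and info[1] with its offset;
+            // both are compared against the expected values below.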
+            long[] info = new long[2];
+            HotSpotResolvedObjectType fieldToVerify
+                    = CompilerToVMHelper.resolveFieldInPool(constantPoolCTVM,
+                                                           index,
+                                                           entry.opcodes[j],
+                                                           info);
+            String msg = String.format("Object returned by resolveFieldInPool method"
+                                               + " for %sindex %d should not be null",
+                                       cached,
+                                       index);
+            Asserts.assertNotNull(fieldToVerify, msg);
+            String classNameToRefer = entry.klass;
+            String fieldToVerifyKlassToString = fieldToVerify.klass().toValueString();
+            if (!fieldToVerifyKlassToString.contains(classNameToRefer)) {
+                msg = String.format("String representation \"%s\" of the object"
+                                            + " returned by resolveFieldInPool method"
+                                            + " for index %d does not contain a field's class name,"
+                                            + " should contain %s",
+                                    fieldToVerifyKlassToString,
+                                    index,
+                                    classNameToRefer);
+                throw new AssertionError(msg);
+            }
+            msg = String.format("Access flags returned by resolveFieldInPool"
+                                        + " method are wrong for the field %s.%s"
+                                        + " at %sindex %d",
+                                entry.klass,
+                                entry.name,
+                                cached,
+                                index);
+            Asserts.assertEQ(info[0], entry.accFlags, msg);
+            if (cpci == -1) {
+                return;
+            }
+            Class<?> classOfTheField = null;
+            Field fieldToRefer = null;
+            try {
+                classOfTheField = Class.forName(classNameToRefer.replaceAll("/", "\\."));
+                fieldToRefer = classOfTheField.getDeclaredField(entry.name);
+                fieldToRefer.setAccessible(true);
+            } catch (Exception ex) {
+                throw new Error("Unexpected exception", ex);
+            }
+            long offsetToRefer;
+            if ((entry.accFlags & Opcodes.ACC_STATIC) != 0) {
+                offsetToRefer = UNSAFE.staticFieldOffset(fieldToRefer);
+            } else {
+                offsetToRefer = UNSAFE.objectFieldOffset(fieldToRefer);
+            }
+            msg = String.format("Field offset returned by resolveFieldInPool"
+                                        + " method is wrong for the field %s.%s"
+                                        + " at %sindex %d",
+                                entry.klass,
+                                entry.name,
+                                cached,
+                                index);
+            Asserts.assertEQ(info[1], offsetToRefer, msg);
+        }
+    }
+}
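
The validator above cross-checks the offset reported in info[1] by recomputing it from a reflected Field with Unsafe, choosing staticFieldOffset or objectFieldOffset depending on ACC_STATIC. A minimal standalone sketch of that offset computation, assuming an ordinary sun.misc.Unsafe handle obtained through its "theUnsafe" field and a hypothetical Example class introduced only for illustration:

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    public class FieldOffsetSketch {
        static class Example {
            int instanceField;          // resolved via objectFieldOffset
            static int staticField;     // resolved via staticFieldOffset
        }

        public static void main(String[] args) throws Exception {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            Unsafe unsafe = (Unsafe) f.get(null);

            long instanceOffset = unsafe.objectFieldOffset(
                    Example.class.getDeclaredField("instanceField"));
            long staticOffset = unsafe.staticFieldOffset(
                    Example.class.getDeclaredField("staticField"));

            // These are the kinds of values the validator compares against info[1].
            System.out.println("instanceField offset: " + instanceOffset);
            System.out.println("staticField offset:   " + staticOffset);
        }
    }
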
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolvePossiblyCachedConstantInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8138708
+ * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64")
+ * @library /testlibrary /test/lib /
+ * @compile ../common/CompilerToVMHelper.java
+ * @build sun.hotspot.WhiteBox
+ *        compiler.jvmci.compilerToVM.ResolvePossiblyCachedConstantInPoolTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
+ *                   compiler.jvmci.compilerToVM.ResolvePossiblyCachedConstantInPoolTest
+ */
+
+package compiler.jvmci.compilerToVM;
+
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
+import java.util.HashMap;
+import java.util.Map;
+import jdk.test.lib.Asserts;
+import jdk.vm.ci.hotspot.CompilerToVMHelper;
+import jdk.vm.ci.meta.ConstantPool;
+
+/**
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.resolvePossiblyCachedConstantInPool} method
+ */
+public class ResolvePossiblyCachedConstantInPoolTest {
+
+    public static void main(String[] args) throws Exception {
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_STRING, ResolvePossiblyCachedConstantInPoolTest::validateString);
+        ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
+        // The next "Class.forName" is here for the following reason.
+        // When class is initialized, constant pool cache is available.
+        // This method works only with cached constant pool.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
+    }
+
+    private static void validateString(ConstantPool constantPoolCTVM,
+                                       ConstantTypes cpType,
+                                       DummyClasses dummyClass,
+                                       int cpi) {
+        TestedCPEntry entry = cpType.getTestedCPEntry(dummyClass, cpi);
+        if (entry == null) {
+            return;
+        }
+        int index = cpi;
+        String cached = "";
+        int cpci = dummyClass.getCPCacheIndex(cpi);
+        if (cpci != ConstantPoolTestsHelper.NO_CP_CACHE_PRESENT) {
+            index = cpci;
+            cached = "cached ";
+        }
+        Object constantInPool = CompilerToVMHelper.resolvePossiblyCachedConstantInPool(constantPoolCTVM, index);
+        String stringToVerify = (String) constantInPool;
+        String stringToRefer = entry.name;
+        if (stringToRefer.equals("") && cpci != ConstantPoolTestsHelper.NO_CP_CACHE_PRESENT) {
+            stringToRefer = null; // tested method returns null for cached empty strings
+        }
+        String msg = String.format("Wrong string accessed by %sconstant pool index %d", cached, index);
+        Asserts.assertEQ(stringToRefer, stringToVerify, msg);
+    }
+}
--- a/hotspot/test/compiler/jvmci/compilerToVM/ResolveTypeInPoolTest.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveTypeInPoolTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,51 +29,69 @@
  * @summary Testing compiler.jvmci.CompilerToVM.resolveTypeInPool method
  * @library /testlibrary /test/lib /
  * @compile ../common/CompilerToVMHelper.java
- * @build compiler.jvmci.common.testcases.MultipleImplementersInterface
- *        compiler.jvmci.common.testcases.MultipleImplementer2
- *        compiler.jvmci.compilerToVM.ConstantPoolTestsHelper
- *        compiler.jvmci.compilerToVM.ConstantPoolTestCase
+ * @build sun.hotspot.WhiteBox
  *        compiler.jvmci.compilerToVM.ResolveTypeInPoolTest
- * @run main ClassFileInstaller jdk.vm.ci.hotspot.CompilerToVMHelper
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockExperimentalVMOptions
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ *                              jdk.vm.ci.hotspot.CompilerToVMHelper
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions
  *                   -XX:+EnableJVMCI compiler.jvmci.compilerToVM.ResolveTypeInPoolTest
  */
 
 package compiler.jvmci.compilerToVM;
 
+import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes;
+import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.TestedCPEntry;
+import compiler.jvmci.compilerToVM.ConstantPoolTestCase.Validator;
 import java.util.HashMap;
 import java.util.Map;
 import jdk.vm.ci.hotspot.CompilerToVMHelper;
 import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
-import sun.reflect.ConstantPool;
+import jdk.vm.ci.meta.ConstantPool;
 
 /**
- * Test for {@code compiler.jvmci.CompilerToVM.resolveTypeInPool} method
+ * Test for {@code jdk.vm.ci.hotspot.CompilerToVM.resolveTypeInPool} method
  */
 public class ResolveTypeInPoolTest {
 
     public static void main(String[] args) throws Exception {
-        Map<ConstantPoolTestsHelper.ConstantTypes,
-                ConstantPoolTestCase.Validator> typeTests = new HashMap<>(1);
-        typeTests.put(ConstantPoolTestsHelper.ConstantTypes.CONSTANT_CLASS,
-                ResolveTypeInPoolTest::validate);
+        Map<ConstantTypes, Validator> typeTests = new HashMap<>();
+        typeTests.put(CONSTANT_CLASS, ResolveTypeInPoolTest::validate);
         ConstantPoolTestCase testCase = new ConstantPoolTestCase(typeTests);
         testCase.test();
+        // The next "Class.forName" and repeating "testCase.test()"
+        // are here for the following reason.
+        // The first test run is without dummy class initialization,
+        // which means no constant pool cache exists.
+        // The second run is with initialized class (with constant pool cache available).
+        // Some CompilerToVM methods require different input
+        // depending on whether CP cache exists or not.
+        for (DummyClasses dummy : DummyClasses.values()) {
+            Class.forName(dummy.klass.getName());
+        }
+        testCase.test();
     }
 
-    public static void validate(
-            jdk.vm.ci.meta.ConstantPool constantPoolCTVM,
-            ConstantPool constantPoolSS,
-            ConstantPoolTestsHelper.DummyClasses dummyClass, int i) {
-        HotSpotResolvedObjectType typeToVerify = CompilerToVMHelper
-                .resolveTypeInPool(constantPoolCTVM, i);
-        int classNameIndex = (int) dummyClass.cp.get(i).value;
-        String classNameToRefer = constantPoolSS.getUTF8At(classNameIndex);
+    public static void validate(ConstantPool constantPoolCTVM,
+                                ConstantTypes cpType,
+                                DummyClasses dummyClass,
+                                int i) {
+        TestedCPEntry entry = cpType.getTestedCPEntry(dummyClass, i);
+        if (entry == null) {
+            return;
+        }
+        HotSpotResolvedObjectType typeToVerify = CompilerToVMHelper.resolveTypeInPool(constantPoolCTVM, i);
+        String classNameToRefer = entry.klass;
         String outputToVerify = typeToVerify.toString();
         if (!outputToVerify.contains(classNameToRefer)) {
             String msg = String.format("Wrong class accessed by constant"
-                    + " pool index %d: %s, but should be %s",
-                    i, outputToVerify, classNameToRefer);
+                                               + " pool index %d: %s, but should be %s",
+                                       i,
+                                       outputToVerify,
+                                       classNameToRefer);
             throw new AssertionError(msg);
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/native/TestDirtyInt.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test
+ * @run main/native TestDirtyInt
+ */
+public class TestDirtyInt {
+    static {
+        System.loadLibrary("TestDirtyInt");
+    }
+
+    static native int test(int v);
+
+    static int compiled(int v) {
+        return test(v<<2);
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 20000; i++) {
+            int res = compiled(Integer.MAX_VALUE);
+            if (res != 0x42) {
+                throw new RuntimeException("Test failed");
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/native/libTestDirtyInt.c	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "jni.h"
+#include <stdio.h>
+
+static int array = 0x42;
+
+JNIEXPORT jint JNICALL Java_TestDirtyInt_test(JNIEnv* env, jclass jclazz, jint v)
+{
+  int* ptr = &array + v + 4;
+  return *ptr;
+}
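
Reading the two new files together: compiled() calls test(v << 2) with v == Integer.MAX_VALUE, the 32-bit shift wraps to -4, so the native stub computes &array + (-4) + 4 == &array and returns 0x42. The check therefore appears to depend on the int argument reaching the C code correctly sign-extended rather than carrying "dirty" upper register bits. A tiny sketch of the wrapping arithmetic, for illustration only:

    public class DirtyIntArithmetic {
        public static void main(String[] args) {
            int v = Integer.MAX_VALUE << 2;   // 0x7fffffff << 2 wraps to 0xfffffffc
            System.out.println(v);            // prints -4
            // In libTestDirtyInt.c: &array + (-4) + 4 == &array, so *ptr reads 0x42.
        }
    }
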
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestBoolean.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestBoolean.java	Sat Mar 05 20:46:42 2016 -0800
@@ -128,6 +128,20 @@
         }
 
 
+        // Lazy
+        {
+            UNSAFE.putBooleanRelease(base, offset, true);
+            boolean x = UNSAFE.getBooleanAcquire(base, offset);
+            assertEquals(x, true, "putRelease boolean value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.putBooleanOpaque(base, offset, false);
+            boolean x = UNSAFE.getBooleanOpaque(base, offset);
+            assertEquals(x, false, "putOpaque boolean value");
+        }
+
 
 
     }
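
The new "Lazy" blocks pair a release store with an acquire load, and the "Opaque" blocks exercise the weakest mode that still guarantees bitwise atomicity and per-variable coherence. Within a single thread both simply read back the stored value, as asserted above; the release/acquire pairing matters across threads. A sketch of that cross-thread use, written against the public VarHandle API (which exposes equivalent setRelease/getAcquire access modes) rather than jdk.internal.misc.Unsafe, purely for illustration:

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    public class ReleaseAcquireSketch {
        static int data;
        static int ready;
        static final VarHandle READY;

        static {
            try {
                READY = MethodHandles.lookup()
                        .findStaticVarHandle(ReleaseAcquireSketch.class, "ready", int.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        public static void main(String[] args) throws InterruptedException {
            Thread producer = new Thread(() -> {
                data = 42;            // plain write...
                READY.setRelease(1);  // ...ordered before this release store
            });
            Thread consumer = new Thread(() -> {
                while ((int) READY.getAcquire() == 0) { /* spin */ }
                System.out.println(data);  // the acquire load makes 42 visible here
            });
            consumer.start();
            producer.start();
            producer.join();
            consumer.join();
        }
    }
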
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestByte.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestByte.java	Sat Mar 05 20:46:42 2016 -0800
@@ -157,6 +157,20 @@
         }
 
 
+        // Lazy
+        {
+            UNSAFE.putByteRelease(base, offset, (byte)1);
+            byte x = UNSAFE.getByteAcquire(base, offset);
+            assertEquals(x, (byte)1, "putRelease byte value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.putByteOpaque(base, offset, (byte)2);
+            byte x = UNSAFE.getByteOpaque(base, offset);
+            assertEquals(x, (byte)2, "putOpaque byte value");
+        }
+
 
 
     }
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestChar.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestChar.java	Sat Mar 05 20:46:42 2016 -0800
@@ -157,6 +157,20 @@
         }
 
 
+        // Lazy
+        {
+            UNSAFE.putCharRelease(base, offset, 'a');
+            char x = UNSAFE.getCharAcquire(base, offset);
+            assertEquals(x, 'a', "putRelease char value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.putCharOpaque(base, offset, 'b');
+            char x = UNSAFE.getCharOpaque(base, offset);
+            assertEquals(x, 'b', "putOpaque char value");
+        }
+
         // Unaligned
         {
             UNSAFE.putCharUnaligned(base, offset, 'b');
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestDouble.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestDouble.java	Sat Mar 05 20:46:42 2016 -0800
@@ -157,6 +157,20 @@
         }
 
 
+        // Lazy
+        {
+            UNSAFE.putDoubleRelease(base, offset, 1.0d);
+            double x = UNSAFE.getDoubleAcquire(base, offset);
+            assertEquals(x, 1.0d, "putRelease double value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.putDoubleOpaque(base, offset, 2.0d);
+            double x = UNSAFE.getDoubleOpaque(base, offset);
+            assertEquals(x, 2.0d, "putOpaque double value");
+        }
+
 
 
     }
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestFloat.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestFloat.java	Sat Mar 05 20:46:42 2016 -0800
@@ -157,6 +157,20 @@
         }
 
 
+        // Lazy
+        {
+            UNSAFE.putFloatRelease(base, offset, 1.0f);
+            float x = UNSAFE.getFloatAcquire(base, offset);
+            assertEquals(x, 1.0f, "putRelease float value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.putFloatOpaque(base, offset, 2.0f);
+            float x = UNSAFE.getFloatOpaque(base, offset);
+            assertEquals(x, 2.0f, "putOpaque float value");
+        }
+
 
 
     }
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestInt.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestInt.java	Sat Mar 05 20:46:42 2016 -0800
@@ -163,6 +163,20 @@
             assertEquals(x, 1, "putRelease int value");
         }
 
+        // Lazy
+        {
+            UNSAFE.putIntRelease(base, offset, 1);
+            int x = UNSAFE.getIntAcquire(base, offset);
+            assertEquals(x, 1, "putRelease int value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.putIntOpaque(base, offset, 2);
+            int x = UNSAFE.getIntOpaque(base, offset);
+            assertEquals(x, 2, "putOpaque int value");
+        }
+
         // Unaligned
         {
             UNSAFE.putIntUnaligned(base, offset, 2);
@@ -199,6 +213,70 @@
             assertEquals(x, 2, "failing compareAndSwap int value");
         }
 
+        // Advanced compare
+        {
+            int r = UNSAFE.compareAndExchangeIntVolatile(base, offset, 2, 1);
+            assertEquals(r, 2, "success compareAndExchangeVolatile int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 1, "success compareAndExchangeVolatile int value");
+        }
+
+        {
+            int r = UNSAFE.compareAndExchangeIntVolatile(base, offset, 2, 3);
+            assertEquals(r, 1, "failing compareAndExchangeVolatile int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 1, "failing compareAndExchangeVolatile int value");
+        }
+
+        {
+            int r = UNSAFE.compareAndExchangeIntAcquire(base, offset, 1, 2);
+            assertEquals(r, 1, "success compareAndExchangeAcquire int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 2, "success compareAndExchangeAcquire int value");
+        }
+
+        {
+            int r = UNSAFE.compareAndExchangeIntAcquire(base, offset, 1, 3);
+            assertEquals(r, 2, "failing compareAndExchangeAcquire int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 2, "failing compareAndExchangeAcquire int value");
+        }
+
+        {
+            int r = UNSAFE.compareAndExchangeIntRelease(base, offset, 2, 1);
+            assertEquals(r, 2, "success compareAndExchangeRelease int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 1, "success compareAndExchangeRelease int value");
+        }
+
+        {
+            int r = UNSAFE.compareAndExchangeIntRelease(base, offset, 2, 3);
+            assertEquals(r, 1, "failing compareAndExchangeRelease int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 1, "failing compareAndExchangeRelease int value");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwapInt(base, offset, 1, 2);
+            assertEquals(r, true, "weakCompareAndSwap int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 2, "weakCompareAndSwap int value");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwapIntAcquire(base, offset, 2, 1);
+            assertEquals(r, true, "weakCompareAndSwapAcquire int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 1, "weakCompareAndSwapAcquire int");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwapIntRelease(base, offset, 1, 2);
+            assertEquals(r, true, "weakCompareAndSwapRelease int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 2, "weakCompareAndSwapRelease int");
+        }
+
         // Compare set and get
         {
             int o = UNSAFE.getAndSetInt(base, offset, 1);
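
Unlike compareAndSwap, the compareAndExchange* methods exercised above return the value actually observed (the "witness"): on success it equals the expected value, on failure it is whatever was really present, which is exactly what the assertions on r check. That convention allows a retry loop without a separate re-read; the weakCompareAndSwap* variants return a boolean and may fail spuriously, so they are normally wrapped in a similar loop. A small sketch using the analogous public AtomicInteger.compareAndExchange, for illustration only:

    import java.util.concurrent.atomic.AtomicInteger;

    public class CompareAndExchangeSketch {
        public static void main(String[] args) {
            AtomicInteger counter = new AtomicInteger(0);

            int observed = counter.get();
            for (;;) {
                int witness = counter.compareAndExchange(observed, observed + 1);
                if (witness == observed) {
                    break;             // CAS succeeded; the witness equals the expected value
                }
                observed = witness;    // CAS failed; retry with the value actually seen
            }
            System.out.println(counter.get());  // prints 1
        }
    }
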
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestLong.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestLong.java	Sat Mar 05 20:46:42 2016 -0800
@@ -163,6 +163,20 @@
             assertEquals(x, 1L, "putRelease long value");
         }
 
+        // Lazy
+        {
+            UNSAFE.putLongRelease(base, offset, 1L);
+            long x = UNSAFE.getLongAcquire(base, offset);
+            assertEquals(x, 1L, "putRelease long value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.putLongOpaque(base, offset, 2L);
+            long x = UNSAFE.getLongOpaque(base, offset);
+            assertEquals(x, 2L, "putOpaque long value");
+        }
+
         // Unaligned
         {
             UNSAFE.putLongUnaligned(base, offset, 2L);
@@ -199,6 +213,70 @@
             assertEquals(x, 2L, "failing compareAndSwap long value");
         }
 
+        // Advanced compare
+        {
+            long r = UNSAFE.compareAndExchangeLongVolatile(base, offset, 2L, 1L);
+            assertEquals(r, 2L, "success compareAndExchangeVolatile long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 1L, "success compareAndExchangeVolatile long value");
+        }
+
+        {
+            long r = UNSAFE.compareAndExchangeLongVolatile(base, offset, 2L, 3L);
+            assertEquals(r, 1L, "failing compareAndExchangeVolatile long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 1L, "failing compareAndExchangeVolatile long value");
+        }
+
+        {
+            long r = UNSAFE.compareAndExchangeLongAcquire(base, offset, 1L, 2L);
+            assertEquals(r, 1L, "success compareAndExchangeAcquire long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 2L, "success compareAndExchangeAcquire long value");
+        }
+
+        {
+            long r = UNSAFE.compareAndExchangeLongAcquire(base, offset, 1L, 3L);
+            assertEquals(r, 2L, "failing compareAndExchangeAcquire long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 2L, "failing compareAndExchangeAcquire long value");
+        }
+
+        {
+            long r = UNSAFE.compareAndExchangeLongRelease(base, offset, 2L, 1L);
+            assertEquals(r, 2L, "success compareAndExchangeRelease long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 1L, "success compareAndExchangeRelease long value");
+        }
+
+        {
+            long r = UNSAFE.compareAndExchangeLongRelease(base, offset, 2L, 3L);
+            assertEquals(r, 1L, "failing compareAndExchangeRelease long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 1L, "failing compareAndExchangeRelease long value");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwapLong(base, offset, 1L, 2L);
+            assertEquals(r, true, "weakCompareAndSwap long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 2L, "weakCompareAndSwap long value");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwapLongAcquire(base, offset, 2L, 1L);
+            assertEquals(r, true, "weakCompareAndSwapAcquire long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 1L, "weakCompareAndSwapAcquire long");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwapLongRelease(base, offset, 1L, 2L);
+            assertEquals(r, true, "weakCompareAndSwapRelease long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 2L, "weakCompareAndSwapRelease long");
+        }
+
         // Compare set and get
         {
             long o = UNSAFE.getAndSetLong(base, offset, 1L);
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestObject.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestObject.java	Sat Mar 05 20:46:42 2016 -0800
@@ -134,6 +134,20 @@
             assertEquals(x, "foo", "putRelease Object value");
         }
 
+        // Lazy
+        {
+            UNSAFE.putObjectRelease(base, offset, "foo");
+            Object x = UNSAFE.getObjectAcquire(base, offset);
+            assertEquals(x, "foo", "putRelease Object value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.putObjectOpaque(base, offset, "bar");
+            Object x = UNSAFE.getObjectOpaque(base, offset);
+            assertEquals(x, "bar", "putOpaque Object value");
+        }
+
 
         UNSAFE.putObject(base, offset, "foo");
 
@@ -152,6 +166,70 @@
             assertEquals(x, "bar", "failing compareAndSwap Object value");
         }
 
+        // Advanced compare
+        {
+            Object r = UNSAFE.compareAndExchangeObjectVolatile(base, offset, "bar", "foo");
+            assertEquals(r, "bar", "success compareAndExchangeVolatile Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "foo", "success compareAndExchangeVolatile Object value");
+        }
+
+        {
+            Object r = UNSAFE.compareAndExchangeObjectVolatile(base, offset, "bar", "baz");
+            assertEquals(r, "foo", "failing compareAndExchangeVolatile Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "foo", "failing compareAndExchangeVolatile Object value");
+        }
+
+        {
+            Object r = UNSAFE.compareAndExchangeObjectAcquire(base, offset, "foo", "bar");
+            assertEquals(r, "foo", "success compareAndExchangeAcquire Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "bar", "success compareAndExchangeAcquire Object value");
+        }
+
+        {
+            Object r = UNSAFE.compareAndExchangeObjectAcquire(base, offset, "foo", "baz");
+            assertEquals(r, "bar", "failing compareAndExchangeAcquire Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "bar", "failing compareAndExchangeAcquire Object value");
+        }
+
+        {
+            Object r = UNSAFE.compareAndExchangeObjectRelease(base, offset, "bar", "foo");
+            assertEquals(r, "bar", "success compareAndExchangeRelease Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "foo", "success compareAndExchangeRelease Object value");
+        }
+
+        {
+            Object r = UNSAFE.compareAndExchangeObjectRelease(base, offset, "bar", "baz");
+            assertEquals(r, "foo", "failing compareAndExchangeRelease Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "foo", "failing compareAndExchangeRelease Object value");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwapObject(base, offset, "foo", "bar");
+            assertEquals(r, true, "weakCompareAndSwap Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "bar", "weakCompareAndSwap Object value");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwapObjectAcquire(base, offset, "bar", "foo");
+            assertEquals(r, true, "weakCompareAndSwapAcquire Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "foo", "weakCompareAndSwapAcquire Object");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwapObjectRelease(base, offset, "foo", "bar");
+            assertEquals(r, true, "weakCompareAndSwapRelease Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "bar", "weakCompareAndSwapRelease Object");
+        }
+
         // Compare set and get
         {
             Object o = UNSAFE.getAndSetObject(base, offset, "foo");
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestShort.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestShort.java	Sat Mar 05 20:46:42 2016 -0800
@@ -157,6 +157,20 @@
         }
 
 
+        // Lazy
+        {
+            UNSAFE.putShortRelease(base, offset, (short)1);
+            short x = UNSAFE.getShortAcquire(base, offset);
+            assertEquals(x, (short)1, "putRelease short value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.putShortOpaque(base, offset, (short)2);
+            short x = UNSAFE.getShortOpaque(base, offset);
+            assertEquals(x, (short)2, "putOpaque short value");
+        }
+
         // Unaligned
         {
             UNSAFE.putShortUnaligned(base, offset, (short)2);
--- a/hotspot/test/compiler/unsafe/UnsafeGetConstantField.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/UnsafeGetConstantField.java	Sat Mar 05 20:46:42 2016 -0800
@@ -27,6 +27,9 @@
  * @test
  * @summary tests on constant folding of unsafe get operations
  * @library /testlibrary /test/lib
+ *
+ * @requires vm.flavor != "client"
+ *
  * @run main/bootclasspath -XX:+UnlockDiagnosticVMOptions
  *                   -Xbatch -XX:-TieredCompilation
  *                   -XX:+FoldStableValues
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/unsafe/UnsafeGetStableArrayElement.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary tests on constant folding of unsafe get operations from stable arrays
+ * @library /testlibrary /test/lib
+ * @ignore 8151137
+ *
+ * @requires vm.flavor != "client"
+ *
+ * @run main/bootclasspath -XX:+UnlockDiagnosticVMOptions
+ *                   -Xbatch -XX:-TieredCompilation
+ *                   -XX:+FoldStableValues
+ *                   -XX:CompileCommand=dontinline,*Test::test*
+ *                   UnsafeGetStableArrayElement
+ */
+import jdk.internal.misc.Unsafe;
+import jdk.internal.vm.annotation.Stable;
+import java.util.concurrent.Callable;
+
+import static jdk.internal.misc.Unsafe.*;
+import static jdk.test.lib.Asserts.*;
+
+public class UnsafeGetStableArrayElement {
+    @Stable static final boolean[] STABLE_BOOLEAN_ARRAY = new boolean[16];
+    @Stable static final    byte[]    STABLE_BYTE_ARRAY = new    byte[16];
+    @Stable static final   short[]   STABLE_SHORT_ARRAY = new   short[8];
+    @Stable static final    char[]    STABLE_CHAR_ARRAY = new    char[8];
+    @Stable static final     int[]     STABLE_INT_ARRAY = new     int[4];
+    @Stable static final    long[]    STABLE_LONG_ARRAY = new    long[2];
+    @Stable static final   float[]   STABLE_FLOAT_ARRAY = new   float[4];
+    @Stable static final  double[]  STABLE_DOUBLE_ARRAY = new  double[2];
+    @Stable static final  Object[]  STABLE_OBJECT_ARRAY = new  Object[4];
+
+    static {
+        Setter.reset();
+    }
+    static final Unsafe U = Unsafe.getUnsafe();
+
+    static class Setter {
+        private static void setZ(boolean defaultVal) { STABLE_BOOLEAN_ARRAY[0] = defaultVal ? false :                true; }
+        private static void setB(boolean defaultVal) { STABLE_BYTE_ARRAY[0]    = defaultVal ?     0 :      Byte.MAX_VALUE; }
+        private static void setS(boolean defaultVal) { STABLE_SHORT_ARRAY[0]   = defaultVal ?     0 :     Short.MAX_VALUE; }
+        private static void setC(boolean defaultVal) { STABLE_CHAR_ARRAY[0]    = defaultVal ?     0 : Character.MAX_VALUE; }
+        private static void setI(boolean defaultVal) { STABLE_INT_ARRAY[0]     = defaultVal ?     0 :   Integer.MAX_VALUE; }
+        private static void setJ(boolean defaultVal) { STABLE_LONG_ARRAY[0]    = defaultVal ?     0 :      Long.MAX_VALUE; }
+        private static void setF(boolean defaultVal) { STABLE_FLOAT_ARRAY[0]   = defaultVal ?     0 :     Float.MAX_VALUE; }
+        private static void setD(boolean defaultVal) { STABLE_DOUBLE_ARRAY[0]  = defaultVal ?     0 :    Double.MAX_VALUE; }
+        private static void setL(boolean defaultVal) { STABLE_OBJECT_ARRAY[0]  = defaultVal ?  null :        new Object(); }
+
+        static void reset() {
+            setZ(false);
+            setB(false);
+            setS(false);
+            setC(false);
+            setI(false);
+            setJ(false);
+            setF(false);
+            setD(false);
+            setL(false);
+        }
+    }
+
+    static class Test {
+        static void changeZ() { Setter.setZ(true); }
+        static void changeB() { Setter.setB(true); }
+        static void changeS() { Setter.setS(true); }
+        static void changeC() { Setter.setC(true); }
+        static void changeI() { Setter.setI(true); }
+        static void changeJ() { Setter.setJ(true); }
+        static void changeF() { Setter.setF(true); }
+        static void changeD() { Setter.setD(true); }
+        static void changeL() { Setter.setL(true); }
+
+        static boolean testZ_Z() { return U.getBoolean(STABLE_BOOLEAN_ARRAY, ARRAY_BOOLEAN_BASE_OFFSET); }
+        static byte    testZ_B() { return U.getByte(   STABLE_BOOLEAN_ARRAY, ARRAY_BOOLEAN_BASE_OFFSET); }
+        static short   testZ_S() { return U.getShort(  STABLE_BOOLEAN_ARRAY, ARRAY_BOOLEAN_BASE_OFFSET); }
+        static char    testZ_C() { return U.getChar(   STABLE_BOOLEAN_ARRAY, ARRAY_BOOLEAN_BASE_OFFSET); }
+        static int     testZ_I() { return U.getInt(    STABLE_BOOLEAN_ARRAY, ARRAY_BOOLEAN_BASE_OFFSET); }
+        static long    testZ_J() { return U.getLong(   STABLE_BOOLEAN_ARRAY, ARRAY_BOOLEAN_BASE_OFFSET); }
+        static float   testZ_F() { return U.getFloat(  STABLE_BOOLEAN_ARRAY, ARRAY_BOOLEAN_BASE_OFFSET); }
+        static double  testZ_D() { return U.getDouble( STABLE_BOOLEAN_ARRAY, ARRAY_BOOLEAN_BASE_OFFSET); }
+
+        static boolean testB_Z() { return U.getBoolean(STABLE_BYTE_ARRAY, ARRAY_BYTE_BASE_OFFSET); }
+        static byte    testB_B() { return U.getByte(   STABLE_BYTE_ARRAY, ARRAY_BYTE_BASE_OFFSET); }
+        static short   testB_S() { return U.getShort(  STABLE_BYTE_ARRAY, ARRAY_BYTE_BASE_OFFSET); }
+        static char    testB_C() { return U.getChar(   STABLE_BYTE_ARRAY, ARRAY_BYTE_BASE_OFFSET); }
+        static int     testB_I() { return U.getInt(    STABLE_BYTE_ARRAY, ARRAY_BYTE_BASE_OFFSET); }
+        static long    testB_J() { return U.getLong(   STABLE_BYTE_ARRAY, ARRAY_BYTE_BASE_OFFSET); }
+        static float   testB_F() { return U.getFloat(  STABLE_BYTE_ARRAY, ARRAY_BYTE_BASE_OFFSET); }
+        static double  testB_D() { return U.getDouble( STABLE_BYTE_ARRAY, ARRAY_BYTE_BASE_OFFSET); }
+
+        static boolean testS_Z() { return U.getBoolean(STABLE_SHORT_ARRAY, ARRAY_SHORT_BASE_OFFSET); }
+        static byte    testS_B() { return U.getByte(   STABLE_SHORT_ARRAY, ARRAY_SHORT_BASE_OFFSET); }
+        static short   testS_S() { return U.getShort(  STABLE_SHORT_ARRAY, ARRAY_SHORT_BASE_OFFSET); }
+        static char    testS_C() { return U.getChar(   STABLE_SHORT_ARRAY, ARRAY_SHORT_BASE_OFFSET); }
+        static int     testS_I() { return U.getInt(    STABLE_SHORT_ARRAY, ARRAY_SHORT_BASE_OFFSET); }
+        static long    testS_J() { return U.getLong(   STABLE_SHORT_ARRAY, ARRAY_SHORT_BASE_OFFSET); }
+        static float   testS_F() { return U.getFloat(  STABLE_SHORT_ARRAY, ARRAY_SHORT_BASE_OFFSET); }
+        static double  testS_D() { return U.getDouble( STABLE_SHORT_ARRAY, ARRAY_SHORT_BASE_OFFSET); }
+
+        static boolean testC_Z() { return U.getBoolean(STABLE_CHAR_ARRAY, ARRAY_CHAR_BASE_OFFSET); }
+        static byte    testC_B() { return U.getByte(   STABLE_CHAR_ARRAY, ARRAY_CHAR_BASE_OFFSET); }
+        static short   testC_S() { return U.getShort(  STABLE_CHAR_ARRAY, ARRAY_CHAR_BASE_OFFSET); }
+        static char    testC_C() { return U.getChar(   STABLE_CHAR_ARRAY, ARRAY_CHAR_BASE_OFFSET); }
+        static int     testC_I() { return U.getInt(    STABLE_CHAR_ARRAY, ARRAY_CHAR_BASE_OFFSET); }
+        static long    testC_J() { return U.getLong(   STABLE_CHAR_ARRAY, ARRAY_CHAR_BASE_OFFSET); }
+        static float   testC_F() { return U.getFloat(  STABLE_CHAR_ARRAY, ARRAY_CHAR_BASE_OFFSET); }
+        static double  testC_D() { return U.getDouble( STABLE_CHAR_ARRAY, ARRAY_CHAR_BASE_OFFSET); }
+
+        static boolean testI_Z() { return U.getBoolean(STABLE_INT_ARRAY, ARRAY_INT_BASE_OFFSET); }
+        static byte    testI_B() { return U.getByte(   STABLE_INT_ARRAY, ARRAY_INT_BASE_OFFSET); }
+        static short   testI_S() { return U.getShort(  STABLE_INT_ARRAY, ARRAY_INT_BASE_OFFSET); }
+        static char    testI_C() { return U.getChar(   STABLE_INT_ARRAY, ARRAY_INT_BASE_OFFSET); }
+        static int     testI_I() { return U.getInt(    STABLE_INT_ARRAY, ARRAY_INT_BASE_OFFSET); }
+        static long    testI_J() { return U.getLong(   STABLE_INT_ARRAY, ARRAY_INT_BASE_OFFSET); }
+        static float   testI_F() { return U.getFloat(  STABLE_INT_ARRAY, ARRAY_INT_BASE_OFFSET); }
+        static double  testI_D() { return U.getDouble( STABLE_INT_ARRAY, ARRAY_INT_BASE_OFFSET); }
+
+        static boolean testJ_Z() { return U.getBoolean(STABLE_LONG_ARRAY, ARRAY_LONG_BASE_OFFSET); }
+        static byte    testJ_B() { return U.getByte(   STABLE_LONG_ARRAY, ARRAY_LONG_BASE_OFFSET); }
+        static short   testJ_S() { return U.getShort(  STABLE_LONG_ARRAY, ARRAY_LONG_BASE_OFFSET); }
+        static char    testJ_C() { return U.getChar(   STABLE_LONG_ARRAY, ARRAY_LONG_BASE_OFFSET); }
+        static int     testJ_I() { return U.getInt(    STABLE_LONG_ARRAY, ARRAY_LONG_BASE_OFFSET); }
+        static long    testJ_J() { return U.getLong(   STABLE_LONG_ARRAY, ARRAY_LONG_BASE_OFFSET); }
+        static float   testJ_F() { return U.getFloat(  STABLE_LONG_ARRAY, ARRAY_LONG_BASE_OFFSET); }
+        static double  testJ_D() { return U.getDouble( STABLE_LONG_ARRAY, ARRAY_LONG_BASE_OFFSET); }
+
+        static boolean testF_Z() { return U.getBoolean(STABLE_FLOAT_ARRAY, ARRAY_FLOAT_BASE_OFFSET); }
+        static byte    testF_B() { return U.getByte(   STABLE_FLOAT_ARRAY, ARRAY_FLOAT_BASE_OFFSET); }
+        static short   testF_S() { return U.getShort(  STABLE_FLOAT_ARRAY, ARRAY_FLOAT_BASE_OFFSET); }
+        static char    testF_C() { return U.getChar(   STABLE_FLOAT_ARRAY, ARRAY_FLOAT_BASE_OFFSET); }
+        static int     testF_I() { return U.getInt(    STABLE_FLOAT_ARRAY, ARRAY_FLOAT_BASE_OFFSET); }
+        static long    testF_J() { return U.getLong(   STABLE_FLOAT_ARRAY, ARRAY_FLOAT_BASE_OFFSET); }
+        static float   testF_F() { return U.getFloat(  STABLE_FLOAT_ARRAY, ARRAY_FLOAT_BASE_OFFSET); }
+        static double  testF_D() { return U.getDouble( STABLE_FLOAT_ARRAY, ARRAY_FLOAT_BASE_OFFSET); }
+
+        static boolean testD_Z() { return U.getBoolean(STABLE_DOUBLE_ARRAY, ARRAY_DOUBLE_BASE_OFFSET); }
+        static byte    testD_B() { return U.getByte(   STABLE_DOUBLE_ARRAY, ARRAY_DOUBLE_BASE_OFFSET); }
+        static short   testD_S() { return U.getShort(  STABLE_DOUBLE_ARRAY, ARRAY_DOUBLE_BASE_OFFSET); }
+        static char    testD_C() { return U.getChar(   STABLE_DOUBLE_ARRAY, ARRAY_DOUBLE_BASE_OFFSET); }
+        static int     testD_I() { return U.getInt(    STABLE_DOUBLE_ARRAY, ARRAY_DOUBLE_BASE_OFFSET); }
+        static long    testD_J() { return U.getLong(   STABLE_DOUBLE_ARRAY, ARRAY_DOUBLE_BASE_OFFSET); }
+        static float   testD_F() { return U.getFloat(  STABLE_DOUBLE_ARRAY, ARRAY_DOUBLE_BASE_OFFSET); }
+        static double  testD_D() { return U.getDouble( STABLE_DOUBLE_ARRAY, ARRAY_DOUBLE_BASE_OFFSET); }
+
+        static Object  testL_L() { return U.getObject( STABLE_OBJECT_ARRAY, ARRAY_OBJECT_BASE_OFFSET); }
+        static boolean testL_Z() { return U.getBoolean(STABLE_OBJECT_ARRAY, ARRAY_OBJECT_BASE_OFFSET); }
+        static byte    testL_B() { return U.getByte(   STABLE_OBJECT_ARRAY, ARRAY_OBJECT_BASE_OFFSET); }
+        static short   testL_S() { return U.getShort(  STABLE_OBJECT_ARRAY, ARRAY_OBJECT_BASE_OFFSET); }
+        static char    testL_C() { return U.getChar(   STABLE_OBJECT_ARRAY, ARRAY_OBJECT_BASE_OFFSET); }
+        static int     testL_I() { return U.getInt(    STABLE_OBJECT_ARRAY, ARRAY_OBJECT_BASE_OFFSET); }
+        static long    testL_J() { return U.getLong(   STABLE_OBJECT_ARRAY, ARRAY_OBJECT_BASE_OFFSET); }
+        static float   testL_F() { return U.getFloat(  STABLE_OBJECT_ARRAY, ARRAY_OBJECT_BASE_OFFSET); }
+        static double  testL_D() { return U.getDouble( STABLE_OBJECT_ARRAY, ARRAY_OBJECT_BASE_OFFSET); }
+
+        static short   testS_U() { return U.getShortUnaligned(STABLE_SHORT_ARRAY, ARRAY_SHORT_BASE_OFFSET + 1); }
+        static char    testC_U() { return U.getCharUnaligned(  STABLE_CHAR_ARRAY,  ARRAY_CHAR_BASE_OFFSET + 1); }
+        static int     testI_U() { return U.getIntUnaligned(    STABLE_INT_ARRAY,   ARRAY_INT_BASE_OFFSET + 1); }
+        static long    testJ_U() { return U.getLongUnaligned(  STABLE_LONG_ARRAY,  ARRAY_LONG_BASE_OFFSET + 1); }
+    }
+
+    static void run(Callable<?> c) throws Exception {
+        run(c, null, null);
+    }
+
+    static void run(Callable<?> c, Runnable sameResultAction, Runnable changeResultAction) throws Exception {
+        Object first = c.call();
+
+        // Trigger compilation.
+        for (int i = 0; i < 20_000; i++) {
+            // Don't compare results here, since most of Test::testL_* results vary across iterations (due to GC).
+            c.call();
+        }
+
+        if (sameResultAction != null) {
+            sameResultAction.run();
+            assertEQ(first, c.call());
+        }
+
+        if (changeResultAction != null) {
+            changeResultAction.run();
+            assertNE(first, c.call());
+            assertEQ(c.call(), c.call());
+        }
+    }
+
+    static void testMatched(Callable<?> c, Runnable setDefaultAction) throws Exception {
+        run(c, setDefaultAction, null);
+        Setter.reset();
+    }
+
+    static void testMismatched(Callable<?> c, Runnable setDefaultAction) throws Exception {
+        run(c, null, setDefaultAction);
+        Setter.reset();
+    }
+
+    public static void main(String[] args) throws Exception {
+        // boolean[], aligned accesses
+        testMatched(   Test::testZ_Z, Test::changeZ);
+        testMismatched(Test::testZ_B, Test::changeZ);
+        testMismatched(Test::testZ_S, Test::changeZ);
+        testMismatched(Test::testZ_C, Test::changeZ);
+        testMismatched(Test::testZ_I, Test::changeZ);
+        testMismatched(Test::testZ_J, Test::changeZ);
+        testMismatched(Test::testZ_F, Test::changeZ);
+        testMismatched(Test::testZ_D, Test::changeZ);
+
+        // byte[], aligned accesses
+        testMismatched(Test::testB_Z, Test::changeB);
+        testMatched(   Test::testB_B, Test::changeB);
+        testMismatched(Test::testB_S, Test::changeB);
+        testMismatched(Test::testB_C, Test::changeB);
+        testMismatched(Test::testB_I, Test::changeB);
+        testMismatched(Test::testB_J, Test::changeB);
+        testMismatched(Test::testB_F, Test::changeB);
+        testMismatched(Test::testB_D, Test::changeB);
+
+        // short[], aligned accesses
+        testMismatched(Test::testS_Z, Test::changeS);
+        testMismatched(Test::testS_B, Test::changeS);
+        testMatched(   Test::testS_S, Test::changeS);
+        testMismatched(Test::testS_C, Test::changeS);
+        testMismatched(Test::testS_I, Test::changeS);
+        testMismatched(Test::testS_J, Test::changeS);
+        testMismatched(Test::testS_F, Test::changeS);
+        testMismatched(Test::testS_D, Test::changeS);
+
+        // char[], aligned accesses
+        testMismatched(Test::testC_Z, Test::changeC);
+        testMismatched(Test::testC_B, Test::changeC);
+        testMismatched(Test::testC_S, Test::changeC);
+        testMatched(   Test::testC_C, Test::changeC);
+        testMismatched(Test::testC_I, Test::changeC);
+        testMismatched(Test::testC_J, Test::changeC);
+        testMismatched(Test::testC_F, Test::changeC);
+        testMismatched(Test::testC_D, Test::changeC);
+
+        // int[], aligned accesses
+        testMismatched(Test::testI_Z, Test::changeI);
+        testMismatched(Test::testI_B, Test::changeI);
+        testMismatched(Test::testI_S, Test::changeI);
+        testMismatched(Test::testI_C, Test::changeI);
+        testMatched(   Test::testI_I, Test::changeI);
+        testMismatched(Test::testI_J, Test::changeI);
+        testMismatched(Test::testI_F, Test::changeI);
+        testMismatched(Test::testI_D, Test::changeI);
+
+        // long[], aligned accesses
+        testMismatched(Test::testJ_Z, Test::changeJ);
+        testMismatched(Test::testJ_B, Test::changeJ);
+        testMismatched(Test::testJ_S, Test::changeJ);
+        testMismatched(Test::testJ_C, Test::changeJ);
+        testMismatched(Test::testJ_I, Test::changeJ);
+        testMatched(   Test::testJ_J, Test::changeJ);
+        testMismatched(Test::testJ_F, Test::changeJ);
+        testMismatched(Test::testJ_D, Test::changeJ);
+
+        // float[], aligned accesses
+        testMismatched(Test::testF_Z, Test::changeF);
+        testMismatched(Test::testF_B, Test::changeF);
+        testMismatched(Test::testF_S, Test::changeF);
+        testMismatched(Test::testF_C, Test::changeF);
+        testMismatched(Test::testF_I, Test::changeF);
+        testMismatched(Test::testF_J, Test::changeF);
+        testMatched(   Test::testF_F, Test::changeF);
+        testMismatched(Test::testF_D, Test::changeF);
+
+        // double[], aligned accesses
+        testMismatched(Test::testD_Z, Test::changeD);
+        testMismatched(Test::testD_B, Test::changeD);
+        testMismatched(Test::testD_S, Test::changeD);
+        testMismatched(Test::testD_C, Test::changeD);
+        testMismatched(Test::testD_I, Test::changeD);
+        testMismatched(Test::testD_J, Test::changeD);
+        testMismatched(Test::testD_F, Test::changeD);
+        testMatched(   Test::testD_D, Test::changeD);
+
+        // Object[], aligned accesses
+        testMismatched(Test::testL_J, Test::changeL); // long & double are always as large as an OOP
+        testMismatched(Test::testL_D, Test::changeL);
+        testMatched(   Test::testL_L, Test::changeL);
+
+        // Unaligned accesses
+        testMismatched(Test::testS_U, Test::changeS);
+        testMismatched(Test::testC_U, Test::changeC);
+        testMismatched(Test::testI_U, Test::changeI);
+        testMismatched(Test::testJ_U, Test::changeJ);
+
+        // No way to reliably check the expected behavior:
+        //   (1) OOPs change during GC;
+        //   (2) there's no way to reliably change some part of an OOP (e.g., when reading a byte from it).
+        //
+        // Just trigger the compilation hoping to catch any problems with asserts.
+        run(Test::testL_B);
+        run(Test::testL_Z);
+        run(Test::testL_S);
+        run(Test::testL_C);
+        run(Test::testL_I);
+        run(Test::testL_F);
+    }
+}
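
The matched/mismatched split above follows from how @Stable is treated: once a @Stable array element holds a non-default value, the JIT may fold a read of it to a constant, but only when the access type and alignment match the element, so a matched read may keep returning the old value after the element changes while a mismatched or unaligned read is expected to observe the change. A minimal sketch of the folding effect the matched cases rely on, assuming the class is loaded from the boot class path with -XX:+FoldStableValues as in the @run line above, and using a hypothetical class purely for illustration:

    import jdk.internal.vm.annotation.Stable;

    public class StableFoldingSketch {
        @Stable static final int[] CONST = new int[1];

        static int read() {
            return CONST[0];   // may be folded to a constant once CONST[0] != 0
        }

        public static void main(String[] args) {
            CONST[0] = 42;
            for (int i = 0; i < 20_000; i++) {  // warm up so read() gets compiled
                read();
            }
            CONST[0] = 7;                       // mutate after possible folding
            System.out.println(read());         // compiled code may still print 42
        }
    }
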
--- a/hotspot/test/compiler/unsafe/X-UnsafeAccessTest.java.template	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/compiler/unsafe/X-UnsafeAccessTest.java.template	Sat Mar 05 20:46:42 2016 -0800
@@ -170,6 +170,22 @@
 #end[Ordered]
 
 #if[JdkInternalMisc]
+        // Lazy
+        {
+            UNSAFE.put$Type$Release(base, offset, $value1$);
+            $type$ x = UNSAFE.get$Type$Acquire(base, offset);
+            assertEquals(x, $value1$, "putRelease $type$ value");
+        }
+
+        // Opaque
+        {
+            UNSAFE.put$Type$Opaque(base, offset, $value2$);
+            $type$ x = UNSAFE.get$Type$Opaque(base, offset);
+            assertEquals(x, $value2$, "putOpaque $type$ value");
+        }
+#end[JdkInternalMisc]
+
+#if[JdkInternalMisc]
 #if[Unaligned]
         // Unaligned
         {
@@ -210,6 +226,72 @@
             assertEquals(x, $value2$, "failing compareAndSwap $type$ value");
         }
 
+#if[JdkInternalMisc]
+        // Advanced compare
+        {
+            $type$ r = UNSAFE.compareAndExchange$Type$Volatile(base, offset, $value2$, $value1$);
+            assertEquals(r, $value2$, "success compareAndExchangeVolatile $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value1$, "success compareAndExchangeVolatile $type$ value");
+        }
+
+        {
+            $type$ r = UNSAFE.compareAndExchange$Type$Volatile(base, offset, $value2$, $value3$);
+            assertEquals(r, $value1$, "failing compareAndExchangeVolatile $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value1$, "failing compareAndExchangeVolatile $type$ value");
+        }
+
+        {
+            $type$ r = UNSAFE.compareAndExchange$Type$Acquire(base, offset, $value1$, $value2$);
+            assertEquals(r, $value1$, "success compareAndExchangeAcquire $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value2$, "success compareAndExchangeAcquire $type$ value");
+        }
+
+        {
+            $type$ r = UNSAFE.compareAndExchange$Type$Acquire(base, offset, $value1$, $value3$);
+            assertEquals(r, $value2$, "failing compareAndExchangeAcquire $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value2$, "failing compareAndExchangeAcquire $type$ value");
+        }
+
+        {
+            $type$ r = UNSAFE.compareAndExchange$Type$Release(base, offset, $value2$, $value1$);
+            assertEquals(r, $value2$, "success compareAndExchangeRelease $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value1$, "success compareAndExchangeRelease $type$ value");
+        }
+
+        {
+            $type$ r = UNSAFE.compareAndExchange$Type$Release(base, offset, $value2$, $value3$);
+            assertEquals(r, $value1$, "failing compareAndExchangeRelease $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value1$, "failing compareAndExchangeRelease $type$ value");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwap$Type$(base, offset, $value1$, $value2$);
+            assertEquals(r, true, "weakCompareAndSwap $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value2$, "weakCompareAndSwap $type$ value");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwap$Type$Acquire(base, offset, $value2$, $value1$);
+            assertEquals(r, true, "weakCompareAndSwapAcquire $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value1$, "weakCompareAndSwapAcquire $type$");
+        }
+
+        {
+            boolean r = UNSAFE.weakCompareAndSwap$Type$Release(base, offset, $value1$, $value2$);
+            assertEquals(r, true, "weakCompareAndSwapRelease $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value2$, "weakCompareAndSwapRelease $type$");
+        }
+#end[JdkInternalMisc]
+
         // Compare set and get
         {
             $type$ o = UNSAFE.getAndSet$Type$(base, offset, $value1$);
@@ -244,4 +326,5 @@
     }
 #end[!boolean]
 #end[!Object]
-}
\ No newline at end of file
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/unsafe/generate-unsafe-tests.sh	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+javac -d . ../../../../jdk/make/src/classes/build/tools/spp/Spp.java
+
+SPP=build.tools.spp.Spp
+
+# Generates unsafe access tests for objects and all primitive types
+# $1 = package name to Unsafe, sun.misc | jdk.internal.misc
+# $2 = test class qualifier name, SunMisc | JdkInternalMisc
+function generate {
+    package=$1
+    Qualifier=$2
+
+    for type in boolean byte short char int long float double Object
+    do
+      Type="$(tr '[:lower:]' '[:upper:]' <<< ${type:0:1})${type:1}"
+      args="-K$type -Dtype=$type -DType=$Type"
+
+      case $type in
+        Object|int|long)
+          args="$args -KCAS -KOrdered"
+          ;;
+      esac
+
+      case $type in
+        int|long)
+          args="$args -KAtomicAdd"
+          ;;
+      esac
+
+      case $type in
+        short|char|int|long)
+          args="$args -KUnaligned"
+          ;;
+      esac
+
+      case $type in
+        boolean)
+          value1=true
+          value2=false
+          value3=false
+          ;;
+        byte)
+          value1='(byte)1'
+          value2='(byte)2'
+          value3='(byte)3'
+          ;;
+        short)
+          value1='(short)1'
+          value2='(short)2'
+          value3='(short)3'
+          ;;
+        char)
+          value1=\'a\'
+          value2=\'b\'
+          value3=\'c\'
+          ;;
+        int)
+          value1=1
+          value2=2
+          value3=3
+          ;;
+        long)
+          value1=1L
+          value2=2L
+          value3=3L
+          ;;
+        float)
+          value1=1.0f
+          value2=2.0f
+          value3=3.0f
+          ;;
+        double)
+          value1=1.0d
+          value2=2.0d
+          value3=3.0d
+          ;;
+        Object)
+          value1=\"foo\"
+          value2=\"bar\"
+          value3=\"baz\"
+          ;;
+      esac
+
+      args="$args -Dvalue1=$value1 -Dvalue2=$value2 -Dvalue3=$value3"
+
+      echo $args
+
+      java $SPP -nel -K$Qualifier -Dpackage=$package -DQualifier=$Qualifier \
+          $args < X-UnsafeAccessTest.java.template > ${Qualifier}UnsafeAccessTest${Type}.java
+    done
+}
+
+generate sun.misc SunMisc
+generate jdk.internal.misc JdkInternalMisc
+
+rm -fr build
\ No newline at end of file
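
To make the Spp substitution concrete: with the arguments the script passes for int (-Dtype=int -DType=Int -Dvalue1=1 -Dvalue2=2 -Dvalue3=3) and the JdkInternalMisc qualifier, the successful compareAndExchangeRelease block from the template above comes out roughly as:

    {
        int r = UNSAFE.compareAndExchangeIntRelease(base, offset, 2, 1);
        assertEquals(r, 2, "success compareAndExchangeRelease int");
        int x = UNSAFE.getInt(base, offset);
        assertEquals(x, 1, "success compareAndExchangeRelease int value");
    }
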
--- a/hotspot/test/gc/g1/TestGCLogMessages.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/gc/g1/TestGCLogMessages.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
 
 /*
  * @test TestGCLogMessages
- * @bug 8035406 8027295 8035398 8019342 8027959 8048179 8027962 8069330
+ * @bug 8035406 8027295 8035398 8019342 8027959 8048179 8027962 8069330 8076463 8150630
  * @summary Ensure the output for a minor GC with G1
  * includes the expected necessary messages.
  * @key gc
@@ -38,10 +38,24 @@
 public class TestGCLogMessages {
 
     private enum Level {
-        OFF, DEBUG, TRACE;
-        public boolean lessOrEqualTo(Level other) {
+        OFF(""),
+        INFO("info"),
+        DEBUG("debug"),
+        TRACE("trace");
+
+        private String logName;
+
+        Level(String logName) {
+            this.logName = logName;
+        }
+
+        public boolean lessThan(Level other) {
             return this.compareTo(other) < 0;
         }
+
+        public String toString() {
+            return logName;
+        }
     }
 
     private class LogMessageWithLevel {
@@ -56,44 +70,48 @@
 
     private LogMessageWithLevel allLogMessages[] = new LogMessageWithLevel[] {
         // Update RS
-        new LogMessageWithLevel("Scan HCC", Level.DEBUG),
+        new LogMessageWithLevel("Scan HCC", Level.TRACE),
         // Ext Root Scan
-        new LogMessageWithLevel("Thread Roots:", Level.DEBUG),
-        new LogMessageWithLevel("StringTable Roots:", Level.DEBUG),
-        new LogMessageWithLevel("Universe Roots:", Level.DEBUG),
-        new LogMessageWithLevel("JNI Handles Roots:", Level.DEBUG),
-        new LogMessageWithLevel("ObjectSynchronizer Roots:", Level.DEBUG),
-        new LogMessageWithLevel("FlatProfiler Roots", Level.DEBUG),
-        new LogMessageWithLevel("Management Roots", Level.DEBUG),
-        new LogMessageWithLevel("SystemDictionary Roots", Level.DEBUG),
-        new LogMessageWithLevel("CLDG Roots", Level.DEBUG),
-        new LogMessageWithLevel("JVMTI Roots", Level.DEBUG),
-        new LogMessageWithLevel("SATB Filtering", Level.DEBUG),
-        new LogMessageWithLevel("CM RefProcessor Roots", Level.DEBUG),
-        new LogMessageWithLevel("Wait For Strong CLD", Level.DEBUG),
-        new LogMessageWithLevel("Weak CLD Roots", Level.DEBUG),
+        new LogMessageWithLevel("Thread Roots", Level.TRACE),
+        new LogMessageWithLevel("StringTable Roots", Level.TRACE),
+        new LogMessageWithLevel("Universe Roots", Level.TRACE),
+        new LogMessageWithLevel("JNI Handles Roots", Level.TRACE),
+        new LogMessageWithLevel("ObjectSynchronizer Roots", Level.TRACE),
+        new LogMessageWithLevel("FlatProfiler Roots", Level.TRACE),
+        new LogMessageWithLevel("Management Roots", Level.TRACE),
+        new LogMessageWithLevel("SystemDictionary Roots", Level.TRACE),
+        new LogMessageWithLevel("CLDG Roots", Level.TRACE),
+        new LogMessageWithLevel("JVMTI Roots", Level.TRACE),
+        new LogMessageWithLevel("SATB Filtering", Level.TRACE),
+        new LogMessageWithLevel("CM RefProcessor Roots", Level.TRACE),
+        new LogMessageWithLevel("Wait For Strong CLD", Level.TRACE),
+        new LogMessageWithLevel("Weak CLD Roots", Level.TRACE),
         // Redirty Cards
         new LogMessageWithLevel("Redirty Cards", Level.DEBUG),
-        new LogMessageWithLevel("Parallel Redirty", Level.DEBUG),
-        new LogMessageWithLevel("Redirtied Cards", Level.DEBUG),
+        new LogMessageWithLevel("Parallel Redirty", Level.TRACE),
+        new LogMessageWithLevel("Redirtied Cards", Level.TRACE),
         // Misc Top-level
-        new LogMessageWithLevel("Code Root Purge", Level.DEBUG),
-        new LogMessageWithLevel("String Dedup Fixup", Level.DEBUG),
-        new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG),
+        new LogMessageWithLevel("Code Roots Purge", Level.DEBUG),
+        new LogMessageWithLevel("String Dedup Fixup", Level.INFO),
+        new LogMessageWithLevel("Expand Heap After Collection", Level.INFO),
         // Free CSet
-        new LogMessageWithLevel("Young Free CSet", Level.TRACE),
-        new LogMessageWithLevel("Non-Young Free CSet", Level.TRACE),
+        new LogMessageWithLevel("Young Free Collection Set", Level.DEBUG),
+        new LogMessageWithLevel("Non-Young Free Collection Set", Level.DEBUG),
         // Humongous Eager Reclaim
         new LogMessageWithLevel("Humongous Reclaim", Level.DEBUG),
         new LogMessageWithLevel("Humongous Register", Level.DEBUG),
+        // Preserve CM Referents
+        new LogMessageWithLevel("Preserve CM Refs", Level.DEBUG),
+        // Merge PSS
+        new LogMessageWithLevel("Merge Per-Thread State", Level.INFO),
     };
 
     void checkMessagesAtLevel(OutputAnalyzer output, LogMessageWithLevel messages[], Level level) throws Exception {
         for (LogMessageWithLevel l : messages) {
-            if (level.lessOrEqualTo(l.level)) {
+            if (level.lessThan(l.level)) {
                 output.shouldNotContain(l.message);
             } else {
-                output.shouldContain(l.message);
+                output.shouldMatch("\\[" + l.level + ".*" + l.message);
             }
         }
     }
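
With this change every expected message has to show up on a line carrying its expected unified-logging level tag, which shouldMatch checks with a regular expression of the form \[<level>.*<message>. A standalone sketch of that match; the log line, tag set and timing value here are hypothetical, since only the "[debug" prefix and the message text matter:

    import java.util.regex.Pattern;

    class LogLevelMatchSketch {
        public static void main(String[] args) {
            // Hypothetical -Xlog line at debug level.
            String line = "[0.123s][debug][gc,phases  ] GC(0)   Redirty Cards: 0.1ms";
            // Mirrors output.shouldMatch("\\[" + level + ".*" + message)
            System.out.println(Pattern.compile("\\[debug.*Redirty Cards").matcher(line).find()); // true
        }
    }
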
--- a/hotspot/test/gc/g1/TestPLABOutput.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/gc/g1/TestPLABOutput.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@
 
         System.out.println(output.getStdout());
 
-        String pattern = ".*GC\\(0\\) .*allocated = (\\d+).*";
+        String pattern = ".*GC\\(0\\) .*allocated: (\\d+).*";
         Pattern r = Pattern.compile(pattern);
         Matcher m = r.matcher(output.getStdout());
 
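
The updated pattern follows the new gc+plab output, where the statistic is printed as "allocated: <n>B" instead of "allocated = <n>". A minimal standalone sketch of the extraction, using one of the sample lines quoted in the LogParser class comment further below:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class PlabAllocatedSketch {
        public static void main(String[] args) {
            String line = "[0.330s][debug][gc,plab  ] GC(0) Young PLAB allocation: "
                    + "allocated: 1825632B, wasted: 29424B, unused: 2320B";
            Matcher m = Pattern.compile(".*GC\\(0\\) .*allocated: (\\d+).*").matcher(line);
            if (m.find()) {
                System.out.println(m.group(1)); // 1825632
            }
        }
    }
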
--- a/hotspot/test/gc/g1/plab/TestPLABPromotion.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/gc/g1/plab/TestPLABPromotion.java	Sat Mar 05 20:46:42 2016 -0800
@@ -23,7 +23,7 @@
 
  /*
  * @test TestPLABPromotion
- * @bug 8141278
+ * @bug 8141278 8141141
  * @summary Test PLAB promotion
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires vm.opt.FlightRecorder != true
@@ -130,16 +130,15 @@
         long plabAllocatedOld;
         long directAllocatedOld;
         long memAllocated = testCase.getMemToFill();
-        long wordSize = Platform.is32bit() ? 4l : 8l;
         LogParser logParser = new LogParser(output);
 
         Map<String, Long> survivorStats = getPlabStats(logParser, LogParser.ReportType.SURVIVOR_STATS, GC_ID_SURVIVOR_STATS);
         Map<String, Long> oldStats = getPlabStats(logParser, LogParser.ReportType.OLD_STATS, GC_ID_OLD_STATS);
 
-        plabAllocatedSurvivor = wordSize * survivorStats.get("used");
-        directAllocatedSurvivor = wordSize * survivorStats.get("direct_allocated");
-        plabAllocatedOld = wordSize * oldStats.get("used");
-        directAllocatedOld = wordSize * oldStats.get("direct_allocated");
+        plabAllocatedSurvivor = survivorStats.get("used");
+        directAllocatedSurvivor = survivorStats.get("direct allocated");
+        plabAllocatedOld = oldStats.get("used");
+        directAllocatedOld = oldStats.get("direct allocated");
 
         System.out.printf("Survivor PLAB allocated:%17d Direct allocated: %17d Mem consumed:%17d%n", plabAllocatedSurvivor, directAllocatedSurvivor, memAllocated);
         System.out.printf("Old      PLAB allocated:%17d Direct allocated: %17d Mem consumed:%17d%n", plabAllocatedOld, directAllocatedOld, memAllocated);
--- a/hotspot/test/gc/g1/plab/TestPLABResize.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/gc/g1/plab/TestPLABResize.java	Sat Mar 05 20:46:42 2016 -0800
@@ -23,7 +23,7 @@
 
  /*
  * @test TestPLABResize
- * @bug 8141278
+ * @bug 8141278 8141141
  * @summary Test for PLAB resizing
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires vm.opt.FlightRecorder != true
@@ -117,7 +117,7 @@
                 .map(item -> {
                     return item.getValue()
                             .get(LogParser.ReportType.SURVIVOR_STATS)
-                            .get("desired_plab_sz");
+                            .get("actual");
                 })
                 .collect(Collectors.toCollection(ArrayList::new));
 
--- a/hotspot/test/gc/g1/plab/lib/LogParser.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/gc/g1/plab/lib/LogParser.java	Sat Mar 05 20:46:42 2016 -0800
@@ -35,14 +35,12 @@
  *
  * Typical GC log with PLAB statistics (options - -Xlog:gc=debug,gc+plab=debug) looks like:
  *
- * [2,244s][info   ][gc     ] GC(30) Concurrent Mark abort
- * [2,245s][debug  ][gc,plab] GC(33)  (allocated = 1 wasted = 0 unused = 0 used = 1 undo_waste = 0 region_end_waste = 0 regions filled = 0 direct_allocated = 0 failure_used = 0 failure_waste = 0)  (plab_sz = 0 desired_plab_sz = 258)
- * [2,245s][debug  ][gc,plab] GC(33)  (allocated = 1 wasted = 0 unused = 0 used = 1 undo_waste = 0 region_end_waste = 0 regions filled = 0 direct_allocated = 0 failure_used = 0 failure_waste = 0)  (plab_sz = 0 desired_plab_sz = 258)
- * [2,245s][info   ][gc     ] GC(33) Pause Young (G1 Evacuation Pause) 127M->127M(128M) (2,244s, 2,245s) 0,899ms
- * [2,246s][debug  ][gc,plab] GC(34)  (allocated = 1 wasted = 0 unused = 0 used = 1 undo_waste = 0 region_end_waste = 0 regions filled = 0 direct_allocated = 0 failure_used = 0 failure_waste = 0)  (plab_sz = 0 desired_plab_sz = 258)
- * [2,246s][debug  ][gc,plab] GC(34)  (allocated = 1 wasted = 0 unused = 0 used = 1 undo_waste = 0 region_end_waste = 0 regions filled = 0 direct_allocated = 0 failure_used = 0 failure_waste = 0)  (plab_sz = 0 desired_plab_sz = 258)
- * [2,246s][info   ][gc     ] GC(34) Pause Initial Mark (G1 Evacuation Pause) 127M->127M(128M) (2,245s, 2,246s) 0,907ms
-
+ * [0.330s][debug][gc,plab  ] GC(0) Young PLAB allocation: allocated: 1825632B, wasted: 29424B, unused: 2320B, used: 1793888B, undo waste: 0B,
+ * [0.330s][debug][gc,plab  ] GC(0) Young other allocation: region end waste: 0B, regions filled: 2, direct allocated: 271520B, failure used: 0B, failure wasted: 0B
+ * [0.330s][debug][gc,plab  ] GC(0) Young sizing: calculated: 358776B, actual: 358776B
+ * [0.330s][debug][gc,plab  ] GC(0) Old PLAB allocation: allocated: 427248B, wasted: 592B, unused: 368584B, used: 58072B, undo waste: 0B,
+ * [0.330s][debug][gc,plab  ] GC(0) Old other allocation: region end waste: 0B, regions filled: 1, direct allocated: 41704B, failure used: 0B, failure wasted: 0B
+ * [0.330s][debug][gc,plab  ] GC(0) Old sizing: calculated: 11608B, actual: 11608B
  */
 final public class LogParser {
 
@@ -53,7 +51,6 @@
      * Type of parsed log element.
      */
     public static enum ReportType {
-
         SURVIVOR_STATS,
         OLD_STATS
     }
@@ -64,8 +61,8 @@
 
     // GC ID
     private static final Pattern GC_ID_PATTERN = Pattern.compile("\\[gc,plab\\s*\\] GC\\((\\d+)\\)");
-    // Pattern for extraction pair <name>=<numeric value>
-    private static final Pattern PAIRS_PATTERN = Pattern.compile("\\w+\\s+=\\s+\\d+");
+    // Pattern for extraction pair <name>: <numeric value>
+    private static final Pattern PAIRS_PATTERN = Pattern.compile("\\w* \\w+:\\s+\\d+");
 
     /**
      * Construct LogParser Object
@@ -108,24 +105,29 @@
                 if (matcher.find()) {
                     Map<ReportType,Map<String, Long>> oneReportItem;
                     ReportType reportType;
-                    // Second line in log is statistics for Old PLAB allocation
-                    if ( !allocationStatistics.containsKey(gc_id.get()) ) {
-                        oneReportItem = new EnumMap<>(ReportType.class);
+
+                    if (!allocationStatistics.containsKey(gc_id.get())) {
+                        allocationStatistics.put(gc_id.get(), new EnumMap<>(ReportType.class));
+                    }
+
+                    if ( line.contains("Young") ) {
                         reportType = ReportType.SURVIVOR_STATS;
-                        allocationStatistics.put(gc_id.get(), oneReportItem);
                     } else {
-                        oneReportItem = allocationStatistics.get(gc_id.get());
                         reportType = ReportType.OLD_STATS;
                     }
 
+                    oneReportItem = allocationStatistics.get(gc_id.get());
+                    if (!oneReportItem.containsKey(reportType)) {
+                        oneReportItem.put(reportType, new HashMap<String, Long>());
+                    }
+
                     // Extract all pairs from log.
-                    HashMap<String, Long> plabStats = new HashMap<>();
+                    Map<String, Long> plabStats = oneReportItem.get(reportType);
                     do {
                         String pair = matcher.group();
-                        String[] nameValue = pair.replaceAll(" ", "").split("=");
-                        plabStats.put(nameValue[0], Long.parseLong(nameValue[1]));
+                        String[] nameValue = pair.replaceAll(": ", ":").split(":");
+                        plabStats.put(nameValue[0].trim(), Long.parseLong(nameValue[1]));
                     } while (matcher.find());
-                    oneReportItem.put(reportType,plabStats);
                 }
             }
         }
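
For reference, a standalone sketch of what the new PAIRS_PATTERN and the colon-based split produce for one of the sample lines in the class comment; the printed names ("regions filled", "direct allocated", ...) are the keys the PLAB tests look up (note that a multi-word label such as "region end waste" is captured only from its last two words):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class PlabPairSketch {
        public static void main(String[] args) {
            String line = "[0.330s][debug][gc,plab  ] GC(0) Young other allocation: "
                    + "region end waste: 0B, regions filled: 2, direct allocated: 271520B";
            Matcher m = Pattern.compile("\\w* \\w+:\\s+\\d+").matcher(line);
            while (m.find()) {
                String[] nameValue = m.group().replaceAll(": ", ":").split(":");
                System.out.println(nameValue[0].trim() + " = " + Long.parseLong(nameValue[1]));
            }
        }
    }
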
--- a/hotspot/test/native_sanity/JniVersion.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/native_sanity/JniVersion.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,12 +27,12 @@
  */
 public class JniVersion {
 
-    public static final int JNI_VERSION_1_8 = 0x00010008;
+    public static final int JNI_VERSION_9 = 0x00090000;
 
     public static void main(String... args) throws Exception {
         System.loadLibrary("JniVersion");
         int res = getJniVersion();
-        if (res < JNI_VERSION_1_8) {
+        if (res != JNI_VERSION_9) {
             throw new Exception("Unexpected value returned from getJniVersion(): 0x" + Integer.toHexString(res));
         }
     }
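
The two constants reflect the JNI version encoding: the major version sits in the high 16 bits and the minor version in the low 16 bits, so 0x00010008 is JNI 1.8 and 0x00090000 is JNI 9.0. A small sketch of decoding the value returned by getJniVersion():

    class JniVersionDecodeSketch {
        public static void main(String[] args) {
            int res = 0x00090000;              // expected JNI version in JDK 9
            int major = (res >> 16) & 0xffff;  // 9
            int minor = res & 0xffff;          // 0
            System.out.println(major + "." + minor);
        }
    }
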
--- a/hotspot/test/runtime/logging/ItablesTest.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/runtime/logging/ItablesTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -49,7 +49,6 @@
             output.shouldContain("invokespecial resolved method: caller-class:ClassB");
             output.shouldContain("invokespecial selected method: resolved-class:ClassB");
             output.shouldContain("invokeinterface selected method: receiver-class");
-            output.shouldContain("Resolving: klass: ");
             output.shouldHaveExitValue(0);
 
             pb = ProcessTools.createJavaProcessBuilder("-Xlog:itables=trace", "ItablesVtableTest");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/logging/ProtectionDomainVerificationTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test ProtectionDomainVerificationTest
+ * @bug 8149064
+ * @library /testlibrary
+ * @build jdk.test.lib.OutputAnalyzer jdk.test.lib.Platform jdk.test.lib.ProcessTools
+ * @run driver ProtectionDomainVerificationTest
+ */
+
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.Platform;
+import jdk.test.lib.ProcessTools;
+
+public class ProtectionDomainVerificationTest {
+
+    public static void main(String... args) throws Exception {
+
+        // -Xlog:protectiondomain=trace
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:protectiondomain=trace",
+                                                                  "-Xmx64m",
+                                                                  Hello.class.getName());
+        OutputAnalyzer out = new OutputAnalyzer(pb.start());
+        out.shouldContain("[protectiondomain] Checking package access");
+        out.shouldContain("[protectiondomain] pd set count = #");
+
+        // -Xlog:protectiondomain=debug
+        pb = ProcessTools.createJavaProcessBuilder("-Xlog:protectiondomain=debug",
+                                                   "-Xmx64m",
+                                                   Hello.class.getName());
+        out = new OutputAnalyzer(pb.start());
+        out.shouldContain("[protectiondomain] Checking package access");
+        out.shouldNotContain("pd set count = #");
+    }
+
+    public static class Hello {
+        public static void main(String[] args) {
+            System.out.print("Hello!");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/logging/ThreadLoggingTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, SAP SE and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.    See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8149036 8150619
+ * @summary os+thread output should contain logging calls for thread start, stop, attach and detach
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management
+ * @build jdk.test.lib.OutputAnalyzer jdk.test.lib.ProcessTools
+ * @run driver ThreadLoggingTest
+ * @author Thomas Stuefe (SAP)
+ */
+
+import java.io.File;
+import java.util.Map;
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+
+public class ThreadLoggingTest {
+
+    static void analyzeOutputForInfoLevel(OutputAnalyzer output) throws Exception {
+        output.shouldContain("Thread started");
+        output.shouldContain("Thread is alive");
+        output.shouldContain("Thread finished");
+        output.shouldHaveExitValue(0);
+    }
+
+    static void analyzeOutputForDebugLevel(OutputAnalyzer output) throws Exception {
+        analyzeOutputForInfoLevel(output);
+        output.shouldContain("stack dimensions");
+        output.shouldContain("stack guard pages");
+    }
+
+    public static void main(String[] args) throws Exception {
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:os+thread", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        analyzeOutputForInfoLevel(output);
+
+        pb = ProcessTools.createJavaProcessBuilder("-Xlog:os+thread=debug", "-version");
+        output = new OutputAnalyzer(pb.start());
+        analyzeOutputForDebugLevel(output);
+
+    }
+
+}
--- a/hotspot/test/serviceability/dcmd/compiler/CompilerQueueTest.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/serviceability/dcmd/compiler/CompilerQueueTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -24,25 +24,33 @@
 /*
  * @test CompilerQueueTest
  * @bug 8054889
- * @library /testlibrary
+ * @library /testlibrary /test/lib /
  * @modules java.base/sun.misc
  *          java.compiler
  *          java.management
  *          jdk.jvmstat/sun.jvmstat.monitor
- * @ignore 8069160
  * @build jdk.test.lib.*
- * @build jdk.test.lib.dcmd.*
- * @run testng CompilerQueueTest
- * @run testng/othervm -XX:-TieredCompilation CompilerQueueTest
- * @run testng/othervm -Xint CompilerQueueTest
+ *        jdk.test.lib.dcmd.*
+ *        sun.hotspot.WhiteBox
+ *        compiler.testlibrary.CompilerUtils
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run testng/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -Xmixed -XX:+WhiteBoxAPI CompilerQueueTest
+ * @run testng/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -Xmixed -XX:-TieredCompilation -XX:+WhiteBoxAPI CompilerQueueTest
+ * @run testng/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -Xint -XX:+WhiteBoxAPI CompilerQueueTest
  * @summary Test of diagnostic command Compiler.queue
  */
 
+import compiler.testlibrary.CompilerUtils;
 import jdk.test.lib.OutputAnalyzer;
 import jdk.test.lib.dcmd.CommandExecutor;
 import jdk.test.lib.dcmd.JMXExecutor;
 import org.testng.annotations.Test;
+import org.testng.Assert;
+import sun.hotspot.WhiteBox;
 
+import java.lang.reflect.Executable;
+import java.lang.reflect.Method;
 import java.util.Iterator;
 
 public class CompilerQueueTest {
@@ -54,70 +62,123 @@
      *
      * Output example:
      *
-     * Contents of C1 compile queue
-     * ----------------------------
-     * 73       3       java.lang.AbstractStringBuilder::append (50 bytes)
-     * 74       1       java.util.TreeMap::size (5 bytes)
-     * 75       3       java.lang.StringBuilder::append (8 bytes)
-     * 83       3       java.util.TreeMap$ValueIterator::next (8 bytes)
-     * 84       1       javax.management.MBeanFeatureInfo::getName (5 bytes)
-     * ----------------------------
-     * Contents of C2 compile queue
-     * ----------------------------
+     * Current compiles:
+     * C1 CompilerThread14 267       3       java.net.URLStreamHandler::parseURL (1166 bytes)
+     * C1 CompilerThread13 760       3       javax.management.StandardMBean::getDescription (11 bytes)
+     * C1 CompilerThread12 757  s    3       com.sun.jmx.mbeanserver.DefaultMXBeanMappingFactory::getMapping (27 bytes)
+     * C1 CompilerThread11 756  s!   3       com.sun.jmx.mbeanserver.DefaultMXBeanMappingFactory::mappingForType (110 bytes)
+     * C1 CompilerThread10 761       3       java.lang.StringLatin1::indexOf (121 bytes)
+     * C2 CompilerThread7 769       4       CompilerQueueTest::testcaseMethod4 (1 bytes)
+     *
+     * C1 compile queue:
+     * 762       3       java.lang.invoke.MethodType::basicType (8 bytes)
+     * 763       3       java.util.ArrayList::rangeCheck (22 bytes)
+     * 764       3       java.util.ArrayList::elementData (7 bytes)
+     * 765       3       jdk.internal.org.objectweb.asm.MethodVisitor::<init> (35 bytes)
+     * 766       1       CompilerQueueTest::testcaseMethod1 (1 bytes)
+     * 767       2       CompilerQueueTest::testcaseMethod2 (1 bytes)
+     * 768       3       CompilerQueueTest::testcaseMethod3 (1 bytes)
+     * 770       3       java.util.Properties::getProperty (46 bytes)
+     *
+     * C2 compile queue:
      * Empty
-     * ----------------------------
      *
      **/
 
+    protected static final WhiteBox WB = WhiteBox.getWhiteBox();
+
     public void run(CommandExecutor executor) {
 
+        TestCase[] testcases = {
+                new TestCase(1, "testcaseMethod1"),
+                new TestCase(2, "testcaseMethod2"),
+                new TestCase(3, "testcaseMethod3"),
+                new TestCase(4, "testcaseMethod4"),
+        };
+
+        // Lock compilation so that all enqueued compiles stay in the queue or on a compiler thread until we unlock it again
+        WB.lockCompilation();
+
+        // Enqueue one test method for each available level
+        int[] complevels = CompilerUtils.getAvailableCompilationLevels();
+        for (int level : complevels) {
+            TestCase testcase = testcases[level - 1];
+
+            boolean added = WB.enqueueMethodForCompilation(testcase.method, testcase.level);
+            // Set results to false for those methods we must find
+            // We will also assert if we find any test method we don't expect
+            Assert.assertTrue(WB.isMethodQueuedForCompilation(testcase.method));
+            testcase.check = false;
+        }
+
         // Get output from dcmd (diagnostic command)
         OutputAnalyzer output = executor.execute("Compiler.queue");
         Iterator<String> lines = output.asLines().iterator();
 
+        // Loop over the output and set the result for each test method found
         while (lines.hasNext()) {
             String str = lines.next();
-            if (str.startsWith("Contents of C")) {
-                match(lines.next(), "----------------------------");
-                str = lines.next();
-                if (!str.equals("Empty")) {
-                    while (str.charAt(0) != '-') {
-                        validateMethodLine(str);
-                        str = lines.next();
+            // Fast check for common part of method name
+            if (str.contains("testcaseMethod")) {
+                for (TestCase testcase : testcases) {
+                    if (str.contains(testcase.methodName)) {
+                        Assert.assertFalse(testcase.check, "Must not be found or already found.");
+                        testcase.check = true;
                     }
-                } else {
-                    str = lines.next();
                 }
-                match(str,"----------------------------");
-            } else {
-                Assert.fail("Failed parsing dcmd queue, line: " + str);
             }
         }
-    }
 
-    private static void validateMethodLine(String str) {
-        // Skip until package/class name begins. Trim to remove whitespace that
-        // may differ.
-        String name = str.substring(14).trim();
-        int sep = name.indexOf("::");
-        if (sep == -1) {
-            Assert.fail("Failed dcmd queue, didn't find separator :: in: " + name);
+        for (TestCase testcase : testcases) {
+            if (!testcase.check) {
+                // If this method wasn't found it must have been removed by policy,
+                // verify that it is now removed from the queue
+                Assert.assertFalse(WB.isMethodQueuedForCompilation(testcase.method), "Must be found or not in queue");
+            }
+            // Otherwise all good.
         }
-        try {
-            Class.forName(name.substring(0, sep));
-        } catch (ClassNotFoundException e) {
-            Assert.fail("Failed dcmd queue, Class for name: " + str);
-        }
-    }
 
-    public static void match(String line, String str) {
-        if (!line.equals(str)) {
-            Assert.fail("String equals: " + line + ", " + str);
-        }
+        // Enable compilations again
+        WB.unlockCompilation();
     }
 
     @Test
     public void jmx() {
         run(new JMXExecutor());
     }
+
+    public void testcaseMethod1() {
+    }
+
+    public void testcaseMethod2() {
+    }
+
+    public void testcaseMethod3() {
+    }
+
+    public void testcaseMethod4() {
+    }
+
+    public static Method getMethod(Class klass, String name, Class<?>... parameterTypes) {
+        try {
+            return klass.getDeclaredMethod(name, parameterTypes);
+        } catch (NoSuchMethodException | SecurityException e) {
+            throw new RuntimeException("exception on getting method Helper." + name, e);
+        }
+    }
+
+    class TestCase {
+        Method method;
+        int level;
+        String methodName;
+        Boolean check;
+
+        public TestCase(int level, String methodName) {
+            this.method = getMethod(CompilerQueueTest.class, methodName);
+            this.level = level;
+            this.methodName = methodName;
+            this.check = true;
+        }
+    }
+
 }
--- a/hotspot/test/serviceability/dcmd/jvmti/LoadAgentDcmdTest.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/serviceability/dcmd/jvmti/LoadAgentDcmdTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -22,6 +22,10 @@
  */
 import java.io.*;
 import java.nio.file.*;
+import java.util.jar.Attributes;
+import java.util.jar.JarEntry;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Manifest;
 import jdk.test.lib.*;
 import jdk.test.lib.dcmd.*;
 import org.testng.annotations.Test;
@@ -55,12 +59,7 @@
                       "'-Dtest.jdk=/path/to/jdk'.");
         }
 
-        Path libpath;
-        if (Platform.isWindows()) {
-            libpath = Paths.get(jdkPath, "bin", "instrument.dll");
-        } else {
-            libpath = Paths.get(jdkPath, "lib", Platform.getOsArch(), "libinstrument.so");
-        }
+        Path libpath = Paths.get(jdkPath, Platform.jdkLibPath(), Platform.sharedObjectName("instrument"));
 
         if (!libpath.toFile().exists()) {
             throw new FileNotFoundException(
@@ -70,31 +69,62 @@
         return libpath.toAbsolutePath().toString();
     }
 
+
+    public void createJarFileForAgent()
+      throws IOException {
+
+        final String jarName = "agent.jar";
+        final String agentClass = "SimpleJvmtiAgent";
+
+        Manifest manifest = new Manifest();
+
+        manifest.getMainAttributes().put(
+             Attributes.Name.MANIFEST_VERSION, "1.0");
+
+        manifest.getMainAttributes().put(
+             new Attributes.Name("Agent-Class"), agentClass);
+
+        JarOutputStream target = null;
+
+        try {
+            target = new
+              JarOutputStream(new FileOutputStream(jarName), manifest);
+            JarEntry entry = new JarEntry(agentClass + ".class");
+            target.putNextEntry(entry);
+            target.closeEntry();
+        } finally {
+            if (target != null) {
+                target.close();
+            }
+        }
+    }
+
     public void run(CommandExecutor executor)  {
         try{
-            PrintWriter pw = new PrintWriter("MANIFEST.MF");
-            pw.println("Agent-Class: SimpleJvmtiAgent");
-            pw.close();
 
-            ProcessBuilder pb = new ProcessBuilder();
-            pb.command(new String[] { JDKToolFinder.getJDKTool("jar"),
-                                      "cmf",
-                                      "MANIFEST.MF",
-                                      "agent.jar",
-                                      "SimpleJvmtiAgent.class"});
-            pb.start().waitFor();
+            createJarFileForAgent();
 
             String libpath = getLibInstrumentPath();
+            OutputAnalyzer output = null;
 
-            // Test 1: No argument
-            OutputAnalyzer output = executor.execute("JVMTI.agent_load " +
-                                                     libpath + " agent.jar");
+            // Test 1: Native agent, no arguments
+            output = executor.execute("JVMTI.agent_load " +
+                                          libpath + " agent.jar");
             output.stderrShouldBeEmpty();
 
-            // Test 2: With argument
+            // Test 2: Native agent, with arguments
+            output = executor.execute("JVMTI.agent_load " +
+                                          libpath + " \"agent.jar=foo=bar\"");
+            output.stderrShouldBeEmpty();
+
+            // Test 3: Java agent, no arguments
             output = executor.execute("JVMTI.agent_load " +
-                                      libpath + " \"agent.jar=foo=bar\"");
+                                          "agent.jar");
             output.stderrShouldBeEmpty();
+
+            // Test 4: Java agent, with arguments
+            output = executor.execute("JVMTI.agent_load " +
+                                          "\"agent.jar=foo=bar\"");
+            output.stderrShouldBeEmpty();
+
         } catch (Exception e) {
             throw new RuntimeException(e);
         }
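
For comparison only: a Java agent packaged like agent.jar above can also be loaded dynamically through the attach API rather than the JVMTI.agent_load diagnostic command. A minimal sketch, with the target pid and jar path passed in as hypothetical arguments:

    import com.sun.tools.attach.VirtualMachine;

    class AttachLoadAgentSketch {
        public static void main(String[] args) throws Exception {
            VirtualMachine vm = VirtualMachine.attach(args[0]);   // args[0]: target pid
            try {
                vm.loadAgent(args[1], "foo=bar");                 // args[1]: path to agent.jar
            } finally {
                vm.detach();
            }
        }
    }
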
--- a/hotspot/test/serviceability/dcmd/jvmti/LoadJavaAgentDcmdTest.java	Wed Jul 05 21:25:35 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-import java.io.*;
-import jdk.test.lib.*;
-import jdk.test.lib.dcmd.*;
-import org.testng.annotations.Test;
-
-/*
- * Test to attach JVMTI java agent.
- *
- * @test
- * @bug 8147388
- * @library /testlibrary
- * @modules java.base/sun.misc
- *          java.compiler
- *          java.instrument
- *          java.management
- *          jdk.jvmstat/sun.jvmstat.monitor
- * @build ClassFileInstaller jdk.test.lib.* SimpleJvmtiAgent
- * @run main ClassFileInstaller SimpleJvmtiAgent
- * @run testng LoadJavaAgentDcmdTest
- */
-public class LoadJavaAgentDcmdTest {
-    public void run(CommandExecutor executor)  {
-        try{
-            PrintWriter pw = new PrintWriter("MANIFEST.MF");
-            pw.println("Agent-Class: SimpleJvmtiAgent");
-            pw.close();
-
-            ProcessBuilder pb = new ProcessBuilder();
-            pb.command(new String[] { JDKToolFinder.getJDKTool("jar"),
-                                      "cmf",
-                                      "MANIFEST.MF",
-                                      "agent.jar",
-                                      "SimpleJvmtiAgent.class"});
-            pb.start().waitFor();
-
-            // Test 1: No argument
-            OutputAnalyzer output = executor.execute("JVMTI.agent_load " +
-                                                     "agent.jar");
-            output.stderrShouldBeEmpty();
-
-            // Test 2: With argument
-            output = executor.execute("JVMTI.agent_load " +
-                                      "\"agent.jar=foo=bar\"");
-            output.stderrShouldBeEmpty();
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Test
-    public void jmx() throws Throwable {
-        run(new JMXExecutor());
-    }
-
-    @Test
-    public void cli() throws Throwable {
-        run(new PidJcmdExecutor());
-    }
-}
--- a/hotspot/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,6 @@
 
 public class JMapHProfLargeHeapTest {
     private static final String HEAP_DUMP_FILE_NAME = "heap.hprof";
-    private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1";
     private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2";
     private static final long M = 1024L;
     private static final long G = 1024L * M;
@@ -79,8 +78,8 @@
             }
         }
 
-        // Small heap 22 megabytes, should create 1.0.1 file format
-        testHProfFileFormat("-Xmx1g", 22 * M, HPROF_HEADER_1_0_1);
+        // All heap dumps should create 1.0.2 file format
+        testHProfFileFormat("-Xmx1g", 22 * M, HPROF_HEADER_1_0_2);
 
         /**
          * This test was deliberately commented out since the test system lacks
--- a/hotspot/test/testlibrary/jdk/test/lib/Platform.java	Wed Jul 05 21:25:35 2017 +0200
+++ b/hotspot/test/testlibrary/jdk/test/lib/Platform.java	Sat Mar 05 20:46:42 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -203,4 +203,31 @@
     public static boolean canAttachOSX() throws Exception {
         return userName.equals("root");
     }
+
+    /**
+     * return path to library inside jdk tree
+     */
+    public static String jdkLibPath() {
+        if (isWindows()) {
+            return "bin";
+        }
+        if (isOSX()) {
+            return "lib";
+        }
+
+        return "lib/" + getOsArch();
+    }
+
+    /**
+     * Build name of shared object according to platform rules
+     */
+    public static String sharedObjectName(String name) {
+        if (isWindows()) {
+            return name + ".dll";
+        }
+        if (isOSX()) {
+            return "lib" + name + ".dylib";
+        }
+        return "lib" + name + ".so";
+    }
 }
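
A short usage sketch of the two new helpers, mirroring how LoadAgentDcmdTest above builds the path to the instrument library (reading test.jdk, with java.home as an illustrative fallback):

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import jdk.test.lib.Platform;

    class LibPathSketch {
        public static void main(String[] args) {
            String jdkHome = System.getProperty("test.jdk", System.getProperty("java.home"));
            // e.g. <jdk>/bin/instrument.dll on Windows, <jdk>/lib/libinstrument.dylib on OS X,
            // <jdk>/lib/<arch>/libinstrument.so elsewhere
            Path libinstrument = Paths.get(jdkHome, Platform.jdkLibPath(), Platform.sharedObjectName("instrument"));
            System.out.println(libinstrument);
        }
    }
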