Merge JDK-8200758-branch
author herrick
Wed, 03 Jul 2019 17:46:04 -0400
branch JDK-8200758-branch
changeset 57451 64e1fca51e8c
parent 57450 82c78b40b39d (current diff)
parent 55576 4d193e40e7af (diff)
child 57455 f1290ca0fee6
Merge
src/hotspot/share/jfr/leakprofiler/emitEventOperation.cpp
src/hotspot/share/jfr/leakprofiler/emitEventOperation.hpp
--- a/make/jdk/src/classes/build/tools/jdwpgen/AbstractCommandNode.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/make/jdk/src/classes/build/tools/jdwpgen/AbstractCommandNode.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,8 +30,8 @@
 class AbstractCommandNode extends AbstractNamedNode {
 
     void document(PrintWriter writer) {
-        writer.println("<h5 id=\"" + context.whereC + "\">" + name +
-                       " Command (" + nameNode.value() + ")</h5>");
+        writer.println("<h3 id=\"" + context.whereC + "\">" + name +
+                       " Command (" + nameNode.value() + ")</h3>");
         writer.println(comment());
         writer.println("<dl>");
         for (Node node : components) {
--- a/make/jdk/src/classes/build/tools/jdwpgen/AbstractNamedNode.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/make/jdk/src/classes/build/tools/jdwpgen/AbstractNamedNode.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,8 +62,8 @@
     }
 
     void document(PrintWriter writer) {
-        writer.println("<h4 id=\"" + name + "\">" + name +
-                       " Command Set</h4>");
+        writer.println("<h2 id=\"" + name + "\">" + name +
+                       " Command Set</h2>");
         for (Node node : components) {
             node.document(writer);
         }
--- a/make/jdk/src/classes/build/tools/jdwpgen/CommandSetNode.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/make/jdk/src/classes/build/tools/jdwpgen/CommandSetNode.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,9 +38,9 @@
     }
 
     void document(PrintWriter writer) {
-        writer.println("<h4 id=\"" + context.whereC + "\">" + name +
+        writer.println("<h2 id=\"" + context.whereC + "\">" + name +
                        " Command Set (" +
-                       nameNode.value() + ")</h4>");
+                       nameNode.value() + ")</h2>");
         writer.println(comment());
         for (Node node : components) {
             node.document(writer);
--- a/make/jdk/src/classes/build/tools/jdwpgen/ConstantSetNode.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/make/jdk/src/classes/build/tools/jdwpgen/ConstantSetNode.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,8 +54,8 @@
     }
 
     void document(PrintWriter writer) {
-        writer.println("<h4 id=\"" + context.whereC + "\">" + name +
-                       " Constants</h4>");
+        writer.println("<h2 id=\"" + context.whereC + "\">" + name +
+                       " Constants</h2>");
         writer.println(comment());
         writer.println("<table><tr>");
         writer.println("<th style=\"width: 20%\"><th style=\"width: 5%\"><th style=\"width:  65%\">");
--- a/make/jdk/src/classes/build/tools/jdwpgen/RootNode.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/make/jdk/src/classes/build/tools/jdwpgen/RootNode.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,11 +52,16 @@
         writer.println("</style>");
         writer.println("</head>");
         writer.println("<body>");
-        writer.println("<ul role=\"navigation\">");
+        writer.println("<div class=\"centered\" role=\"banner\">");
+        writer.println("<h1 id=\"Protocol Details\">Java Debug Wire Protocol Details</h1>");
+        writer.println("</div>");
+        writer.println("<nav>");
+        writer.println("<ul>");
         for (Node node : components) {
             node.documentIndex(writer);
         }
         writer.println("</ul>");
+        writer.println("</nav>");
         writer.println("<div role=\"main\">");
         for (Node node : components) {
             node.document(writer);
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -47,7 +47,7 @@
                                                        Register src, Register dst, Register count, RegSet saved_regs) {
   if (is_oop) {
     bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
-    if (ShenandoahSATBBarrier && !dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
+    if (ShenandoahSATBBarrier && !dest_uninitialized) {
 
       Label done;
 
--- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad	Wed Jul 03 17:46:04 2019 -0400
@@ -61,7 +61,7 @@
 //
 // Execute ZGC load barrier (strong) slow path
 //
-instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr,
+instruct loadBarrierSlowReg(iRegP dst, memory src, rFlagsReg cr,
     vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
     vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
     vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
@@ -69,20 +69,22 @@
     vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
     vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
     vRegD_V30 v30, vRegD_V31 v31) %{
-  match(Set dst (LoadBarrierSlowReg mem));
+  match(Set dst (LoadBarrierSlowReg src dst));
   predicate(!n->as_LoadBarrierSlowReg()->is_weak());
 
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
      KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
      KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
      KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
      KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
      KILL v29, KILL v30, KILL v31);
 
-  format %{"LoadBarrierSlowReg $dst, $mem" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
+
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
-                            $mem$$index, $mem$$scale, $mem$$disp, false);
+    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
+                            $src$$index, $src$$scale, $src$$disp, false);
   %}
   ins_pipe(pipe_slow);
 %}
@@ -90,7 +92,7 @@
 //
 // Execute ZGC load barrier (weak) slow path
 //
-instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr,
+instruct loadBarrierWeakSlowReg(iRegP dst, memory src, rFlagsReg cr,
     vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
     vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
     vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
@@ -98,20 +100,22 @@
     vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
     vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
     vRegD_V30 v30, vRegD_V31 v31) %{
-  match(Set dst (LoadBarrierSlowReg mem));
+  match(Set dst (LoadBarrierSlowReg src dst));
   predicate(n->as_LoadBarrierSlowReg()->is_weak());
 
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
      KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
      KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
      KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
      KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
      KILL v29, KILL v30, KILL v31);
 
-  format %{"LoadBarrierWeakSlowReg $dst, $mem" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
+
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
-                            $mem$$index, $mem$$scale, $mem$$disp, true);
+    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
+                            $src$$index, $src$$scale, $src$$disp, true);
   %}
   ins_pipe(pipe_slow);
 %}
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -69,7 +69,7 @@
     }
 #endif
 
-    if (ShenandoahSATBBarrier && !dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
+    if (ShenandoahSATBBarrier && !dest_uninitialized) {
       Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
       assert_different_registers(dst, count, thread); // we don't care about src here?
 #ifndef _LP64
--- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad	Wed Jul 03 17:46:04 2019 -0400
@@ -45,32 +45,31 @@
 
 // For XMM and YMM enabled processors
 instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
-                                      rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                      rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                       rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                       rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                       rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+  match(Set dst (LoadBarrierSlowReg src dst));
+  predicate(UseAVX <= 2 && !n->as_LoadBarrierSlowReg()->is_weak());
 
-  match(Set dst (LoadBarrierSlowReg src));
-  predicate((UseAVX <= 2) && !n->as_LoadBarrierSlowReg()->is_weak());
-
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
          KILL x4, KILL x5, KILL x6, KILL x7,
          KILL x8, KILL x9, KILL x10, KILL x11,
          KILL x12, KILL x13, KILL x14, KILL x15);
 
-  format %{ "zLoadBarrierSlowRegXmmAndYmm $dst, $src" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
 
   ins_encode %{
     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
   %}
-
   ins_pipe(pipe_slow);
 %}
 
 // For ZMM enabled processors
 instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
-                                rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                 rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                 rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                 rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
@@ -79,10 +78,10 @@
                                 rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                 rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
-  match(Set dst (LoadBarrierSlowReg src));
-  predicate((UseAVX == 3) && !n->as_LoadBarrierSlowReg()->is_weak());
+  match(Set dst (LoadBarrierSlowReg src dst));
+  predicate(UseAVX == 3 && !n->as_LoadBarrierSlowReg()->is_weak());
 
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
          KILL x4, KILL x5, KILL x6, KILL x7,
          KILL x8, KILL x9, KILL x10, KILL x11,
@@ -92,43 +91,42 @@
          KILL x24, KILL x25, KILL x26, KILL x27,
          KILL x28, KILL x29, KILL x30, KILL x31);
 
-  format %{ "zLoadBarrierSlowRegZmm $dst, $src" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
 
   ins_encode %{
     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
   %}
-
   ins_pipe(pipe_slow);
 %}
 
 // For XMM and YMM enabled processors
 instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
-                                          rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                          rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                           rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                           rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                           rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+  match(Set dst (LoadBarrierSlowReg src dst));
+  predicate(UseAVX <= 2 && n->as_LoadBarrierSlowReg()->is_weak());
 
-  match(Set dst (LoadBarrierSlowReg src));
-  predicate((UseAVX <= 2) && n->as_LoadBarrierSlowReg()->is_weak());
-
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
          KILL x4, KILL x5, KILL x6, KILL x7,
          KILL x8, KILL x9, KILL x10, KILL x11,
          KILL x12, KILL x13, KILL x14, KILL x15);
 
-  format %{ "zLoadBarrierWeakSlowRegXmmAndYmm $dst, $src" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
 
   ins_encode %{
     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
   %}
-
   ins_pipe(pipe_slow);
 %}
 
 // For ZMM enabled processors
 instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
-                                    rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                    rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
@@ -137,10 +135,10 @@
                                     rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                     rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
-  match(Set dst (LoadBarrierSlowReg src));
-  predicate((UseAVX == 3) && n->as_LoadBarrierSlowReg()->is_weak());
+  match(Set dst (LoadBarrierSlowReg src dst));
+  predicate(UseAVX == 3 && n->as_LoadBarrierSlowReg()->is_weak());
 
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
          KILL x4, KILL x5, KILL x6, KILL x7,
          KILL x8, KILL x9, KILL x10, KILL x11,
@@ -150,12 +148,12 @@
          KILL x24, KILL x25, KILL x26, KILL x27,
          KILL x28, KILL x29, KILL x30, KILL x31);
 
-  format %{ "zLoadBarrierWeakSlowRegZmm $dst, $src" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
 
   ins_encode %{
     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
   %}
-
   ins_pipe(pipe_slow);
 %}
 
--- a/src/hotspot/os/linux/os_linux.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/os/linux/os_linux.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -84,6 +84,7 @@
 # include <sys/select.h>
 # include <pthread.h>
 # include <signal.h>
+# include <endian.h>
 # include <errno.h>
 # include <dlfcn.h>
 # include <stdio.h>
@@ -1747,11 +1748,26 @@
     return NULL;
   }
 
+  if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) {
+    // handle invalid/out of range endianness values
+    if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) {
+      return NULL;
+    }
+
+#if defined(VM_LITTLE_ENDIAN)
+    // VM is LE, shared object BE
+    elf_head.e_machine = be16toh(elf_head.e_machine);
+#else
+    // VM is BE, shared object LE
+    elf_head.e_machine = le16toh(elf_head.e_machine);
+#endif
+  }
+
   typedef struct {
     Elf32_Half    code;         // Actual value as defined in elf.h
     Elf32_Half    compat_class; // Compatibility of archs at VM's sense
     unsigned char elf_class;    // 32 or 64 bit
-    unsigned char endianess;    // MSB or LSB
+    unsigned char endianness;   // MSB or LSB
     char*         name;         // String representation
   } arch_t;
 
@@ -1778,8 +1794,9 @@
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
     {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"},
 #endif
-    {EM_ARM,         EM_ARM,     ELFCLASS32,   ELFDATA2LSB, (char*)"ARM"},
-    {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
+    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
+    // we only support 64 bit z architecture
+    {EM_S390,        EM_S390,    ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"},
     {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
     {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
     {EM_MIPS,        EM_MIPS,    ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
@@ -1825,7 +1842,7 @@
         AARCH64, ALPHA, ARM, AMD64, IA32, IA64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, S390, SH, __sparc
 #endif
 
-  // Identify compatability class for VM's architecture and library's architecture
+  // Identify compatibility class for VM's architecture and library's architecture
   // Obtain string descriptions for architectures
 
   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
@@ -1849,29 +1866,35 @@
     return NULL;
   }
 
-  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
-    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
-    return NULL;
-  }
-
-#ifndef S390
-  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
-    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
-    return NULL;
-  }
-#endif // !S390
-
   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
-    if (lib_arch.name!=NULL) {
+    if (lib_arch.name != NULL) {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
+                 " (Possible cause: can't load %s .so on a %s platform)",
                  lib_arch.name, arch_array[running_arch_index].name);
     } else {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-                 lib_arch.code,
-                 arch_array[running_arch_index].name);
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)",
+                 lib_arch.code, arch_array[running_arch_index].name);
     }
+    return NULL;
+  }
+
+  if (lib_arch.endianness != arch_array[running_arch_index].endianness) {
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: endianness mismatch)");
+    return NULL;
+  }
+
+  // ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit
+  if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) {
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)");
+    return NULL;
+  }
+
+  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+               " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)",
+               (int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32);
+    return NULL;
   }
 
   return NULL;
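
The os_linux.cpp change above lets the post-dlopen diagnostics handle a shared object built for the opposite byte order: invalid EI_DATA values are rejected, and e_machine is byte-swapped (via be16toh()/le16toh() from <endian.h>) so the architecture-table lookup still matches. A minimal standalone sketch of that check, assuming glibc's <elf.h> and <endian.h> and not taken from HotSpot:

    // Sketch only: return e_machine converted to host byte order, or EM_NONE
    // when the ELF header's EI_DATA field is invalid.
    #include <elf.h>
    #include <endian.h>

    static Elf32_Half machine_in_host_order(const Elf32_Ehdr& hdr) {
    #if __BYTE_ORDER == __LITTLE_ENDIAN
      const unsigned char native_order = ELFDATA2LSB;
    #else
      const unsigned char native_order = ELFDATA2MSB;
    #endif
      const unsigned char ei_data = hdr.e_ident[EI_DATA];
      if (ei_data != ELFDATA2LSB && ei_data != ELFDATA2MSB) {
        return EM_NONE;                 // invalid/out-of-range endianness value
      }
      if (ei_data == native_order) {
        return hdr.e_machine;           // already in host byte order
      }
      // Foreign byte order: swap the two bytes of the 16-bit e_machine field,
      // which is what be16toh()/le16toh() do in the hunk above.
      return (Elf32_Half)((hdr.e_machine >> 8) | (hdr.e_machine << 8));
    }

The os_solaris.cpp hunk below does the same swap by hand in change_endianness(), where the patch avoids relying on <endian.h>.
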
--- a/src/hotspot/os/solaris/os_solaris.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1520,6 +1520,13 @@
   }
 }
 
+static void change_endianness(Elf32_Half& val) {
+  unsigned char *ptr = (unsigned char *)&val;
+  unsigned char swp = ptr[0];
+  ptr[0] = ptr[1];
+  ptr[1] = swp;
+}
+
 // Loads .dll/.so and
 // in case of error it checks if .dll/.so was built for the
 // same architecture as Hotspot is running on
@@ -1570,6 +1577,14 @@
     return NULL;
   }
 
+  if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) {
+    // handle invalid/out of range endianness values
+    if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) {
+      return NULL;
+    }
+    change_endianness(elf_head.e_machine);
+  }
+
   typedef struct {
     Elf32_Half    code;         // Actual value as defined in elf.h
     Elf32_Half    compat_class; // Compatibility of archs at VM's sense
@@ -1588,7 +1603,10 @@
     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
-    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
+    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
+    // we only support 64 bit z architecture
+    {EM_S390,        EM_S390,    ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"},
+    {EM_AARCH64,     EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"}
   };
 
 #if  (defined IA32)
@@ -1612,7 +1630,7 @@
        IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
 #endif
 
-  // Identify compatability class for VM's architecture and library's architecture
+  // Identify compatibility class for VM's architecture and library's architecture
   // Obtain string descriptions for architectures
 
   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
@@ -1636,29 +1654,37 @@
     return NULL;
   }
 
+  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
+    if (lib_arch.name != NULL) {
+      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+                 " (Possible cause: can't load %s .so on a %s platform)",
+                 lib_arch.name, arch_array[running_arch_index].name);
+    } else {
+      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)",
+                 lib_arch.code, arch_array[running_arch_index].name);
+    }
+    return NULL;
+  }
+
   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
     return NULL;
   }
 
+  // ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit
+  if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) {
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)");
+    return NULL;
+  }
+
   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
-    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+               " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)",
+               (int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32);
     return NULL;
   }
 
-  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
-    if (lib_arch.name!=NULL) {
-      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
-                 lib_arch.name, arch_array[running_arch_index].name);
-    } else {
-      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-                 lib_arch.code,
-                 arch_array[running_arch_index].name);
-    }
-  }
-
   return NULL;
 }
 
--- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -57,7 +57,13 @@
 
 inline void OrderAccess::cross_modify_fence() {
   int idx = 0;
+#ifdef AMD64
   __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
+#else
+  // On some x86 systems EBX is a reserved register that cannot be
+  // clobbered, so we must protect it around the CPUID.
+  __asm__ volatile ("xchg %%esi, %%ebx; cpuid; xchg %%esi, %%ebx " : "+a" (idx) : : "esi", "ecx", "edx", "memory");
+#endif
 }
 
 template<>
--- a/src/hotspot/share/adlc/formssel.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/adlc/formssel.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -3513,7 +3513,7 @@
     "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
     "GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
 #if INCLUDE_ZGC
-    "LoadBarrierSlowReg", "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
+    "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
 #endif
     "ClearArray"
   };
--- a/src/hotspot/share/code/nmethod.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/code/nmethod.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1774,10 +1774,9 @@
   }
 }
 
-void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
+void nmethod::oops_do(OopClosure* f, bool allow_dead) {
   // make sure the oops ready to receive visitors
-  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
-  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
+  assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
 
   // Prevent extra code cache walk for platforms that don't have immediate oops.
   if (relocInfo::mustIterateImmediateOopsInCode()) {
--- a/src/hotspot/share/code/nmethod.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/code/nmethod.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -473,7 +473,7 @@
 
  public:
   void oops_do(OopClosure* f) { oops_do(f, false); }
-  void oops_do(OopClosure* f, bool allow_zombie);
+  void oops_do(OopClosure* f, bool allow_dead);
 
   bool test_set_oops_do_mark();
   static void oops_do_marking_prologue();
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -62,7 +62,7 @@
   }
 
   size_t used_in_bytes() {
-    return _space->used_stable();
+    return _space->used();
   }
 };
 
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -372,8 +372,6 @@
     )
   }
   _dictionary->set_par_lock(&_parDictionaryAllocLock);
-
-  _used_stable = 0;
 }
 
 // Like CompactibleSpace forward() but always calls cross_threshold() to
@@ -579,14 +577,6 @@
   return capacity() - free();
 }
 
-size_t CompactibleFreeListSpace::used_stable() const {
-  return _used_stable;
-}
-
-void CompactibleFreeListSpace::recalculate_used_stable() {
-  _used_stable = used();
-}
-
 size_t CompactibleFreeListSpace::free() const {
   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   // if you do this while the structures are in flux you
@@ -1384,9 +1374,6 @@
     debug_only(fc->mangleAllocated(size));
   }
 
-  // After allocation, recalculate used space and update used_stable
-  recalculate_used_stable();
-
   return res;
 }
 
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -192,9 +192,6 @@
   // Used to keep track of limit of sweep for the space
   HeapWord* _sweep_limit;
 
-  // Stable value of used().
-  size_t _used_stable;
-
   // Used to make the young collector update the mod union table
   MemRegionClosure* _preconsumptionDirtyCardClosure;
 
@@ -415,17 +412,6 @@
   // which overestimates the region by returning the entire
   // committed region (this is safe, but inefficient).
 
-  // Returns monotonically increasing stable used space bytes for CMS.
-  // This is required for jstat and other memory monitoring tools
-  // that might otherwise see inconsistent used space values during a garbage
-  // collection, promotion or allocation into compactibleFreeListSpace.
-  // The value returned by this function might be smaller than the
-  // actual value.
-  size_t used_stable() const;
-  // Recalculate and cache the current stable used() value. Only to be called
-  // in places where we can be sure that the result is stable.
-  void recalculate_used_stable();
-
   // Returns a subregion of the space containing all the objects in
   // the space.
   MemRegion used_region() const {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -692,10 +692,6 @@
   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 }
 
-size_t ConcurrentMarkSweepGeneration::used_stable() const {
-  return cmsSpace()->used_stable();
-}
-
 size_t ConcurrentMarkSweepGeneration::max_available() const {
   return free() + _virtual_space.uncommitted_size();
 }
@@ -1527,8 +1523,6 @@
   FreelistLocker z(this);
   MetaspaceGC::compute_new_size();
   _cmsGen->compute_new_size_free_list();
-  // recalculate CMS used space after CMS collection
-  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 // A work method used by the foreground collector to do
@@ -2057,7 +2051,6 @@
 
   _capacity_at_prologue = capacity();
   _used_at_prologue = used();
-  _cmsSpace->recalculate_used_stable();
 
   // We enable promotion tracking so that card-scanning can recognize
   // which objects have been promoted during this GC and skip them.
@@ -2130,7 +2123,6 @@
   _eden_chunk_index = 0;
 
   size_t cms_used   = _cmsGen->cmsSpace()->used();
-  _cmsGen->cmsSpace()->recalculate_used_stable();
 
   // update performance counters - this uses a special version of
   // update_counters() that allows the utilization to be passed as a
@@ -2824,8 +2816,6 @@
     rp->enable_discovery();
     _collectorState = Marking;
   }
-
-  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 void CMSCollector::checkpointRootsInitialWork() {
@@ -4187,7 +4177,6 @@
     MutexLocker y(bitMapLock(),
                   Mutex::_no_safepoint_check_flag);
     checkpointRootsFinalWork();
-    _cmsGen->cmsSpace()->recalculate_used_stable();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -5347,14 +5336,9 @@
     // further below.
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
-
       // Update heap occupancy information which is used as
       // input to soft ref clearing policy at the next gc.
       Universe::update_heap_info_at_gc();
-
-      // recalculate CMS used space after CMS collection
-      _cmsGen->cmsSpace()->recalculate_used_stable();
-
       _collectorState = Resizing;
     }
   }
@@ -5443,7 +5427,6 @@
     // Gather statistics on the young generation collection.
     collector()->stats().record_gc0_end(used());
   }
-  _cmsSpace->recalculate_used_stable();
 }
 
 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1112,7 +1112,6 @@
   double occupancy() const { return ((double)used())/((double)capacity()); }
   size_t contiguous_available() const;
   size_t unsafe_max_alloc_nogc() const;
-  size_t used_stable() const;
 
   // over-rides
   MemRegion used_region_at_save_marks() const;
--- a/src/hotspot/share/gc/cms/gSpaceCounters.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/cms/gSpaceCounters.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -59,7 +59,7 @@
   }
 
   inline void update_used() {
-    _used->set_value(_gen->used_stable());
+    _used->set_value(_gen->used());
   }
 
   // special version of update_used() to allow the used value to be
@@ -103,7 +103,7 @@
     GenerationUsedHelper(Generation* g) : _gen(g) { }
 
     inline jlong take_sample() {
-      return _gen->used_stable();
+      return _gen->used();
     }
 };
 
--- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -35,7 +35,6 @@
   _old_length = g1_heap->old_regions_count();
   _archive_length = g1_heap->archive_regions_count();
   _humongous_length = g1_heap->humongous_regions_count();
-  _metaspace_used_bytes = MetaspaceUtils::used_bytes();
 }
 
 G1HeapTransition::G1HeapTransition(G1CollectedHeap* g1_heap) : _g1_heap(g1_heap), _before(g1_heap) { }
@@ -131,5 +130,5 @@
   log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
       usage._humongous_used / K, ((after._humongous_length * HeapRegion::GrainBytes) - usage._humongous_used) / K);
 
-  MetaspaceUtils::print_metaspace_change(_before._metaspace_used_bytes);
+  MetaspaceUtils::print_metaspace_change(_before._meta_sizes);
 }
--- a/src/hotspot/share/gc/g1/g1HeapTransition.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -26,6 +26,7 @@
 #define SHARE_GC_G1_G1HEAPTRANSITION_HPP
 
 #include "gc/shared/plab.hpp"
+#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
 
 class G1CollectedHeap;
 
@@ -36,7 +37,7 @@
     size_t _old_length;
     size_t _archive_length;
     size_t _humongous_length;
-    size_t _metaspace_used_bytes;
+    const metaspace::MetaspaceSizesSnapshot _meta_sizes;
 
     Data(G1CollectedHeap* g1_heap);
   };
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -38,6 +38,7 @@
 #include "gc/shared/strongRootsScope.hpp"
 #include "logging/log.hpp"
 #include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/ostream.hpp"
 
@@ -257,19 +258,18 @@
   PreGCValues(ParallelScavengeHeap* heap) :
       _heap_used(heap->used()),
       _young_gen_used(heap->young_gen()->used_in_bytes()),
-      _old_gen_used(heap->old_gen()->used_in_bytes()),
-      _metadata_used(MetaspaceUtils::used_bytes()) { };
+      _old_gen_used(heap->old_gen()->used_in_bytes()) { }
 
   size_t heap_used() const      { return _heap_used; }
   size_t young_gen_used() const { return _young_gen_used; }
   size_t old_gen_used() const   { return _old_gen_used; }
-  size_t metadata_used() const  { return _metadata_used; }
+  const metaspace::MetaspaceSizesSnapshot& metaspace_sizes() const { return _meta_sizes; }
 
 private:
   size_t _heap_used;
   size_t _young_gen_used;
   size_t _old_gen_used;
-  size_t _metadata_used;
+  const metaspace::MetaspaceSizesSnapshot _meta_sizes;
 };
 
 // Class that can be used to print information about the
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -193,11 +193,7 @@
 
     BiasedLocking::preserve_marks();
 
-    // Capture metadata size before collection for sizing.
-    size_t metadata_prev_used = MetaspaceUtils::used_bytes();
-
-    size_t old_gen_prev_used = old_gen->used_in_bytes();
-    size_t young_gen_prev_used = young_gen->used_in_bytes();
+    const PreGCValues pre_gc_values(heap);
 
     allocate_stacks();
 
@@ -352,9 +348,9 @@
       accumulated_time()->stop();
     }
 
-    young_gen->print_used_change(young_gen_prev_used);
-    old_gen->print_used_change(old_gen_prev_used);
-    MetaspaceUtils::print_metaspace_change(metadata_prev_used);
+    young_gen->print_used_change(pre_gc_values.young_gen_used());
+    old_gen->print_used_change(pre_gc_values.old_gen_used());
+    MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
 
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1779,7 +1779,7 @@
   // miscellaneous bookkeeping.
   pre_compact();
 
-  PreGCValues pre_gc_values(heap);
+  const PreGCValues pre_gc_values(heap);
 
   // Get the compaction manager reserved for the VM thread.
   ParCompactionManager* const vmthread_cm =
@@ -1925,7 +1925,7 @@
 
     young_gen->print_used_change(pre_gc_values.young_gen_used());
     old_gen->print_used_change(pre_gc_values.old_gen_used());
-    MetaspaceUtils::print_metaspace_change(pre_gc_values.metadata_used());
+    MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
 
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -328,7 +328,7 @@
     reference_processor()->enable_discovery();
     reference_processor()->setup_policy(false);
 
-    PreGCValues pre_gc_values(heap);
+    const PreGCValues pre_gc_values(heap);
 
     // Reset our survivor overflow.
     set_survivor_overflow(false);
@@ -601,7 +601,7 @@
 
     young_gen->print_used_change(pre_gc_values.young_gen_used());
     old_gen->print_used_change(pre_gc_values.old_gen_used());
-    MetaspaceUtils::print_metaspace_change(pre_gc_values.metadata_used());
+    MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
 
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
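
The psMarkSweep, psParallelCompact, and psScavenge hunks above replace the ad-hoc "size_t metadata_prev_used = MetaspaceUtils::used_bytes()" captures with a const PreGCValues object whose metaspace::MetaspaceSizesSnapshot member samples the sizes at construction time and is later passed to MetaspaceUtils::print_metaspace_change(). A tiny sketch of that capture-at-construction idiom, with hypothetical names rather than the HotSpot types:

    // Sketch only: a snapshot whose default constructor samples the current
    // value, so a const instance declared before the GC records the "before"
    // state that the post-GC report diffs against.
    #include <cstddef>
    #include <cstdio>

    static size_t g_used_bytes = 100;                 // stand-in for MetaspaceUtils::used_bytes()

    struct SizesSnapshot {
      const size_t used;
      SizesSnapshot() : used(g_used_bytes) {}         // sampled at construction
    };

    static void print_change(const SizesSnapshot& before) {
      std::printf("used: %zu -> %zu\n", before.used, g_used_bytes);
    }

    int main() {
      const SizesSnapshot before;                     // like "const PreGCValues pre_gc_values(heap);"
      g_used_bytes = 80;                              // pretend the collection freed metadata
      print_change(before);
      return 0;
    }
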
--- a/src/hotspot/share/gc/shared/gcBehaviours.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/shared/gcBehaviours.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -64,7 +64,7 @@
 bool ClosureIsUnloadingBehaviour::is_unloading(CompiledMethod* cm) const {
   if (cm->is_nmethod()) {
     IsCompiledMethodUnloadingOopClosure cl(_cl);
-    static_cast<nmethod*>(cm)->oops_do(&cl);
+    static_cast<nmethod*>(cm)->oops_do(&cl, true /* allow_dead */);
     return cl.is_unloading();
   } else {
     return false;
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -575,9 +575,6 @@
 
   ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());
 
-  const size_t metadata_prev_used = MetaspaceUtils::used_bytes();
-
-
   FlagSetting fl(_is_gc_active, true);
 
   bool complete = full && (max_generation == OldGen);
@@ -586,6 +583,7 @@
 
   size_t young_prev_used = _young_gen->used();
   size_t old_prev_used = _old_gen->used();
+  const metaspace::MetaspaceSizesSnapshot prev_meta_sizes;
 
   bool run_verification = total_collections() >= VerifyGCStartAt;
   bool prepared_for_verification = false;
@@ -628,7 +626,7 @@
       _young_gen->compute_new_size();
 
       print_heap_change(young_prev_used, old_prev_used);
-      MetaspaceUtils::print_metaspace_change(metadata_prev_used);
+      MetaspaceUtils::print_metaspace_change(prev_meta_sizes);
 
       // Track memory usage and detect low memory after GC finishes
       MemoryService::track_memory_usage();
@@ -687,7 +685,7 @@
     update_full_collections_completed();
 
     print_heap_change(young_prev_used, old_prev_used);
-    MetaspaceUtils::print_metaspace_change(metadata_prev_used);
+    MetaspaceUtils::print_metaspace_change(prev_meta_sizes);
 
     // Track memory usage and detect low memory after GC finishes
     MemoryService::track_memory_usage();
--- a/src/hotspot/share/gc/shared/generation.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/shared/generation.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -68,12 +68,6 @@
   return gch->old_gen_spec()->init_size();
 }
 
-// This is for CMS. It returns stable monotonic used space size.
-// Remove this when CMS is removed.
-size_t Generation::used_stable() const {
-  return used();
-}
-
 size_t Generation::max_capacity() const {
   return reserved().byte_size();
 }
--- a/src/hotspot/share/gc/shared/generation.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/shared/generation.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -156,7 +156,6 @@
   virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                         // generation can currently hold.
   virtual size_t used() const = 0;      // The number of used bytes in the gen.
-  virtual size_t used_stable() const;   // The number of used bytes for memory monitoring tools.
   virtual size_t free() const = 0;      // The number of free bytes in the gen.
 
   // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
--- a/src/hotspot/share/gc/shared/oopStorage.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -35,6 +35,7 @@
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.hpp"
@@ -414,14 +415,6 @@
 oop* OopStorage::allocate() {
   MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
 
-  // Note: Without this we might never perform cleanup.  As it is,
-  // cleanup is only requested here, when completing a concurrent
-  // iteration, or when someone entirely else wakes up the service
-  // thread, which isn't ideal.  But we can't notify in release().
-  if (reduce_deferred_updates()) {
-    notify_needs_cleanup();
-  }
-
   Block* block = block_for_allocation();
   if (block == NULL) return NULL; // Block allocation failed.
   assert(!block->is_full(), "invariant");
@@ -474,23 +467,20 @@
 
 OopStorage::Block* OopStorage::block_for_allocation() {
   assert_lock_strong(_allocation_mutex);
-
   while (true) {
     // Use the first block in _allocation_list for the allocation.
     Block* block = _allocation_list.head();
     if (block != NULL) {
       return block;
     } else if (reduce_deferred_updates()) {
-      MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
-      notify_needs_cleanup();
+      // Might have added a block to the _allocation_list, so retry.
     } else if (try_add_block()) {
-      block = _allocation_list.head();
-      assert(block != NULL, "invariant");
-      return block;
-    } else if (reduce_deferred_updates()) { // Once more before failure.
-      MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
-      notify_needs_cleanup();
-    } else {
+      // Successfully added a new block to the list, so retry.
+      assert(_allocation_list.chead() != NULL, "invariant");
+    } else if (_allocation_list.chead() != NULL) {
+      // Trying to add a block failed, but some other thread added to the
+      // list while we'd dropped the lock over the new block allocation.
+    } else if (!reduce_deferred_updates()) { // Once more before failure.
       // Attempt to add a block failed, no other thread added a block,
       // and no deferred updated added a block, then allocation failed.
       log_debug(oopstorage, blocks)("%s: failed block allocation", name());
@@ -635,7 +625,14 @@
         if (fetched == head) break; // Successful update.
         head = fetched;             // Retry with updated head.
       }
-      owner->record_needs_cleanup();
+      // Only request cleanup for to-empty transitions, not for from-full.
+      // There isn't any rush to process from-full transitions.  Allocation
+      // will reduce deferrals before allocating new blocks, so may process
+      // some.  And the service thread will drain the entire deferred list
+      // if there are any pending to-empty transitions.
+      if (releasing == old_allocated) {
+        owner->record_needs_cleanup();
+      }
       log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
                                     _owner->name(), p2i(this));
     }
@@ -684,7 +681,6 @@
   if (is_empty_bitmask(allocated)) {
     _allocation_list.unlink(*block);
     _allocation_list.push_back(*block);
-    notify_needs_cleanup();
   }
 
   log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
@@ -740,11 +736,6 @@
   return dup;
 }
 
-// Possible values for OopStorage::_needs_cleanup.
-const uint needs_cleanup_none = 0;     // No cleanup needed.
-const uint needs_cleanup_marked = 1;   // Requested, but no notification made.
-const uint needs_cleanup_notified = 2; // Requested and Service thread notified.
-
 const size_t initial_active_array_size = 8;
 
 OopStorage::OopStorage(const char* name,
@@ -758,7 +749,7 @@
   _active_mutex(active_mutex),
   _allocation_count(0),
   _concurrent_iteration_count(0),
-  _needs_cleanup(needs_cleanup_none)
+  _needs_cleanup(false)
 {
   _active_array->increment_refcount();
   assert(_active_mutex->rank() < _allocation_mutex->rank(),
@@ -796,40 +787,89 @@
   FREE_C_HEAP_ARRAY(char, _name);
 }
 
-// Called by service thread to check for pending work.
-bool OopStorage::needs_delete_empty_blocks() const {
-  return Atomic::load(&_needs_cleanup) != needs_cleanup_none;
+// Managing service thread notifications.
+//
+// We don't want cleanup work to linger indefinitely, but we also don't want
+// to run the service thread too often.  We're also very limited in what we
+// can do in a release operation, where cleanup work is created.
+//
+// When a release operation changes a block's state to empty, it records the
+// need for cleanup in both the associated storage object and in the global
+// request state.  A safepoint cleanup task notifies the service thread when
+// there may be cleanup work for any storage object, based on the global
+// request state.  But that notification is deferred if the service thread
+// has run recently, and we also avoid duplicate notifications.  The service
+// thread updates the timestamp and resets the state flags on every iteration.
+
+// Global cleanup request state.
+static volatile bool needs_cleanup_requested = false;
+
+// Flag for avoiding duplicate notifications.
+static bool needs_cleanup_triggered = false;
+
+// Time after which a notification can be made.
+static jlong cleanup_trigger_permit_time = 0;
+
+// Minimum time since last service thread check before notification is
+// permitted.  The value of 500ms was an arbitrary choice; frequent, but not
+// too frequent.
+const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
+
+void OopStorage::trigger_cleanup_if_needed() {
+  MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
+  if (Atomic::load(&needs_cleanup_requested) &&
+      !needs_cleanup_triggered &&
+      (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
+    needs_cleanup_triggered = true;
+    ml.notify_all();
+  }
+}
+
+bool OopStorage::has_cleanup_work_and_reset() {
+  assert_lock_strong(Service_lock);
+  cleanup_trigger_permit_time =
+    os::javaTimeNanos() + cleanup_trigger_defer_period;
+  needs_cleanup_triggered = false;
+  // Set the request flag false and return its old value.
+  // Needs to be atomic to avoid dropping a concurrent request.
+  // Can't use Atomic::xchg, which may not support bool.
+  return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
 }
 
 // Record that cleanup is needed, without notifying the Service thread.
 // Used by release(), where we can't lock even Service_lock.
 void OopStorage::record_needs_cleanup() {
-  Atomic::cmpxchg(needs_cleanup_marked, &_needs_cleanup, needs_cleanup_none);
-}
-
-// Record that cleanup is needed, and notify the Service thread.
-void OopStorage::notify_needs_cleanup() {
-  // Avoid re-notification if already notified.
-  const uint notified = needs_cleanup_notified;
-  if (Atomic::xchg(notified, &_needs_cleanup) != notified) {
-    MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
-    ml.notify_all();
-  }
+  // Set local flag first, else service thread could wake up and miss
+  // the request.  This order may instead (rarely) unnecessarily notify.
+  OrderAccess::release_store(&_needs_cleanup, true);
+  OrderAccess::release_store_fence(&needs_cleanup_requested, true);
 }
 
 bool OopStorage::delete_empty_blocks() {
+  // Service thread might have oopstorage work, but not for this object.
+  // Check for deferred updates even though that's not a service thread
+  // trigger; since we're here, we might as well process them.
+  if (!OrderAccess::load_acquire(&_needs_cleanup) &&
+      (OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
+    return false;
+  }
+
   MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
 
   // Clear the request before processing.
-  Atomic::store(needs_cleanup_none, &_needs_cleanup);
-  OrderAccess::fence();
+  OrderAccess::release_store_fence(&_needs_cleanup, false);
 
   // Other threads could be adding to the empty block count or the
   // deferred update list while we're working.  Set an upper bound on
   // how many updates we'll process and blocks we'll try to release,
   // so other threads can't cause an unbounded stay in this function.
-  size_t limit = block_count();
-  if (limit == 0) return false; // Empty storage; nothing at all to do.
+  // We add a bit of slop because the reduce_deferred_updates clause
+  // can cause blocks to be double counted.  If there are few blocks
+  // and many of them are deferred and empty, we might hit the limit
+  // and spin the caller without doing very much work.  Otherwise,
+  // we don't normally hit the limit anyway, instead running out of
+  // work to do.
+  size_t limit = block_count() + 10;
 
   for (size_t i = 0; i < limit; ++i) {
     // Process deferred updates, which might make empty blocks available.
@@ -946,8 +986,8 @@
   _storage->relinquish_block_array(_active_array);
   update_concurrent_iteration_count(-1);
   if (_concurrent) {
-    // We may have deferred some work.
-    const_cast<OopStorage*>(_storage)->notify_needs_cleanup();
+    // We may have deferred some cleanup work.
+    const_cast<OopStorage*>(_storage)->record_needs_cleanup();
   }
 }
 
--- a/src/hotspot/share/gc/shared/oopStorage.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -152,18 +152,26 @@
   template<bool concurrent, bool is_const> class ParState;
 
   // Service thread cleanup support.
-  // Stops deleting if there is an in-progress concurrent iteration.
-  // Locks both the _allocation_mutex and the _active_mutex, and may
-  // safepoint.  Deletion may be throttled, with only some available
-  // work performed, in order to allow other Service thread subtasks
-  // to run.  Returns true if there may be more work to do, false if
-  // nothing to do.
+
+  // Called by the service thread to process any pending cleanups for this
+  // storage object.  Drains the _deferred_updates list, and deletes empty
+  // blocks.  Stops deleting if there is an in-progress concurrent
+  // iteration.  Locks both the _allocation_mutex and the _active_mutex, and
+  // may safepoint.  Deletion may be throttled, with only some available
+  // work performed, in order to allow other Service thread subtasks to run.
+  // Returns true if there may be more work to do, false if nothing to do.
   bool delete_empty_blocks();
 
-  // Service thread cleanup support.
-  // Called by the service thread (while holding Service_lock) to test
-  // whether a call to delete_empty_blocks should be made.
-  bool needs_delete_empty_blocks() const;
+  // Called by safepoint cleanup to notify the service thread (via
+  // Service_lock) that there may be some OopStorage objects with pending
+  // cleanups to process.
+  static void trigger_cleanup_if_needed();
+
+  // Called by the service thread (while holding Service_lock) to test
+  // for pending cleanup requests, and resets the request state to allow
+  // recognition of new requests.  Returns true if there was a pending
+  // request.
+  static bool has_cleanup_work_and_reset();
 
   // Debugging and logging support.
   const char* name() const;
@@ -232,7 +240,7 @@
   // mutable because this gets set even for const iteration.
   mutable int _concurrent_iteration_count;
 
-  volatile uint _needs_cleanup;
+  volatile bool _needs_cleanup;
 
   bool try_add_block();
   Block* block_for_allocation();
@@ -240,7 +248,6 @@
   Block* find_block_or_null(const oop* ptr) const;
   void delete_empty_block(const Block& block);
   bool reduce_deferred_updates();
-  void notify_needs_cleanup();
 AIX_ONLY(public:)               // xlC 12 on AIX doesn't implement C++ DR45.
   void record_needs_cleanup();
 AIX_ONLY(private:)
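
The oopStorage.cpp and oopStorage.hpp hunks above replace the three-state _needs_cleanup counter and direct Service_lock notification with a simpler scheme: release operations only record a request (a per-storage bool plus a global flag), a safepoint cleanup task calls trigger_cleanup_if_needed() to wake the service thread at most once per deferral period, and the service thread consumes the request with has_cleanup_work_and_reset(). A minimal sketch of that record/test-and-reset idiom, using std::atomic instead of HotSpot's Atomic and OrderAccess primitives:

    // Sketch only: releasers set the flag; the service thread clears it and,
    // in the same atomic step, learns whether a request was pending, so a
    // concurrent request cannot be lost between the check and the reset.
    #include <atomic>

    static std::atomic<bool> cleanup_requested{false};

    void record_needs_cleanup_sketch() {            // release path: request cleanup
      cleanup_requested.store(true, std::memory_order_release);
    }

    bool has_cleanup_work_and_reset_sketch() {      // service thread: consume request
      return cleanup_requested.exchange(false, std::memory_order_acq_rel);
    }

The patch reaches the same effect with Atomic::cmpxchg(false, &needs_cleanup_requested, true), which, per its comment, is used because Atomic::xchg may not support bool; the returned old value says whether a request was pending.
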
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -170,7 +170,7 @@
       ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? NULL : &_recorded_nms_lock);
 
       ShenandoahNMethodOopDetector detector;
-      nm->oops_do(&detector, /* allow_zombie = */ true);
+      nm->oops_do(&detector, /* allow_dead = */ true);
 
       if (detector.has_oops()) {
         int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod);
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -128,14 +128,19 @@
   }
 }
 
-static bool load_require_barrier(LoadNode* load)      { return ((load->barrier_data() & RequireBarrier) != 0); }
-static bool load_has_weak_barrier(LoadNode* load)     { return ((load->barrier_data() & WeakBarrier) != 0); }
-static bool load_has_expanded_barrier(LoadNode* load) { return ((load->barrier_data() & ExpandedBarrier) != 0); }
+const uint NoBarrier       = 0;
+const uint RequireBarrier  = 1;
+const uint WeakBarrier     = 2;
+const uint ExpandedBarrier = 4;
+
+static bool load_require_barrier(LoadNode* load)      { return (load->barrier_data() & RequireBarrier)  == RequireBarrier; }
+static bool load_has_weak_barrier(LoadNode* load)     { return (load->barrier_data() & WeakBarrier)     == WeakBarrier; }
+static bool load_has_expanded_barrier(LoadNode* load) { return (load->barrier_data() & ExpandedBarrier) == ExpandedBarrier; }
 static void load_set_expanded_barrier(LoadNode* load) { return load->set_barrier_data(ExpandedBarrier); }
 
-static void load_set_barrier(LoadNode* load, bool weak)    {
+static void load_set_barrier(LoadNode* load, bool weak) {
   if (weak) {
-    load->set_barrier_data(WeakBarrier);
+    load->set_barrier_data(RequireBarrier | WeakBarrier);
   } else {
     load->set_barrier_data(RequireBarrier);
   }
@@ -540,8 +545,8 @@
   Node* then = igvn.transform(new IfTrueNode(iff));
   Node* elsen = igvn.transform(new IfFalseNode(iff));
 
-  Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
-                                                                    (const TypePtr*) in_val->bottom_type(), MemNode::unordered, barrier->is_weak()));
+  Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_adr, in_val,
+                                                              (const TypePtr*) in_val->bottom_type(), barrier->is_weak()));
 
   // Create the final region/phi pair to converge cntl/data paths to downstream code
   Node* result_region = igvn.transform(new RegionNode(3));
@@ -667,7 +672,6 @@
     case Op_ZCompareAndExchangeP:
     case Op_ZCompareAndSwapP:
     case Op_ZWeakCompareAndSwapP:
-    case Op_LoadBarrierSlowReg:
 #ifdef ASSERT
       if (VerifyOptoOopOffsets) {
         MemNode *mem = n->as_Mem();
@@ -1229,7 +1233,6 @@
   Compile *C = phase->C;
   PhaseIterGVN &igvn = phase->igvn();
   LoadStoreNode* zclone = NULL;
-  bool is_weak = false;
 
   Node *in_ctrl = old_node->in(MemNode::Control);
   Node *in_mem  = old_node->in(MemNode::Memory);
@@ -1249,7 +1252,6 @@
       if (can_simplify_cas(old_node)) {
         break;
       }
-      is_weak  = true;
       zclone = new ZWeakCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn),
               ((CompareAndSwapNode*)old_node)->order());
       adr_type = TypePtr::BOTTOM;
@@ -1280,7 +1282,7 @@
     igvn.register_new_node_with_optimizer(load);
     igvn.replace_node(old_node, zclone);
 
-    Node *barrier = new LoadBarrierNode(C, NULL, in_mem, load, in_adr, is_weak);
+    Node *barrier = new LoadBarrierNode(C, NULL, in_mem, load, in_adr, false /* weak */);
     Node *barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop);
     Node *barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control);
 
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -104,22 +104,25 @@
   }
 };
 
-class LoadBarrierSlowRegNode : public LoadPNode {
+class LoadBarrierSlowRegNode : public TypeNode {
 private:
-    bool _is_weak;
+  bool _is_weak;
 public:
   LoadBarrierSlowRegNode(Node *c,
-                         Node *mem,
                          Node *adr,
-                         const TypePtr *at,
+                         Node *src,
                          const TypePtr* t,
-                         MemOrd mo,
-                         bool weak = false,
-                         ControlDependency control_dependency = DependsOnlyOnTest) :
-      LoadPNode(c, mem, adr, at, t, mo, control_dependency), _is_weak(weak) {
+                         bool weak) :
+      TypeNode(t, 3), _is_weak(weak) {
+    init_req(1, adr);
+    init_req(2, src);
     init_class_id(Class_LoadBarrierSlowReg);
   }
 
+  virtual uint size_of() const {
+    return sizeof(*this);
+  }
+
   virtual const char * name() {
     return "LoadBarrierSlowRegNode";
   }
@@ -146,13 +149,6 @@
   LoadBarrierNode* load_barrier_node(int idx) const;
 };
 
-enum BarrierInfo {
-    NoBarrier       = 0,
-    RequireBarrier  = 1,
-    WeakBarrier     = 3,  // Inclusive with RequireBarrier
-    ExpandedBarrier = 4
-};
-
 class ZBarrierSetC2 : public BarrierSetC2 {
 private:
   ZBarrierSetC2State* state() const;
--- a/src/hotspot/share/gc/z/zBarrier.inline.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -24,10 +24,12 @@
 #ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
 #define SHARE_GC_Z_ZBARRIER_INLINE_HPP
 
+#include "classfile/javaClasses.hpp"
 #include "gc/z/zAddress.inline.hpp"
 #include "gc/z/zBarrier.hpp"
 #include "gc/z/zOop.inline.hpp"
 #include "gc/z/zResurrection.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/atomic.hpp"
 
 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
@@ -173,7 +175,21 @@
   }
 }
 
+// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
+inline void verify_on_weak(volatile oop* referent_addr) {
+#ifdef ASSERT
+  if (referent_addr != NULL) {
+    uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset;
+    oop obj = cast_to_oop(base);
+    assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
+    assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset), "Sanity");
+  }
+#endif
+}
+
 inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
+  verify_on_weak(p);
+
   if (is_resurrection_blocked(p, &o)) {
     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
   }
@@ -217,6 +233,8 @@
 }
 
 inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
+  verify_on_weak(p);
+
   if (is_resurrection_blocked(p, &o)) {
     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
   }
--- a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -62,6 +62,7 @@
     } else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
       return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
     } else {
+      assert((HasDecorator<decorators, ON_PHANTOM_OOP_REF>::value), "Must be");
       return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
     }
   } else {
@@ -70,6 +71,7 @@
     } else if (HasDecorator<decorators, ON_WEAK_OOP_REF>::value) {
       return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
     } else {
+      assert((HasDecorator<decorators, ON_PHANTOM_OOP_REF>::value), "Must be");
       return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
     }
   }
@@ -89,6 +91,7 @@
     } else if (decorators_known_strength & ON_WEAK_OOP_REF) {
       return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o);
     } else {
+      assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be");
       return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o);
     }
   } else {
@@ -97,6 +100,7 @@
     } else if (decorators_known_strength & ON_WEAK_OOP_REF) {
       return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o);
     } else {
+      assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be");
       return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o);
     }
   }
--- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,7 +99,6 @@
 }
 
 void BFSClosure::process() {
-
   process_root_set();
   process_queue();
 }
@@ -138,7 +137,6 @@
 
     // if we are processing the initial root set, don't add to queue
     if (_current_parent != NULL) {
-      assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant");
       _edge_queue->add(_current_parent, reference);
     }
 
@@ -151,20 +149,8 @@
 void BFSClosure::add_chain(const oop* reference, const oop pointee) {
   assert(pointee != NULL, "invariant");
   assert(NULL == pointee->mark(), "invariant");
-
-  const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2;
-  ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
-  size_t idx = 0;
-  chain[idx++] = Edge(NULL, reference);
-  // aggregate from breadth-first search
-  const Edge* current = _current_parent;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
-  }
-  assert(length == idx, "invariant");
-  _edge_store->add_chain(chain, length);
+  Edge leak_edge(_current_parent, reference);
+  _edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
 }
 
 void BFSClosure::dfs_fallback() {
@@ -241,3 +227,12 @@
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void BFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  assert(is_aligned(ref, HeapWordSize), "invariant");
+  assert(*ref != NULL, "invariant");
+  if (!_edge_queue->is_full()) {
+    _edge_queue->add(NULL, ref);
+  }
+}
--- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -26,7 +26,6 @@
 #define SHARE_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
 class BitSet;
 class Edge;
@@ -65,6 +64,7 @@
  public:
   BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
   void process();
+  void do_root(const oop* ref);
 
   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
--- a/src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -47,7 +47,7 @@
 
   BitMap::idx_t mark_obj(const HeapWord* addr) {
     const BitMap::idx_t bit = addr_to_bit(addr);
-    _bits.par_set_bit(bit);
+    _bits.set_bit(bit);
     return bit;
   }
 
--- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,14 +23,14 @@
  */
 
 #include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
 #include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
@@ -88,15 +88,15 @@
   // Mark root set, to avoid going sideways
   _max_depth = 1;
   _ignore_root_set = false;
-  DFSClosure dfs1;
-  RootSetClosure::process_roots(&dfs1);
+  DFSClosure dfs;
+  RootSetClosure<DFSClosure> rs(&dfs);
+  rs.process();
 
   // Depth-first search
   _max_depth = max_dfs_depth;
   _ignore_root_set = true;
   assert(_start_edge == NULL, "invariant");
-  DFSClosure dfs2;
-  RootSetClosure::process_roots(&dfs2);
+  rs.process();
 }
 
 void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
@@ -133,30 +133,29 @@
 }
 
 void DFSClosure::add_chain() {
-  const size_t length = _start_edge == NULL ? _depth + 1 :
-                        _start_edge->distance_to_root() + 1 + _depth + 1;
+  const size_t array_length = _depth + 2;
 
   ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
+  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, array_length);
   size_t idx = 0;
 
   // aggregate from depth-first search
   const DFSClosure* c = this;
   while (c != NULL) {
-    chain[idx++] = Edge(NULL, c->reference());
+    const size_t next = idx + 1;
+    chain[idx++] = Edge(&chain[next], c->reference());
     c = c->parent();
   }
-
-  assert(idx == _depth + 1, "invariant");
+  assert(_depth + 1 == idx, "invariant");
+  assert(array_length == idx + 1, "invariant");
 
   // aggregate from breadth-first search
-  const Edge* current = _start_edge;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
+  if (_start_edge != NULL) {
+    chain[idx++] = *_start_edge;
+  } else {
+    chain[idx - 1] = Edge(NULL, chain[idx - 1].reference());
   }
-  assert(idx == length, "invariant");
-  _edge_store->add_chain(chain, length);
+  _edge_store->put_chain(chain, idx + (_start_edge != NULL ? _start_edge->distance_to_root() : 0));
 }
 
 void DFSClosure::do_oop(oop* ref) {
@@ -176,3 +175,11 @@
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void DFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  assert(is_aligned(ref, HeapWordSize), "invariant");
+  const oop pointee = *ref;
+  assert(pointee != NULL, "invariant");
+  closure_impl(ref, pointee);
+}
--- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -26,7 +26,6 @@
 #define SHARE_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
 class BitSet;
 class Edge;
@@ -34,7 +33,7 @@
 class EdgeQueue;
 
 // Class responsible for iterating the heap depth-first
-class DFSClosure: public BasicOopIterateClosure {
+class DFSClosure : public BasicOopIterateClosure {
  private:
   static EdgeStore* _edge_store;
   static BitSet*    _mark_bits;
@@ -57,6 +56,7 @@
  public:
   static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
   static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
+  void do_root(const oop* ref);
 
   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
--- a/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -29,7 +29,7 @@
 #include "oops/oopsHierarchy.hpp"
 
 class Edge {
- private:
+ protected:
   const Edge* _parent;
   const oop* _reference;
  public:
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,37 +27,17 @@
 #include "jfr/leakprofiler/chains/edgeUtils.hpp"
 #include "oops/oop.inline.hpp"
 
-RoutableEdge::RoutableEdge() : Edge() {}
-RoutableEdge::RoutableEdge(const Edge* parent, const oop* reference) : Edge(parent, reference),
-                                                                       _skip_edge(NULL),
-                                                                       _skip_length(0),
-                                                                       _processed(false) {}
+StoredEdge::StoredEdge() : Edge() {}
+StoredEdge::StoredEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {}
 
-RoutableEdge::RoutableEdge(const Edge& edge) : Edge(edge),
-                                               _skip_edge(NULL),
-                                               _skip_length(0),
-                                               _processed(false) {}
-
-RoutableEdge::RoutableEdge(const RoutableEdge& edge) : Edge(edge),
-                                                      _skip_edge(edge._skip_edge),
-                                                      _skip_length(edge._skip_length),
-                                                      _processed(edge._processed) {}
+StoredEdge::StoredEdge(const Edge& edge) : Edge(edge), _gc_root_id(0), _skip_length(0) {}
 
-void RoutableEdge::operator=(const RoutableEdge& edge) {
-  Edge::operator=(edge);
-  _skip_edge = edge._skip_edge;
-  _skip_length = edge._skip_length;
-  _processed = edge._processed;
-}
+StoredEdge::StoredEdge(const StoredEdge& edge) : Edge(edge), _gc_root_id(edge._gc_root_id), _skip_length(edge._skip_length) {}
 
-size_t RoutableEdge::logical_distance_to_root() const {
-  size_t depth = 0;
-  const RoutableEdge* current = logical_parent();
-  while (current != NULL) {
-    depth++;
-    current = current->logical_parent();
-  }
-  return depth;
+void StoredEdge::operator=(const StoredEdge& edge) {
+  Edge::operator=(edge);
+  _gc_root_id = edge._gc_root_id;
+  _skip_length = edge._skip_length;
 }
 
 traceid EdgeStore::_edge_id_counter = 0;
@@ -69,79 +49,12 @@
 EdgeStore::~EdgeStore() {
   assert(_edges != NULL, "invariant");
   delete _edges;
-  _edges = NULL;
-}
-
-const Edge* EdgeStore::get_edge(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
-  return entry != NULL ? entry->literal_addr() : NULL;
-}
-
-const Edge* EdgeStore::put(const Edge* edge) {
-  assert(edge != NULL, "invariant");
-  const RoutableEdge e = *edge;
-  assert(NULL == _edges->lookup_only(e, (uintptr_t)e.reference()), "invariant");
-  EdgeEntry& entry = _edges->put(e, (uintptr_t)e.reference());
-  return entry.literal_addr();
-}
-
-traceid EdgeStore::get_id(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
-  assert(entry != NULL, "invariant");
-  return entry->id();
-}
-
-traceid EdgeStore::get_root_id(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  const Edge* root = EdgeUtils::root(*edge);
-  assert(root != NULL, "invariant");
-  return get_id(root);
-}
-
-void EdgeStore::add_chain(const Edge* chain, size_t length) {
-  assert(chain != NULL, "invariant");
-  assert(length > 0, "invariant");
-
-  size_t bottom_index = length - 1;
-  const size_t top_index = 0;
-
-  const Edge* stored_parent_edge = NULL;
-
-  // determine level of shared ancestry
-  for (; bottom_index > top_index; --bottom_index) {
-    const Edge* stored_edge = get_edge(&chain[bottom_index]);
-    if (stored_edge != NULL) {
-      stored_parent_edge = stored_edge;
-      continue;
-    }
-    break;
-  }
-
-  // insertion of new Edges
-  for (int i = (int)bottom_index; i >= (int)top_index; --i) {
-    Edge edge(stored_parent_edge, chain[i].reference());
-    stored_parent_edge = put(&edge);
-  }
-
-  const oop sample_object = stored_parent_edge->pointee();
-  assert(sample_object != NULL, "invariant");
-  assert(NULL == sample_object->mark(), "invariant");
-
-  // Install the "top" edge of the chain into the sample object mark oop.
-  // This associates the sample object with its navigable reference chain.
-  sample_object->set_mark(markOop(stored_parent_edge));
 }
 
 bool EdgeStore::is_empty() const {
   return !_edges->has_entries();
 }
 
-size_t EdgeStore::number_of_entries() const {
-  return _edges->cardinality();
-}
-
 void EdgeStore::assign_id(EdgeEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->id() == 0, "invariant");
@@ -153,3 +66,254 @@
   assert(entry->hash() == hash, "invariant");
   return true;
 }
+
+#ifdef ASSERT
+bool EdgeStore::contains(const oop* reference) const {
+  return get(reference) != NULL;
+}
+#endif
+
+StoredEdge* EdgeStore::get(const oop* reference) const {
+  assert(reference != NULL, "invariant");
+  const StoredEdge e(NULL, reference);
+  EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference);
+  return entry != NULL ? entry->literal_addr() : NULL;
+}
+
+StoredEdge* EdgeStore::put(const oop* reference) {
+  assert(reference != NULL, "invariant");
+  const StoredEdge e(NULL, reference);
+  assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
+  EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
+  return entry.literal_addr();
+}
+
+traceid EdgeStore::get_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
+  assert(entry != NULL, "invariant");
+  return entry->id();
+}
+
+traceid EdgeStore::gc_root_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  const traceid gc_root_id = static_cast<const StoredEdge*>(edge)->gc_root_id();
+  if (gc_root_id != 0) {
+    return gc_root_id;
+  }
+  // not cached
+  assert(edge != NULL, "invariant");
+  const Edge* const root = EdgeUtils::root(*edge);
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  return get_id(root);
+}
+
+static const Edge* get_skip_ancestor(const Edge** current, size_t distance_to_root, size_t* skip_length) {
+  assert(distance_to_root >= EdgeUtils::root_context, "invariant");
+  assert(*skip_length == 0, "invariant");
+  *skip_length = distance_to_root - (EdgeUtils::root_context - 1);
+  const Edge* const target = EdgeUtils::ancestor(**current, *skip_length);
+  assert(target != NULL, "invariant");
+  assert(target->distance_to_root() + 1 == EdgeUtils::root_context, "invariant");
+  return target;
+}
+
+bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert((*current)->distance_to_root() == distance_to_root, "invariant");
+
+  if (distance_to_root < EdgeUtils::root_context) {
+    // nothing to skip
+    return false;
+  }
+
+  size_t skip_length = 0;
+  const Edge* const skip_ancestor = get_skip_ancestor(current, distance_to_root, &skip_length);
+  assert(skip_ancestor != NULL, "invariant");
+  (*previous)->set_skip_length(skip_length);
+
+  // lookup target
+  StoredEdge* stored_target = get(skip_ancestor->reference());
+  if (stored_target != NULL) {
+    (*previous)->set_parent(stored_target);
+    // linked to existing, complete
+    return true;
+  }
+
+  assert(stored_target == NULL, "invariant");
+  stored_target = put(skip_ancestor->reference());
+  assert(stored_target != NULL, "invariant");
+  (*previous)->set_parent(stored_target);
+  *previous = stored_target;
+  *current = skip_ancestor->parent();
+  return false;
+}
+
+static void link_edge(const StoredEdge* current_stored, StoredEdge** previous) {
+  assert(current_stored != NULL, "invariant");
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  (*previous)->set_parent(current_stored);
+}
+
+static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* distance) {
+  assert(edge != NULL, "invariant");
+  assert(distance != NULL, "invariant");
+  const StoredEdge* current = edge;
+  *distance = 1;
+  while (current != NULL && !current->is_skip_edge()) {
+    ++(*distance);
+    current = current->parent();
+  }
+  return current;
+}
+
+void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length) {
+  assert(current_stored != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  size_t distance_to_skip_edge; // including the skip edge itself
+  const StoredEdge* const closest_skip_edge = find_closest_skip_edge(current_stored, &distance_to_skip_edge);
+  if (closest_skip_edge == NULL) {
+    // no skip edge found implies the chain extends all the way to a root
+    if (distance_to_skip_edge + previous_length <= EdgeUtils::max_ref_chain_depth) {
+      link_edge(current_stored, previous);
+      return;
+    }
+    assert(current_stored->distance_to_root() == distance_to_skip_edge - 2, "invariant");
+    put_skip_edge(previous, reinterpret_cast<const Edge**>(&current_stored), distance_to_skip_edge - 2);
+    return;
+  }
+  assert(closest_skip_edge->is_skip_edge(), "invariant");
+  if (distance_to_skip_edge + previous_length <= EdgeUtils::leak_context) {
+    link_edge(current_stored, previous);
+    return;
+  }
+  // create a new skip edge, deriving its information from the closest skip edge
+  (*previous)->set_skip_length(distance_to_skip_edge + closest_skip_edge->skip_length());
+  (*previous)->set_parent(closest_skip_edge->parent());
+}
+
+StoredEdge* EdgeStore::link_new_edge(StoredEdge** previous, const Edge** current) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert(!contains((*current)->reference()), "invariant");
+  StoredEdge* const stored_edge = put((*current)->reference());
+  assert(stored_edge != NULL, "invariant");
+  link_edge(stored_edge, previous);
+  return stored_edge;
+}
+
+bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t limit) {
+  assert(*previous != NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  size_t depth = 1;
+  while (*current != NULL && depth < limit) {
+    StoredEdge* stored_edge = get((*current)->reference());
+    if (stored_edge != NULL) {
+      link_with_existing_chain(stored_edge, previous, depth);
+      return true;
+    }
+    stored_edge = link_new_edge(previous, current);
+    assert((*previous)->parent() != NULL, "invariant");
+    *previous = stored_edge;
+    *current = (*current)->parent();
+    ++depth;
+  }
+  return NULL == *current;
+}
+
+// Install the immediate edge into the mark word of the leak candidate object
+StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
+  assert(edge != NULL, "invariant");
+  assert(!contains(edge->reference()), "invariant");
+  StoredEdge* const leak_context_edge = put(edge->reference());
+  oop sample_object = edge->pointee();
+  assert(sample_object != NULL, "invariant");
+  assert(NULL == sample_object->mark(), "invariant");
+  sample_object->set_mark(markOop(leak_context_edge));
+  return leak_context_edge;
+}
+
+/*
+ * The purpose of put_chain() is to reify the edge sequence
+ * discovered during heap traversal with a normalized logical copy.
+ * This copy consists of two sub-sequences and a connecting link (skip edge).
+ *
+ * "current" can be thought of as the cursor (search) edge; it is not in the edge store.
+ * "previous" is always an edge in the edge store.
+ * The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store.
+ */
+void EdgeStore::put_chain(const Edge* chain, size_t length) {
+  assert(chain != NULL, "invariant");
+  assert(chain->distance_to_root() + 1 == length, "invariant");
+  StoredEdge* const leak_context_edge = associate_leak_context_with_candidate(chain);
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->parent() == NULL, "invariant");
+
+  if (1 == length) {
+    return;
+  }
+
+  const Edge* current = chain->parent();
+  assert(current != NULL, "invariant");
+  StoredEdge* previous = leak_context_edge;
+
+  // a leak context is the sequence of (limited) edges reachable from the leak candidate
+  if (put_edges(&previous, &current, EdgeUtils::leak_context)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+    return;
+  }
+
+  const size_t distance_to_root = length > EdgeUtils::leak_context ? length - 1 - EdgeUtils::leak_context : length - 1;
+  assert(current->distance_to_root() == distance_to_root, "invariant");
+
+  // a skip edge is the logical link
+  // connecting the leak context sequence with the root context sequence
+  if (put_skip_edge(&previous, &current, distance_to_root)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    assert(previous->is_skip_edge(), "invariant");
+    assert(previous->parent() != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous->parent()));
+    return;
+  }
+
+  assert(current->distance_to_root() < EdgeUtils::root_context, "invariant");
+
+  // a root context is the sequence of (limited) edges reachable from the root
+  put_edges(&previous, &current, EdgeUtils::root_context);
+  assert(previous != NULL, "invariant");
+  put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+}
+
+void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(root != NULL, "invariant");
+  store_gc_root_id_in_leak_context_edge(leak_context_edge, root);
+  assert(leak_context_edge->distance_to_root() + 1 <= EdgeUtils::max_ref_chain_depth, "invariant");
+}
+
+// To avoid another traversal to resolve the root edge id later,
+// cache it in the immediate leak context edge for fast retrieval.
+void EdgeStore::store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->gc_root_id() == 0, "invariant");
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  assert(root->distance_to_root() == 0, "invariant");
+  const StoredEdge* const stored_root = static_cast<const StoredEdge*>(root);
+  traceid root_id = stored_root->gc_root_id();
+  if (root_id == 0) {
+    root_id = get_id(root);
+    stored_root->set_gc_root_id(root_id);
+  }
+  assert(root_id != 0, "invariant");
+  leak_context_edge->set_gc_root_id(root_id);
+  assert(leak_context_edge->gc_root_id() == stored_root->gc_root_id(), "invariant");
+}
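
The put_chain() comment above describes the stored copy as a leak-context sub-sequence and a root-context sub-sequence joined by a skip edge. As a rough illustration of the capping arithmetic in get_skip_ancestor(), here is a self-contained sketch; the three constants mirror EdgeUtils, while the function name and the chosen depth are hypothetical and not part of the patch:

    #include <cassert>
    #include <cstddef>

    const size_t leak_context        = 100;
    const size_t root_context        = 100;
    const size_t max_ref_chain_depth = leak_context + root_context;

    // How many edges must a single skip edge jump so that exactly
    // root_context edges remain between the skip target and the root?
    static size_t skip_length_for(size_t distance_to_root) {
      assert(distance_to_root >= root_context);
      return distance_to_root - (root_context - 1);
    }

    int main() {
      const size_t distance_to_root = 5000;  // a deep reference chain
      const size_t skip = skip_length_for(distance_to_root);
      // The skip target ends up root_context - 1 edges from the root, so the
      // stored chain stays bounded by leak_context + root_context edges.
      assert(distance_to_root - skip == root_context - 1);
      assert(max_ref_chain_depth == 200);
      return 0;
    }
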
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -25,64 +25,40 @@
 #ifndef SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
 #define SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
 
+#include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
-#include "jfr/leakprofiler/chains/edge.hpp"
 #include "memory/allocation.hpp"
 
 typedef u8 traceid;
 
-class RoutableEdge : public Edge {
+class StoredEdge : public Edge {
  private:
-  mutable const RoutableEdge* _skip_edge;
-  mutable size_t _skip_length;
-  mutable bool _processed;
+  mutable traceid _gc_root_id;
+  size_t _skip_length;
 
  public:
-  RoutableEdge();
-  RoutableEdge(const Edge* parent, const oop* reference);
-  RoutableEdge(const Edge& edge);
-  RoutableEdge(const RoutableEdge& edge);
-  void operator=(const RoutableEdge& edge);
-
-  const RoutableEdge* skip_edge() const { return _skip_edge; }
-  size_t skip_length() const { return _skip_length; }
+  StoredEdge();
+  StoredEdge(const Edge* parent, const oop* reference);
+  StoredEdge(const Edge& edge);
+  StoredEdge(const StoredEdge& edge);
+  void operator=(const StoredEdge& edge);
 
-  bool is_skip_edge() const { return _skip_edge != NULL; }
-  bool processed() const { return _processed; }
-  bool is_sentinel() const {
-    return _skip_edge == NULL && _skip_length == 1;
-  }
-
-  void set_skip_edge(const RoutableEdge* edge) const {
-    assert(!is_skip_edge(), "invariant");
-    assert(edge != this, "invariant");
-    _skip_edge = edge;
-  }
+  traceid gc_root_id() const { return _gc_root_id; }
+  void set_gc_root_id(traceid root_id) const { _gc_root_id = root_id; }
 
-  void set_skip_length(size_t length) const {
-    _skip_length = length;
-  }
-
-  void set_processed() const {
-    assert(!_processed, "invariant");
-    _processed = true;
-  }
+  bool is_skip_edge() const { return _skip_length != 0; }
+  size_t skip_length() const { return _skip_length; }
+  void set_skip_length(size_t length) { _skip_length = length; }
 
-  // true navigation according to physical tree representation
-  const RoutableEdge* physical_parent() const {
-    return static_cast<const RoutableEdge*>(parent());
-  }
+  void set_parent(const Edge* edge) { this->_parent = edge; }
 
-  // logical navigation taking skip levels into account
-  const RoutableEdge* logical_parent() const {
-    return is_skip_edge() ? skip_edge() : physical_parent();
+  StoredEdge* parent() const {
+    return const_cast<StoredEdge*>(static_cast<const StoredEdge*>(Edge::parent()));
   }
-
-  size_t logical_distance_to_root() const;
 };
 
 class EdgeStore : public CHeapObj<mtTracing> {
-  typedef HashTableHost<RoutableEdge, traceid, Entry, EdgeStore> EdgeHashTable;
+  typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
   typedef EdgeHashTable::HashEntry EdgeEntry;
   template <typename,
             typename,
@@ -90,6 +66,9 @@
             typename,
             size_t>
   friend class HashTableHost;
+  friend class EventEmitter;
+  friend class ObjectSampleWriter;
+  friend class ObjectSampleCheckpoint;
  private:
   static traceid _edge_id_counter;
   EdgeHashTable* _edges;
@@ -98,22 +77,31 @@
   void assign_id(EdgeEntry* entry);
   bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
 
-  const Edge* get_edge(const Edge* edge) const;
-  const Edge* put(const Edge* edge);
+  StoredEdge* get(const oop* reference) const;
+  StoredEdge* put(const oop* reference);
+  traceid gc_root_id(const Edge* edge) const;
+
+  bool put_edges(StoredEdge** previous, const Edge** current, size_t length);
+  bool put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root);
+  void put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const;
+
+  StoredEdge* associate_leak_context_with_candidate(const Edge* edge);
+  void store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const;
+  StoredEdge* link_new_edge(StoredEdge** previous, const Edge** current);
+  void link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length);
+
+  template <typename T>
+  void iterate(T& functor) const { _edges->iterate_value<T>(functor); }
+
+  DEBUG_ONLY(bool contains(const oop* reference) const;)
 
  public:
   EdgeStore();
   ~EdgeStore();
 
-  void add_chain(const Edge* chain, size_t length);
   bool is_empty() const;
-  size_t number_of_entries() const;
-
   traceid get_id(const Edge* edge) const;
-  traceid get_root_id(const Edge* edge) const;
-
-  template <typename T>
-  void iterate_edges(T& functor) const { _edges->iterate_value<T>(functor); }
+  void put_chain(const Edge* chain, size_t length);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,11 +38,7 @@
   return (const Edge*)edge.pointee()->mark() == &edge;
 }
 
-bool EdgeUtils::is_root(const Edge& edge) {
-  return edge.is_root();
-}
-
-static int field_offset(const Edge& edge) {
+static int field_offset(const StoredEdge& edge) {
   assert(!edge.is_root(), "invariant");
   const oop ref_owner = edge.reference_owner();
   assert(ref_owner != NULL, "invariant");
@@ -56,7 +52,7 @@
   return offset;
 }
 
-static const InstanceKlass* field_type(const Edge& edge) {
+static const InstanceKlass* field_type(const StoredEdge& edge) {
   assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
   return (const InstanceKlass*)edge.reference_owner_klass();
 }
@@ -138,175 +134,18 @@
     current = parent;
     parent = current->parent();
   }
-  return current;
-}
-
-// The number of references associated with the leak node;
-// can be viewed as the leak node "context".
-// Used to provide leak context for a "capped/skipped" reference chain.
-static const size_t leak_context = 100;
-
-// The number of references associated with the root node;
-// can be viewed as the root node "context".
-// Used to provide root context for a "capped/skipped" reference chain.
-static const size_t root_context = 100;
-
-// A limit on the reference chain depth to be serialized,
-static const size_t max_ref_chain_depth = leak_context + root_context;
-
-const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
-  const RoutableEdge* current = &edge;
-  const RoutableEdge* parent = current->physical_parent();
-  size_t seek = 0;
-  while (parent != NULL && seek != skip_length) {
-    seek++;
-    current = parent;
-    parent = parent->physical_parent();
-  }
-  return current;
-}
-
-#ifdef ASSERT
-static void validate_skip_target(const RoutableEdge* skip_target) {
-  assert(skip_target != NULL, "invariant");
-  assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
-  assert(skip_target->is_sentinel(), "invariant");
-}
-
-static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  if (last_skip_edge != NULL) {
-    const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
-    validate_skip_target(target->logical_parent());
-    return;
-  }
-  assert(last_skip_edge == NULL, "invariant");
-  // only one level of logical indirection
-  validate_skip_target(new_skip_edge->logical_parent());
-}
-#endif // ASSERT
-
-static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->is_skip_edge(), "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
-  assert(skip_target != NULL, "invariant");
-  new_skip_edge->set_skip_edge(skip_target);
-  new_skip_edge->set_skip_length(skip_target_distance);
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  assert(new_skip_edge->logical_parent() == skip_target, "invariant");
-}
-
-static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL) {
-    if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
+  assert(current != NULL, "invariant");
   return current;
 }
 
-static void collapse_overlapping_chain(const RoutableEdge& edge,
-                                       const RoutableEdge* first_processed_edge,
-                                       size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  // first_processed_edge is already processed / written
-  assert(first_processed_edge->processed(), "invariant");
-  assert(first_processed_distance + 1 <= leak_context, "invariant");
-
-  // from this first processed edge, attempt to fetch the last skip edge
-  size_t last_skip_edge_distance = 0;
-  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
-  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;
-
-  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
-    // complete chain can be accommodated without modification
-    return;
-  }
-
-  // backtrack one edge from existing processed edge
-  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  assert(new_skip_edge->parent() == first_processed_edge, "invariant");
-
-  size_t adjustment = 0;
-  if (last_skip_edge != NULL) {
-    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
-    adjustment = leak_context - first_processed_distance - 1;
-    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
-  } else {
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
-    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
+const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) {
+  const Edge* current = &edge;
+  const Edge* parent = current->parent();
+  size_t seek = 0;
+  while (parent != NULL && seek != distance) {
+    seek++;
+    current = parent;
+    parent = parent->parent();
   }
-
-  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
-}
-
-static void collapse_non_overlapping_chain(const RoutableEdge& edge,
-                                           const RoutableEdge* first_processed_edge,
-                                           size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  assert(!first_processed_edge->processed(), "invariant");
-  // this implies that the first "processed" edge is the leak context relative "leaf"
-  assert(first_processed_distance + 1 == leak_context, "invariant");
-
-  const size_t distance_to_root = edge.distance_to_root();
-  if (distance_to_root + 1 <= max_ref_chain_depth) {
-    // complete chain can be accommodated without constructing a skip edge
-    return;
-  }
-
-  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
-  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel
-
-  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
-}
-
-static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL && distance < leak_context - 1) {
-    if (current->processed()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
-  assert(distance <= leak_context - 1, "invariant");
   return current;
 }
-
-/*
- * Some vocabulary:
- * -----------
- * "Context" is an interval in the chain, it is associcated with an edge and it signifies a number of connected edges.
- * "Processed / written" means an edge that has already been serialized.
- * "Skip edge" is an edge that contains additional information for logical routing purposes.
- * "Skip target" is an edge used as a destination for a skip edge
- */
-void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
-  assert(is_leak_edge(edge), "invariant");
-
-  // attempt to locate an already processed edge inside current leak context (if any)
-  size_t first_processed_distance = 0;
-  const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
-  if (first_processed_edge == NULL) {
-    return;
-  }
-
-  if (first_processed_edge->processed()) {
-    collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  } else {
-    collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  }
-
-  assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
-}
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -28,15 +28,17 @@
 #include "memory/allocation.hpp"
 
 class Edge;
-class RoutableEdge;
 class Symbol;
 
 class EdgeUtils : public AllStatic {
  public:
-  static bool is_leak_edge(const Edge& edge);
+  static const size_t leak_context = 100;
+  static const size_t root_context = 100;
+  static const size_t max_ref_chain_depth = leak_context + root_context;
 
+  static bool is_leak_edge(const Edge& edge);
   static const Edge* root(const Edge& edge);
-  static bool is_root(const Edge& edge);
+  static const Edge* ancestor(const Edge& edge, size_t distance);
 
   static bool is_array_element(const Edge& edge);
   static int array_index(const Edge& edge);
@@ -44,8 +46,6 @@
 
   static const Symbol* field_name_symbol(const Edge& edge);
   static jshort field_modifiers(const Edge& edge);
-
-  static void collapse_chain(const RoutableEdge& edge);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHAINS_EDGEUTILS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeQueue.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
+#include "logging/log.hpp"
+#include "memory/universe.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all) :
+  _sampler(sampler), _edge_store(edge_store), _cutoff_ticks(cutoff), _emit_all(emit_all) {}
+
+/* The EdgeQueue is backed by directly managed virtual memory.
+ * We will attempt to dimension an initial reservation
+ * in proportion to the size of the heap (represented by heap_region).
+ * Initial memory reservation: 5% of the heap OR at least 32 MB
+ * Commit ratio: 1 : 10 (subject to allocation granularities)
+ */
+static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
+  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
+  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
+  return memory_reservation_bytes;
+}
+
+static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
+  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
+  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
+  return memory_commit_block_size_bytes;
+}
+
+static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
+  log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
+  log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
+  log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
+  if (edge_queue.reserved_size() > 0) {
+    log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
+      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
+  }
+}
+
+void PathToGcRootsOperation::doit() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_cutoff_ticks > 0, "invariant");
+
+  // The bitset used for marking is dimensioned as a function of the heap size
+  const MemRegion heap_region = Universe::heap()->reserved_region();
+  BitSet mark_bits(heap_region);
+
+  // The edge queue is dimensioned as a fraction of the heap size
+  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
+  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
+
+  // The initialize() routines will attempt to reserve and allocate backing storage memory.
+  // Failure to accommodate will render root chain processing impossible.
+  // As a fallback on failure, just write out the existing samples, flat, without chains.
+  if (!(mark_bits.initialize() && edge_queue.initialize())) {
+    log_warning(jfr)("Unable to allocate memory for root chain processing");
+    return;
+  }
+
+  // Save the original markWord for the potential leak objects,
+  // to be restored on function exit
+  ObjectSampleMarker marker;
+  if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) {
+    // no valid samples to process
+    return;
+  }
+
+  // Necessary condition for attempting a root set iteration
+  Universe::heap()->ensure_parsability(false);
+
+  BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
+  RootSetClosure<BFSClosure> roots(&bfs);
+
+  GranularTimer::start(_cutoff_ticks, 1000000);
+  roots.process();
+  if (edge_queue.is_full()) {
+    // Pathological case where roots don't fit in queue
+    // Do a depth-first search, but mark roots first
+    // to avoid walking sideways over roots
+    DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
+  } else {
+    bfs.process();
+  }
+  GranularTimer::stop();
+  log_edge_queue_summary(edge_queue);
+
+  // Emit old objects including their reference chains as events
+  EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
+  emitter.write_events(_sampler, _edge_store, _emit_all);
+}
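
The reservation comment in pathToGcRootsOperation.cpp above spells out the sizing policy (5% of the heap, at least 32 MB reserved, committed in 1:10 blocks). As a worked example only, assuming a hypothetical 10 GB heap and approximating the HotSpot MAX2/M macros with plain C++:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t heap_bytes   = size_t(10) * 1024 * M;              // hypothetical 10 GB heap
      const size_t reservation  = std::max(heap_bytes / 20, 32 * M);  // 5% of heap, at least 32 MB
      const size_t commit_block = reservation / 10;                   // 1:10 commit ratio
      assert(reservation == 512 * M);    // 512 MB reserved for a 10 GB heap
      assert(commit_block >= 3 * M);     // consistent with the assert in the patch
      return 0;
    }
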
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
+
+class EdgeStore;
+class ObjectSampler;
+
+// Safepoint operation for finding paths to gc roots
+class PathToGcRootsOperation : public OldObjectVMOperation {
+ private:
+  ObjectSampler* _sampler;
+  EdgeStore* const _edge_store;
+  const int64_t _cutoff_ticks;
+  const bool _emit_all;
+
+ public:
+  PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all);
+  virtual void doit();
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
--- a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -28,12 +28,14 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edgeQueue.hpp"
 #include "jfr/leakprofiler/chains/rootSetClosure.hpp"
-#include "jfr/leakprofiler/utilities/saveRestore.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/synchronizer.hpp"
@@ -44,11 +46,11 @@
 #include "jvmci/jvmci.hpp"
 #endif
 
-RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) :
-  _edge_queue(edge_queue) {
-}
+template <typename Delegate>
+RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegate) {}
 
-void RootSetClosure::do_oop(oop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(oop* ref) {
   assert(ref != NULL, "invariant");
   // We discard unaligned root references because
   // our reference tagging scheme will use
@@ -62,50 +64,40 @@
   }
 
   assert(is_aligned(ref, HeapWordSize), "invariant");
-  const oop pointee = *ref;
-  if (pointee != NULL) {
-    closure_impl(ref, pointee);
+  if (*ref != NULL) {
+    _delegate->do_root(ref);
   }
 }
 
-void RootSetClosure::do_oop(narrowOop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
   assert(ref != NULL, "invariant");
   assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
   const oop pointee = RawAccess<>::oop_load(ref);
   if (pointee != NULL) {
-    closure_impl(UnifiedOop::encode(ref), pointee);
-  }
-}
-
-void RootSetClosure::closure_impl(const oop* reference, const oop pointee) {
-  if (!_edge_queue->is_full())  {
-    _edge_queue->add(NULL, reference);
+    _delegate->do_root(UnifiedOop::encode(ref));
   }
 }
 
-void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) {
-  RootSetClosure rs(edge_queue);
-  process_roots(&rs);
+class RootSetClosureMarkScope : public MarkScope {};
+
+template <typename Delegate>
+void RootSetClosure<Delegate>::process() {
+  RootSetClosureMarkScope mark_scope;
+  CLDToOopClosure cldt_closure(this, ClassLoaderData::_claim_none);
+  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
+  CodeBlobToOopClosure blobs(this, false);
+  Threads::oops_do(this, &blobs);
+  ObjectSynchronizer::oops_do(this);
+  Universe::oops_do(this);
+  JNIHandles::oops_do(this);
+  JvmtiExport::oops_do(this);
+  SystemDictionary::oops_do(this);
+  Management::oops_do(this);
+  StringTable::oops_do(this);
+  AOTLoader::oops_do(this);
+  JVMCI_ONLY(JVMCI::oops_do(this);)
 }
 
-class RootSetClosureMarkScope : public MarkScope {
-};
-
-void RootSetClosure::process_roots(OopClosure* closure) {
-  SaveRestoreCLDClaimBits save_restore_cld_claim_bits;
-  RootSetClosureMarkScope mark_scope;
-
-  CLDToOopClosure cldt_closure(closure, ClassLoaderData::_claim_strong);
-  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
-  CodeBlobToOopClosure blobs(closure, false);
-  Threads::oops_do(closure, &blobs);
-  ObjectSynchronizer::oops_do(closure);
-  Universe::oops_do(closure);
-  JNIHandles::oops_do(closure);
-  JvmtiExport::oops_do(closure);
-  SystemDictionary::oops_do(closure);
-  Management::oops_do(closure);
-  StringTable::oops_do(closure);
-  AOTLoader::oops_do(closure);
-  JVMCI_ONLY(JVMCI::oops_do(closure);)
-}
+template class RootSetClosure<BFSClosure>;
+template class RootSetClosure<DFSClosure>;
--- a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -26,18 +26,14 @@
 #define SHARE_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
-class EdgeQueue;
-
+template <typename Delegate>
 class RootSetClosure: public BasicOopIterateClosure {
  private:
-  RootSetClosure(EdgeQueue* edge_queue);
-  EdgeQueue* _edge_queue;
-  void closure_impl(const oop* reference, const oop pointee);
+  Delegate* const _delegate;
  public:
-  static void add_to_queue(EdgeQueue* edge_queue);
-  static void process_roots(OopClosure* closure);
+  RootSetClosure(Delegate* delegate);
+  void process();
 
   virtual void do_oop(oop* reference);
   virtual void do_oop(narrowOop* reference);
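
To make the new shape of RootSetClosure easier to follow outside the patch, here is a small standalone sketch (not part of the changeset; RootSetClosureSketch, FakeBFS, FakeDFS and oop_t are made-up stand-ins for the HotSpot types). It illustrates the delegate pattern introduced above: the templated closure forwards only non-null roots to its delegate's do_root(), and the explicit instantiations at the end of the .cpp file fix the permitted delegates (BFSClosure and DFSClosure) while keeping the template definitions out of the header.

// Illustrative sketch only -- not part of the changeset. oop_t, FakeBFS and
// FakeDFS are stand-ins for HotSpot's oop, BFSClosure and DFSClosure.
#include <cassert>
#include <cstdio>

typedef void* oop_t;  // stand-in for HotSpot's oop

template <typename Delegate>
class RootSetClosureSketch {
 private:
  Delegate* const _delegate;
 public:
  explicit RootSetClosureSketch(Delegate* delegate) : _delegate(delegate) {}

  // Mirrors do_oop(oop*): only non-null roots are forwarded to the delegate.
  void do_oop(oop_t* ref) {
    assert(ref != nullptr);
    if (*ref != nullptr) {
      _delegate->do_root(ref);
    }
  }
};

struct FakeBFS { void do_root(oop_t* ref) { std::printf("BFS visits root slot %p\n", (void*)ref); } };
struct FakeDFS { void do_root(oop_t* ref) { std::printf("DFS visits root slot %p\n", (void*)ref); } };

// Explicit instantiations, as at the end of rootSetClosure.cpp, keep the
// template definitions in the .cpp file while fixing the set of delegates.
template class RootSetClosureSketch<FakeBFS>;
template class RootSetClosureSketch<FakeDFS>;

int main() {
  FakeBFS bfs;
  RootSetClosureSketch<FakeBFS> closure(&bfs);
  oop_t obj = &bfs;         // pretend this is a live heap object
  oop_t* root_slot = &obj;  // pretend this is a root slot referencing it
  closure.do_oop(root_slot);
  return 0;
}
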
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/vmThread.hpp"
+
+EventEmitter::EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time) :
+  _start_time(start_time),
+  _end_time(end_time),
+  _thread(Thread::current()),
+  _jfr_thread_local(_thread->jfr_thread_local()),
+  _thread_id(_thread->jfr_thread_local()->thread_id()) {}
+
+EventEmitter::~EventEmitter() {
+  // restore / reset thread local stack trace and thread id
+  _jfr_thread_local->set_thread_id(_thread_id);
+  _jfr_thread_local->clear_cached_stack_trace();
+}
+
+void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all) {
+  assert(sampler != NULL, "invariant");
+
+  ResourceMark rm;
+  EdgeStore edge_store;
+  if (cutoff_ticks <= 0) {
+    // no reference chains
+    JfrTicks time_stamp = JfrTicks::now();
+    EventEmitter emitter(time_stamp, time_stamp);
+    emitter.write_events(sampler, &edge_store, emit_all);
+    return;
+  }
+  // events emitted with reference chains require a safepoint operation
+  PathToGcRootsOperation op(sampler, &edge_store, cutoff_ticks, emit_all);
+  VMThread::execute(&op);
+}
+
+size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) {
+  assert(_thread == Thread::current(), "invariant");
+  assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant");
+  assert(object_sampler != NULL, "invariant");
+  assert(edge_store != NULL, "invariant");
+
+  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
+  size_t count = 0;
+
+  const ObjectSample* current = object_sampler->first();
+  while (current != NULL) {
+    ObjectSample* prev = current->prev();
+    if (current->is_alive_and_older_than(last_sweep)) {
+      write_event(current, edge_store);
+      ++count;
+    }
+    current = prev;
+  }
+
+  if (count > 0) {
+    // serialize associated checkpoints and potential chains
+    ObjectSampleCheckpoint::write(object_sampler, edge_store, emit_all, _thread);
+  }
+  return count;
+}
+
+static int array_size(const oop object) {
+  assert(object != NULL, "invariant");
+  if (object->is_array()) {
+    return arrayOop(object)->length();
+  }
+  return min_jint;
+}
+
+void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
+  assert(sample != NULL, "invariant");
+  assert(!sample->is_dead(), "invariant");
+  assert(edge_store != NULL, "invariant");
+  assert(_jfr_thread_local != NULL, "invariant");
+
+  const oop* object_addr = sample->object_addr();
+  traceid gc_root_id = 0;
+  const Edge* edge = NULL;
+  if (SafepointSynchronize::is_at_safepoint()) {
+    edge = (const Edge*)(*object_addr)->mark();
+  }
+  if (edge == NULL) {
+    // In order to dump out a representation of the event
+    // even though the object was not reachable or took too long to reach,
+    // we need to register a top-level edge for this object.
+    edge = edge_store->put(object_addr);
+  } else {
+    gc_root_id = edge_store->gc_root_id(edge);
+  }
+
+  assert(edge != NULL, "invariant");
+  const traceid object_id = edge_store->get_id(edge);
+  assert(object_id != 0, "invariant");
+
+  EventOldObjectSample e(UNTIMED);
+  e.set_starttime(_start_time);
+  e.set_endtime(_end_time);
+  e.set_allocationTime(sample->allocation_time());
+  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
+  e.set_object(object_id);
+  e.set_arrayElements(array_size(edge->pointee()));
+  e.set_root(gc_root_id);
+
+  // Both the stack trace id and the thread id are temporarily assigned
+  // onto the thread local data structure of the emitter thread (for the duration
+  // of the commit() call). This trick provides a means to override
+  // the event generation mechanism by injecting externally provided ids.
+  // At this particular location, it allows us to emit an old object event
+  // supplying information from where the actual sampling occurred.
+  _jfr_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
+  assert(sample->has_thread(), "invariant");
+  _jfr_thread_local->set_thread_id(sample->thread_id());
+  e.commit();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
+#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
+
+#include "memory/allocation.hpp"
+#include "jfr/utilities/jfrTime.hpp"
+
+typedef u8 traceid;
+
+class EdgeStore;
+class JfrThreadLocal;
+class ObjectSample;
+class ObjectSampler;
+class Thread;
+
+class EventEmitter : public CHeapObj<mtTracing> {
+  friend class LeakProfiler;
+  friend class PathToGcRootsOperation;
+ private:
+  const JfrTicks& _start_time;
+  const JfrTicks& _end_time;
+  Thread* _thread;
+  JfrThreadLocal* _jfr_thread_local;
+  traceid _thread_id;
+
+  EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time);
+  ~EventEmitter();
+
+  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
+  size_t write_events(ObjectSampler* sampler, EdgeStore* store, bool emit_all);
+
+  static void emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all);
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
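
The emitter's constructor/destructor pair and the comment in write_event() describe a scoped override of thread-local ids. The sketch below (not part of the changeset; ThreadLocalIds, ScopedIdOverride and commit_event() are invented stand-ins for JfrThreadLocal, EventEmitter and Event::commit()) shows the general discipline: install the ids captured at sampling time before commit(), and rely on the destructor to restore the emitter thread's own id and clear the cached stack trace.

// Illustrative sketch only -- not part of the changeset. ThreadLocalIds and
// commit_event() are made-up stand-ins for JfrThreadLocal and Event::commit().
#include <cstdint>
#include <cstdio>

struct ThreadLocalIds {
  uint64_t thread_id;
  uint64_t cached_stack_trace_id;
};

static ThreadLocalIds tls = { 1 /* emitter thread */, 0 };

static void commit_event() {
  // The event machinery picks up whatever ids are currently installed in tls,
  // which is exactly what the override trick relies on.
  std::printf("commit: thread_id=%llu stack_trace_id=%llu\n",
              (unsigned long long)tls.thread_id,
              (unsigned long long)tls.cached_stack_trace_id);
}

// Mirrors the emitter: the constructor remembers the original thread id and
// the destructor restores it and clears the cached stack trace, so the
// override is scoped to the emission pass.
class ScopedIdOverride {
  const uint64_t _saved_thread_id;
 public:
  ScopedIdOverride() : _saved_thread_id(tls.thread_id) {}
  ~ScopedIdOverride() {
    tls.thread_id = _saved_thread_id;
    tls.cached_stack_trace_id = 0;
  }
  void emit_for_sample(uint64_t sample_thread_id, uint64_t sample_stack_trace_id) {
    // Inject the ids recorded at sampling time, then commit.
    tls.cached_stack_trace_id = sample_stack_trace_id;
    tls.thread_id = sample_thread_id;
    commit_event();
  }
};

int main() {
  {
    ScopedIdOverride emitter;
    emitter.emit_for_sample(42, 7);  // event is attributed to the sampled thread
  }
  commit_event();  // ids are back to the emitter thread's own values
  return 0;
}
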
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -181,21 +181,18 @@
   }
 };
 
-void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) {
-  assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant");
-
+void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload) {
   if (!writer.has_data()) {
-    if (!class_unload) {
-      LeakProfiler::resume();
-    }
-    assert(LeakProfiler::is_running(), "invariant");
     return;
   }
 
   assert(writer.has_data(), "invariant");
   const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
 
-  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
+  // Class unload implies a safepoint.
+  // If this is not a class unload, the object sampler is locked, because it was claimed exclusively earlier.
+  // Therefore, direct access to the object sampler instance is safe.
+  const ObjectSampler* const object_sampler = ObjectSampler::sampler();
   assert(object_sampler != NULL, "invariant");
 
   ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
@@ -203,80 +200,71 @@
   CheckpointInstall install(h_cp);
 
   if (class_unload) {
-    if (last != NULL) {
-      // all samples need the class unload information
-      do_samples(last, NULL, install);
-    }
-    assert(LeakProfiler::is_running(), "invariant");
+    // all samples need class unload information
+    do_samples(last, NULL, install);
     return;
   }
 
   // only new samples since last resolved checkpoint
   if (last != last_resolved) {
     do_samples(last, last_resolved, install);
-    if (resume) {
-      const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
-    }
-  }
-  assert(LeakProfiler::is_suspended(), "invariant");
-  if (resume) {
-    LeakProfiler::resume();
-    assert(LeakProfiler::is_running(), "invariant");
+    const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
   }
 }
 
-void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool emit_all, Thread* thread) {
+void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
+  assert(sampler != NULL, "invariant");
   assert(edge_store != NULL, "invariant");
   assert(thread != NULL, "invariant");
+
   static bool types_registered = false;
   if (!types_registered) {
     JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
     JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
     types_registered = true;
   }
-  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+
+  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
+  ObjectSample* const last = const_cast<ObjectSample*>(sampler->last());
   {
     JfrCheckpointWriter writer(false, false, thread);
     CheckpointWrite checkpoint_write(writer, last_sweep);
     do_samples(last, NULL, checkpoint_write);
   }
+
   CheckpointStateReset state_reset(last_sweep);
   do_samples(last, NULL, state_reset);
+
   if (!edge_store->is_empty()) {
     // java object and chain representations
     JfrCheckpointWriter writer(false, true, thread);
     ObjectSampleWriter osw(writer, edge_store);
-    edge_store->iterate_edges(osw);
+    edge_store->iterate(osw);
   }
 }
 
-WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) :
-  _stack_trace_repo(repo) {
+int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) {
+  assert(object_sampler != NULL, "invariant");
+  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+  if (last == NULL) {
+    return 0;
+  }
+  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
+  SampleMark mark(marker, last_sweep);
+  do_samples(last, NULL, mark);
+  return mark.count();
 }
 
+WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) :
+  _sampler(sampler), _stack_trace_repo(repo) {}
+
 bool WriteObjectSampleStacktrace::process() {
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  if (!LeakProfiler::is_running()) {
-    return true;
-  }
-  // Suspend the LeakProfiler subsystem
-  // to ensure stable samples even
-  // after we return from the safepoint.
-  LeakProfiler::suspend();
-  assert(!LeakProfiler::is_running(), "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  assert(_sampler != NULL, "invariant");
 
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
-
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  const ObjectSample* const last_resolved = object_sampler->last_resolved();
+  ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last());
+  const ObjectSample* const last_resolved = _sampler->last_resolved();
   if (last == last_resolved) {
-    assert(LeakProfiler::is_suspended(), "invariant");
     return true;
   }
 
@@ -294,27 +282,13 @@
   }
   if (count == 0) {
     writer.set_context(ctx);
-    assert(LeakProfiler::is_suspended(), "invariant");
     return true;
   }
   assert(count > 0, "invariant");
   writer.write_count((u4)count, count_offset);
   JfrStackTraceRepository::write_metadata(writer);
 
-  ObjectSampleCheckpoint::install(writer, false, false);
-  assert(LeakProfiler::is_suspended(), "invariant");
+  // install the stacktrace checkpoint information into the candidate samples
+  ObjectSampleCheckpoint::install(writer, false);
   return true;
 }
-
-int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) {
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  if (last == NULL) {
-    return 0;
-  }
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  SampleMark mark(marker, last_sweep);
-  do_samples(last, NULL, mark);
-  return mark.count();
-}
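
The checkpoint code above repeatedly calls do_samples(last, stop, functor) with different functors (CheckpointInstall, CheckpointWrite, CheckpointStateReset, SampleMark). The body of do_samples() lies outside the hunks shown here, so the following standalone sketch only assumes the shape implied by the call sites: walk from the newest sample through prev() until a stop sample (or the end of the list) is reached, applying the functor to each sample.

// Illustrative sketch only -- not part of the changeset. do_samples() itself is
// defined outside the hunks above; this sketch assumes the iteration shape
// implied by its call sites.
#include <cstdio>

struct Sample {
  int id;
  Sample* prev;  // assumed: link towards older samples
};

template <typename Functor>
static void do_samples_sketch(Sample* current, const Sample* stop, Functor& functor) {
  while (current != stop && current != nullptr) {
    functor(current);
    current = current->prev;
  }
}

// Stand-in for functors such as CheckpointInstall, CheckpointWrite or SampleMark.
struct PrintSample {
  void operator()(Sample* s) const { std::printf("visit sample %d\n", s->id); }
};

int main() {
  Sample s1 = { 1, nullptr };
  Sample s2 = { 2, &s1 };
  Sample s3 = { 3, &s2 };   // newest sample, i.e. "last"
  PrintSample print;
  do_samples_sketch(&s3, nullptr, print);  // all samples
  do_samples_sketch(&s3, &s1, print);      // only samples newer than s1
  return 0;
}
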
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -26,25 +26,26 @@
 #define SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
 
 #include "memory/allocation.hpp"
-#include "utilities/exceptions.hpp"
 
 class EdgeStore;
+class JfrCheckpointWriter;
 class JfrStackTraceRepository;
-class JfrCheckpointWriter;
 class ObjectSampleMarker;
+class ObjectSampler;
 
 class ObjectSampleCheckpoint : AllStatic {
  public:
-  static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume);
-  static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread);
-  static int mark(ObjectSampleMarker& marker, bool emit_all);
+  static void install(JfrCheckpointWriter& writer, bool class_unload);
+  static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
+  static int mark(ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
 };
 
 class WriteObjectSampleStacktrace : public StackObj {
  private:
+  ObjectSampler* const _sampler;
   JfrStackTraceRepository& _stack_trace_repo;
  public:
-  WriteObjectSampleStacktrace(JfrStackTraceRepository& repo);
+  WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo);
   bool process();
 };
 
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -350,7 +350,7 @@
   return 1;
 }
 
-static traceid get_root_description_info_id(const Edge& edge, traceid id) {
+static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) {
   assert(edge.is_root(), "invariant");
   if (EdgeUtils::is_leak_edge(edge)) {
     return 0;
@@ -518,7 +518,7 @@
   }
 }
 
-static void add_old_object_sample_info(const Edge* current, traceid id) {
+static void add_old_object_sample_info(const StoredEdge* current, traceid id) {
   assert(current != NULL, "invariant");
   if (sample_infos == NULL) {
     sample_infos = new SampleInfo();
@@ -528,11 +528,11 @@
   assert(oosi != NULL, "invariant");
   oosi->_id = id;
   oosi->_data._object = current->pointee();
-  oosi->_data._reference_id = current->is_root() ? (traceid)0 : id;
+  oosi->_data._reference_id = current->parent() == NULL ? (traceid)0 : id;
   sample_infos->store(oosi);
 }
 
-static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) {
+static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) {
   assert(current != NULL, "invariant");
   if (ref_infos == NULL) {
     ref_infos = new RefInfo();
@@ -544,37 +544,43 @@
 
   ri->_id = id;
   ri->_data._array_info_id =  !current->is_skip_edge() ? get_array_info_id(*current, id) : 0;
-  ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ?
-                               get_field_info_id(*current) : (traceid)0;
+  ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? get_field_info_id(*current) : (traceid)0;
   ri->_data._old_object_sample_id = parent_id;
   ri->_data._skip = current->skip_length();
   ref_infos->store(ri);
 }
 
-static traceid add_root_info(const Edge* root, traceid id) {
-  assert(root != NULL, "invariant");
-  assert(root->is_root(), "invariant");
-  return get_root_description_info_id(*root, id);
+static bool is_gc_root(const StoredEdge* current) {
+  assert(current != NULL, "invariant");
+  return current->parent() == NULL && current->gc_root_id() != 0;
 }
 
-void ObjectSampleWriter::write(const RoutableEdge* edge) {
+static traceid add_gc_root_info(const StoredEdge* root, traceid id) {
+  assert(root != NULL, "invariant");
+  assert(is_gc_root(root), "invariant");
+  return get_gc_root_description_info_id(*root, id);
+}
+
+void ObjectSampleWriter::write(const StoredEdge* edge) {
   assert(edge != NULL, "invariant");
   const traceid id = _store->get_id(edge);
   add_old_object_sample_info(edge, id);
-  const RoutableEdge* parent = edge->logical_parent();
+  const StoredEdge* const parent = edge->parent();
   if (parent != NULL) {
     add_reference_info(edge, id, _store->get_id(parent));
   } else {
-    assert(edge->is_root(), "invariant");
-    add_root_info(edge, id);
+    if (is_gc_root(edge)) {
+      assert(edge->gc_root_id() == id, "invariant");
+      add_gc_root_info(edge, id);
+    }
   }
 }
 
-ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) :
+ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) :
   _writer(writer),
   _store(store) {
   assert(store != NULL, "invariant");
-  assert(store->number_of_entries() > 0, "invariant");
+  assert(!store->is_empty(), "invariant");
   sample_infos = NULL;
   ref_infos = NULL;
   array_infos = NULL;
@@ -590,26 +596,7 @@
   write_root_descriptors(_writer);
 }
 
-void ObjectSampleWriter::write_chain(const RoutableEdge& edge) {
-  assert(EdgeUtils::is_leak_edge(edge), "invariant");
-  if (edge.processed()) {
-    return;
-  }
-  EdgeUtils::collapse_chain(edge);
-  const RoutableEdge* current = &edge;
-  while (current != NULL) {
-    if (current->processed()) {
-      return;
-    }
-    write(current);
-    current->set_processed();
-    current = current->logical_parent();
-  }
-}
-
-bool ObjectSampleWriter::operator()(const RoutableEdge& edge) {
-  if (EdgeUtils::is_leak_edge(edge)) {
-    write_chain(edge);
-  }
+bool ObjectSampleWriter::operator()(StoredEdge& e) {
+  write(&e);
   return true;
 }
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -30,21 +30,17 @@
 class Edge;
 class EdgeStore;
 class JfrCheckpointWriter;
-class RoutableEdge;
+class StoredEdge;
 
 class ObjectSampleWriter : public StackObj {
  private:
   JfrCheckpointWriter& _writer;
-  const EdgeStore* const _store;
-
-  void write(const RoutableEdge* edge);
-  void write_chain(const RoutableEdge& edge);
-
+  EdgeStore* const _store;
+  void write(const StoredEdge* edge);
  public:
-  ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store);
+  ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store);
   ~ObjectSampleWriter();
-
-  bool operator()(const RoutableEdge& edge);
+  bool operator()(StoredEdge& edge);
 };
 
 #endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -132,7 +132,7 @@
 bool ReferenceToRootClosure::do_cldg_roots() {
   assert(!complete(), "invariant");
   ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, NULL);
-  CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_strong);
+  CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_none);
   ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
   return rlc.complete();
 }
@@ -435,9 +435,6 @@
 };
 
 void RootResolver::resolve(RootCallback& callback) {
-
-  // Need to clear cld claim bit before starting
-  ClassLoaderDataGraph::clear_claimed_marks();
   RootResolverMarkScope mark_scope;
 
   // thread local roots
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -25,8 +25,8 @@
 #ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
 #define SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
 
+#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "memory/allocation.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "oops/oopsHierarchy.hpp"
 
 struct RootCallbackInfo {
--- a/src/hotspot/share/jfr/leakprofiler/emitEventOperation.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,236 +0,0 @@
-/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#include "precompiled.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "jfr/jfrEvents.hpp"
-#include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
-#include "jfr/leakprofiler/chains/edge.hpp"
-#include "jfr/leakprofiler/chains/edgeQueue.hpp"
-#include "jfr/leakprofiler/chains/edgeStore.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
-#include "jfr/leakprofiler/sampling/objectSample.hpp"
-#include "jfr/leakprofiler/leakProfiler.hpp"
-#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
-#include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/leakprofiler/emitEventOperation.hpp"
-#include "jfr/leakprofiler/chains/bfsClosure.hpp"
-#include "jfr/leakprofiler/chains/dfsClosure.hpp"
-#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/support/jfrThreadId.hpp"
-#include "logging/log.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/markOop.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-/* The EdgeQueue is backed by directly managed virtual memory.
- * We will attempt to dimension an initial reservation
- * in proportion to the size of the heap (represented by heap_region).
- * Initial memory reservation: 5% of the heap OR at least 32 Mb
- * Commit ratio: 1 : 10 (subject to allocation granularties)
- */
-static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
-  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
-  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
-  return memory_reservation_bytes;
-}
-
-static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
-  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
-  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
-  return memory_commit_block_size_bytes;
-}
-
-static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
-  log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
-  log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
-  log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
-  if (edge_queue.reserved_size() > 0) {
-    log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
-      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
-  }
-}
-
-void EmitEventOperation::doit() {
-  assert(LeakProfiler::is_running(), "invariant");
-  _object_sampler = LeakProfiler::object_sampler();
-  assert(_object_sampler != NULL, "invariant");
-
-  _vm_thread = VMThread::vm_thread();
-  assert(_vm_thread == Thread::current(), "invariant");
-  _vm_thread_local = _vm_thread->jfr_thread_local();
-  assert(_vm_thread_local != NULL, "invariant");
-  assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  // The VM_Operation::evaluate() which invoked doit()
-  // contains a top level ResourceMark
-
-  // save the original markWord for the potential leak objects
-  // to be restored on function exit
-  ObjectSampleMarker marker;
-  if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
-    return;
-  }
-
-  EdgeStore edge_store;
-
-  GranularTimer::start(_cutoff_ticks, 1000000);
-  if (_cutoff_ticks <= 0) {
-    // no chains
-    write_events(&edge_store);
-    return;
-  }
-
-  assert(_cutoff_ticks > 0, "invariant");
-
-  // The bitset used for marking is dimensioned as a function of the heap size
-  const MemRegion heap_region = Universe::heap()->reserved_region();
-  BitSet mark_bits(heap_region);
-
-  // The edge queue is dimensioned as a fraction of the heap size
-  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
-  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
-
-  // The initialize() routines will attempt to reserve and allocate backing storage memory.
-  // Failure to accommodate will render root chain processing impossible.
-  // As a fallback on failure, just write out the existing samples, flat, without chains.
-  if (!(mark_bits.initialize() && edge_queue.initialize())) {
-    log_warning(jfr)("Unable to allocate memory for root chain processing");
-    write_events(&edge_store);
-    return;
-  }
-
-  // necessary condition for attempting a root set iteration
-  Universe::heap()->ensure_parsability(false);
-
-  RootSetClosure::add_to_queue(&edge_queue);
-  if (edge_queue.is_full()) {
-    // Pathological case where roots don't fit in queue
-    // Do a depth-first search, but mark roots first
-    // to avoid walking sideways over roots
-    DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
-  } else {
-    BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
-    bfs.process();
-  }
-  GranularTimer::stop();
-  write_events(&edge_store);
-  log_edge_queue_summary(edge_queue);
-}
-
-int EmitEventOperation::write_events(EdgeStore* edge_store) {
-  assert(_object_sampler != NULL, "invariant");
-  assert(edge_store != NULL, "invariant");
-  assert(_vm_thread != NULL, "invariant");
-  assert(_vm_thread_local != NULL, "invariant");
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-
-  // save thread id in preparation for thread local trace data manipulations
-  const traceid vmthread_id = _vm_thread_local->thread_id();
-  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
-  int count = 0;
-
-  const ObjectSample* current = _object_sampler->first();
-  while (current != NULL) {
-    ObjectSample* prev = current->prev();
-    if (current->is_alive_and_older_than(last_sweep)) {
-      write_event(current, edge_store);
-      ++count;
-    }
-    current = prev;
-  }
-
-  // restore thread local stack trace and thread id
-  _vm_thread_local->set_thread_id(vmthread_id);
-  _vm_thread_local->clear_cached_stack_trace();
-  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  if (count > 0) {
-    // serialize assoicated checkpoints
-    ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
-  }
-  return count;
-}
-
-static int array_size(const oop object) {
-  assert(object != NULL, "invariant");
-  if (object->is_array()) {
-    return arrayOop(object)->length();
-  }
-  return min_jint;
-}
-
-void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
-  assert(sample != NULL, "invariant");
-  assert(!sample->is_dead(), "invariant");
-  assert(edge_store != NULL, "invariant");
-  assert(_vm_thread_local != NULL, "invariant");
-  const oop* object_addr = sample->object_addr();
-  assert(*object_addr != NULL, "invariant");
-
-  const Edge* edge = (const Edge*)(*object_addr)->mark();
-  traceid gc_root_id = 0;
-  if (edge == NULL) {
-    // In order to dump out a representation of the event
-    // even though it was not reachable / too long to reach,
-    // we need to register a top level edge for this object
-    Edge e(NULL, object_addr);
-    edge_store->add_chain(&e, 1);
-    edge = (const Edge*)(*object_addr)->mark();
-  } else {
-    gc_root_id = edge_store->get_root_id(edge);
-  }
-
-  assert(edge != NULL, "invariant");
-  assert(edge->pointee() == *object_addr, "invariant");
-  const traceid object_id = edge_store->get_id(edge);
-  assert(object_id != 0, "invariant");
-
-  EventOldObjectSample e(UNTIMED);
-  e.set_starttime(GranularTimer::start_time());
-  e.set_endtime(GranularTimer::end_time());
-  e.set_allocationTime(sample->allocation_time());
-  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
-  e.set_object(object_id);
-  e.set_arrayElements(array_size(*object_addr));
-  e.set_root(gc_root_id);
-
-  // Temporarily assigning both the stack trace id and thread id
-  // onto the thread local data structure of the VMThread (for the duration
-  // of the commit() call). This trick provides a means to override
-  // the event generation mechanism by injecting externally provided id's.
-  // Here, in particular, this allows us to emit an old object event
-  // supplying information from where the actual sampling occurred.
-  _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
-  assert(sample->has_thread(), "invariant");
-  _vm_thread_local->set_thread_id(sample->thread_id());
-  e.commit();
-}
--- a/src/hotspot/share/jfr/leakprofiler/emitEventOperation.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
-#define SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
-
-#include "runtime/vmOperations.hpp"
-
-class BFSClosure;
-class EdgeStore;
-class EdgeQueue;
-class JfrThreadData;
-class ObjectSample;
-class ObjectSampler;
-
-class VMThread;
-
-// Safepoint operation for emitting object sample events
-class EmitEventOperation : public VM_Operation {
- private:
-  jlong _cutoff_ticks;
-  bool _emit_all;
-  VMThread* _vm_thread;
-  JfrThreadLocal* _vm_thread_local;
-  ObjectSampler* _object_sampler;
-
-  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
-  int write_events(EdgeStore* edge_store);
-
- public:
-  EmitEventOperation(jlong cutoff_ticks, bool emit_all) :
-    _cutoff_ticks(cutoff_ticks),
-    _emit_all(emit_all),
-    _vm_thread(NULL),
-    _vm_thread_local(NULL),
-    _object_sampler(NULL) {
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  virtual void doit();
-};
-
-#endif // SHARE_JFR_LEAKPROFILER_EMITEVENTOPERATION_HPP
--- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,25 +23,31 @@
  */
 
 #include "precompiled.hpp"
-#include "jfr/leakprofiler/emitEventOperation.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/startOperation.hpp"
 #include "jfr/leakprofiler/stopOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
+#include "logging/log.hpp"
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
-#include "utilities/ostream.hpp"
+
+bool LeakProfiler::is_running() {
+  return ObjectSampler::is_created();
+}
 
-// Only to be updated during safepoint
-ObjectSampler* LeakProfiler::_object_sampler = NULL;
+bool LeakProfiler::start(int sample_count) {
+  if (is_running()) {
+    return true;
+  }
 
-static volatile jbyte suspended = 0;
-bool LeakProfiler::start(jint sample_count) {
+  // Allows user to disable leak profiler on command line by setting queue size to zero.
+  if (sample_count == 0) {
+    return false;
+  }
+
   if (UseZGC) {
     log_warning(jfr)("LeakProfiler is currently not supported in combination with ZGC");
     return false;
@@ -52,49 +58,56 @@
     return false;
   }
 
-  if (_object_sampler != NULL) {
-    // already started
-    return true;
+  assert(!is_running(), "invariant");
+  assert(sample_count > 0, "invariant");
+
+  // schedule the safepoint operation for installing the object sampler
+  StartOperation op(sample_count);
+  VMThread::execute(&op);
+
+  if (!is_running()) {
+    log_trace(jfr, system)("Object sampling could not be started because the sampler could not be allocated");
+    return false;
   }
-  // Allows user to disable leak profiler on command line by setting queue size to zero.
-  if (sample_count > 0) {
-    StartOperation op(sample_count);
-    VMThread::execute(&op);
-    return _object_sampler != NULL;
-  }
-  return false;
+  assert(is_running(), "invariant");
+  log_trace(jfr, system)("Object sampling started");
+  return true;
 }
 
 bool LeakProfiler::stop() {
-  if (_object_sampler == NULL) {
-    // already stopped/not started
-    return true;
+  if (!is_running()) {
+    return false;
   }
+
+  // schedule the safepoint operation for uninstalling and destroying the object sampler
   StopOperation op;
   VMThread::execute(&op);
-  return _object_sampler == NULL;
+
+  assert(!is_running(), "invariant");
+  log_trace(jfr, system)("Object sampling stopped");
+  return true;
 }
 
-void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) {
+void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all) {
   if (!is_running()) {
     return;
   }
-  EmitEventOperation op(cutoff_ticks, emit_all);
-  VMThread::execute(&op);
+  // exclusive access to object sampler instance
+  ObjectSampler* const sampler = ObjectSampler::acquire();
+  assert(sampler != NULL, "invariant");
+  EventEmitter::emit(sampler, cutoff_ticks, emit_all);
+  ObjectSampler::release();
 }
 
 void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(),
     "Leak Profiler::oops_do(...) may only be called during safepoint");
-
-  if (_object_sampler != NULL) {
-    _object_sampler->oops_do(is_alive, f);
+  if (is_running()) {
+    ObjectSampler::oops_do(is_alive, f);
   }
 }
 
-void LeakProfiler::sample(HeapWord* object,
-                          size_t size,
-                          JavaThread* thread) {
+void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) {
   assert(is_running(), "invariant");
   assert(thread != NULL, "invariant");
   assert(thread->thread_state() == _thread_in_vm, "invariant");
@@ -104,39 +117,5 @@
     return;
   }
 
-  _object_sampler->add(object, size, thread);
-}
-
-ObjectSampler* LeakProfiler::object_sampler() {
-  assert(is_suspended() || SafepointSynchronize::is_at_safepoint(),
-    "Leak Profiler::object_sampler() may only be called during safepoint");
-  return _object_sampler;
-}
-
-void LeakProfiler::set_object_sampler(ObjectSampler* object_sampler) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-    "Leak Profiler::set_object_sampler() may only be called during safepoint");
-  _object_sampler = object_sampler;
-}
-
-bool LeakProfiler::is_running() {
-  return _object_sampler != NULL && !suspended;
+  ObjectSampler::sample(object, size, thread);
 }
-
-bool LeakProfiler::is_suspended() {
-  return _object_sampler != NULL && suspended;
-}
-
-void LeakProfiler::resume() {
-  assert(is_suspended(), "invariant");
-  OrderAccess::storestore();
-  Atomic::store((jbyte)0, &suspended);
-  assert(is_running(), "invariant");
-}
-
-void LeakProfiler::suspend() {
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  assert(_object_sampler != NULL, "invariant");
-  assert(!is_suspended(), "invariant");
-  suspended = (jbyte)1; // safepoint visible
-}
--- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -28,36 +28,16 @@
 #include "memory/allocation.hpp"
 
 class BoolObjectClosure;
-class ObjectSampler;
 class OopClosure;
 class JavaThread;
-class Thread;
 
 class LeakProfiler : public AllStatic {
-  friend class ClassUnloadTypeSet;
-  friend class EmitEventOperation;
-  friend class ObjectSampleCheckpoint;
-  friend class StartOperation;
-  friend class StopOperation;
-  friend class TypeSet;
-  friend class WriteObjectSampleStacktrace;
-
- private:
-  static ObjectSampler* _object_sampler;
-
-  static void set_object_sampler(ObjectSampler* object_sampler);
-  static ObjectSampler* object_sampler();
-
-  static void suspend();
-  static void resume();
-  static bool is_suspended();
-
  public:
-  static bool start(jint sample_count);
+  static bool start(int sample_count);
   static bool stop();
-  static void emit_events(jlong cutoff_ticks, bool emit_all);
   static bool is_running();
 
+  static void emit_events(int64_t cutoff_ticks, bool emit_all);
   static void sample(HeapWord* object, size_t size, JavaThread* thread);
 
   // Called by GC
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -21,6 +21,7 @@
  * questions.
  *
  */
+
 #include "precompiled.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "jfr/leakprofiler/sampling/objectSample.hpp"
@@ -35,8 +36,18 @@
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
 
+static ObjectSampler* _instance = NULL;
+
+static ObjectSampler& instance() {
+  assert(_instance != NULL, "invariant");
+  return *_instance;
+}
+
 ObjectSampler::ObjectSampler(size_t size) :
   _priority_queue(new SamplePriorityQueue(size)),
   _list(new SampleList(size)),
@@ -44,7 +55,6 @@
   _total_allocated(0),
   _threshold(0),
   _size(size),
-  _tryLock(0),
   _dead_samples(false) {}
 
 ObjectSampler::~ObjectSampler() {
@@ -54,32 +64,110 @@
   _list = NULL;
 }
 
-void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
+bool ObjectSampler::create(size_t size) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_instance == NULL, "invariant");
+  _instance = new ObjectSampler(size);
+  return _instance != NULL;
+}
+
+bool ObjectSampler::is_created() {
+  return _instance != NULL;
+}
+
+ObjectSampler* ObjectSampler::sampler() {
+  assert(is_created(), "invariant");
+  return _instance;
+}
+
+void ObjectSampler::destroy() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  if (_instance != NULL) {
+    ObjectSampler* const sampler = _instance;
+    _instance = NULL;
+    delete sampler;
+  }
+}
+
+static volatile int _lock = 0;
+
+ObjectSampler* ObjectSampler::acquire() {
+  assert(is_created(), "invariant");
+  while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
+  return _instance;
+}
+
+void ObjectSampler::release() {
+  assert(is_created(), "invariant");
+  OrderAccess::fence();
+  _lock = 0;
+}
+
+static traceid get_thread_id(JavaThread* thread) {
   assert(thread != NULL, "invariant");
-  const traceid thread_id = thread->threadObj() != NULL ? thread->jfr_thread_local()->thread_id() : 0;
+  if (thread->threadObj() == NULL) {
+    return 0;
+  }
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  if (!tl->has_thread_checkpoint()) {
+    JfrCheckpointManager::create_thread_checkpoint(thread);
+  }
+  assert(tl->has_thread_checkpoint(), "invariant");
+  return tl->thread_id();
+}
+
+// Populates the thread local stack frames, but does not add them
+// to the stacktrace repository (...yet, see stacktrace_id() below)
+//
+void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
+    JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0);
+  }
+}
+
+// We were successful in acquiring the try lock and have been selected for adding a sample.
+// Go ahead with installing our previously taken stacktrace into the stacktrace repository.
+//
+traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread);
+  thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash());
+  return stacktrace_id;
+}
+
+void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
+  assert(thread != NULL, "invariant");
+  assert(is_created(), "invariant");
+
+  const traceid thread_id = get_thread_id(thread);
   if (thread_id == 0) {
     return;
   }
-  assert(thread_id != 0, "invariant");
-
-  if (!thread->jfr_thread_local()->has_thread_checkpoint()) {
-    JfrCheckpointManager::create_thread_checkpoint(thread);
-    assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
-  }
 
-  traceid stack_trace_id = 0;
-  unsigned int stack_trace_hash = 0;
-  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
-    stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash);
-    thread->jfr_thread_local()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash);
-  }
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
+  fill_stacktrace(&stacktrace, thread);
 
-  JfrTryLock tryLock(&_tryLock);
+  // try to enter the critical section
+  JfrTryLock tryLock(&_lock);
   if (!tryLock.has_lock()) {
     log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
     return;
   }
 
+  instance().add(obj, allocated, thread_id, &stacktrace, thread);
+}
+
+void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread_id != 0, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+
   if (_dead_samples) {
     scavenge();
     assert(!_dead_samples, "invariant");
@@ -101,13 +189,13 @@
   }
 
   assert(sample != NULL, "invariant");
-  assert(thread_id != 0, "invariant");
   sample->set_thread_id(thread_id);
   sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());
 
-  if (stack_trace_id != 0) {
-    sample->set_stack_trace_id(stack_trace_id);
-    sample->set_stack_trace_hash(stack_trace_hash);
+  const unsigned int stacktrace_hash = stacktrace->hash();
+  if (stacktrace_hash != 0) {
+    sample->set_stack_trace_id(stacktrace_id(stacktrace, thread));
+    sample->set_stack_trace_hash(stacktrace_hash);
   }
 
   sample->set_span(allocated);
@@ -118,6 +206,53 @@
   _priority_queue->push(sample);
 }
 
+void ObjectSampler::scavenge() {
+  ObjectSample* current = _list->last();
+  while (current != NULL) {
+    ObjectSample* next = current->next();
+    if (current->is_dead()) {
+      remove_dead(current);
+    }
+    current = next;
+  }
+  _dead_samples = false;
+}
+
+void ObjectSampler::remove_dead(ObjectSample* sample) {
+  assert(sample != NULL, "invariant");
+  assert(sample->is_dead(), "invariant");
+  ObjectSample* const previous = sample->prev();
+  // push span on to previous
+  if (previous != NULL) {
+    _priority_queue->remove(previous);
+    previous->add_span(sample->span());
+    _priority_queue->push(previous);
+  }
+  _priority_queue->remove(sample);
+  _list->release(sample);
+}
+
+void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  assert(is_created(), "invariant");
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  ObjectSampler& sampler = instance();
+  ObjectSample* current = sampler._list->last();
+  while (current != NULL) {
+    ObjectSample* next = current->next();
+    if (!current->is_dead()) {
+      if (is_alive->do_object_b(current->object())) {
+        // The weakly referenced object is alive, update pointer
+        f->do_oop(const_cast<oop*>(current->object_addr()));
+      } else {
+        current->set_dead();
+        sampler._dead_samples = true;
+      }
+    }
+    current = next;
+  }
+  sampler._last_sweep = JfrTicks::now();
+}
+
 const ObjectSample* ObjectSampler::last() const {
   return _list->last();
 }
@@ -134,50 +269,6 @@
   _list->set_last_resolved(sample);
 }
 
-void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
-  ObjectSample* current = _list->last();
-  while (current != NULL) {
-    ObjectSample* next = current->next();
-    if (!current->is_dead()) {
-      if (is_alive->do_object_b(current->object())) {
-        // The weakly referenced object is alive, update pointer
-        f->do_oop(const_cast<oop*>(current->object_addr()));
-      } else {
-        current->set_dead();
-        _dead_samples = true;
-      }
-    }
-    current = next;
-  }
-  _last_sweep = JfrTicks::now();
-}
-
-void ObjectSampler::remove_dead(ObjectSample* sample) {
-  assert(sample != NULL, "invariant");
-  assert(sample->is_dead(), "invariant");
-  ObjectSample* const previous = sample->prev();
-  // push span on to previous
-  if (previous != NULL) {
-    _priority_queue->remove(previous);
-    previous->add_span(sample->span());
-    _priority_queue->push(previous);
-  }
-  _priority_queue->remove(sample);
-  _list->release(sample);
-}
-
-void ObjectSampler::scavenge() {
-  ObjectSample* current = _list->last();
-  while (current != NULL) {
-    ObjectSample* next = current->next();
-    if (current->is_dead()) {
-      remove_dead(current);
-    }
-    current = next;
-  }
-  _dead_samples = false;
-}
-
 int ObjectSampler::item_count() const {
   return _priority_queue->count();
 }
@@ -189,7 +280,7 @@
 ObjectSample* ObjectSampler::item_at(int index) {
   return const_cast<ObjectSample*>(
     const_cast<const ObjectSampler*>(this)->item_at(index)
-                                   );
+                                  );
 }
 
 const JfrTicks& ObjectSampler::last_sweep() const {
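
The new acquire()/release() pair and the JfrTryLock on the sampling path share one lock word: emitters spin until they own it, while allocating threads merely try it and skip the sample on contention. The sketch below is a standalone illustration of that discipline using std::atomic; it is not HotSpot code, which uses Atomic::cmpxchg and OrderAccess as shown above.

// Illustrative sketch only -- not part of the changeset.
#include <atomic>
#include <cstdio>

static std::atomic<int> sampler_lock{0};

// Blocking acquire, as used by emitters that need exclusive access.
static void acquire() {
  int expected = 0;
  while (!sampler_lock.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
    expected = 0;  // compare_exchange overwrites 'expected' on failure
  }
}

static void release() {
  sampler_lock.store(0, std::memory_order_release);
}

// Try-lock, as used on the allocation/sampling path: on contention the sample
// is simply skipped instead of blocking the allocating thread.
static bool try_acquire() {
  int expected = 0;
  return sampler_lock.compare_exchange_strong(expected, 1, std::memory_order_acquire);
}

int main() {
  acquire();
  if (!try_acquire()) {
    std::printf("sampling path: lock contended, skip this sample\n");
  }
  release();
  if (try_acquire()) {
    std::printf("sampling path: got the lock, record the sample\n");
    release();
  }
  return 0;
}
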
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -28,7 +28,10 @@
 #include "memory/allocation.hpp"
 #include "jfr/utilities/jfrTime.hpp"
 
+typedef u8 traceid;
+
 class BoolObjectClosure;
+class JfrStackTrace;
 class OopClosure;
 class ObjectSample;
 class ObjectSampler;
@@ -40,11 +43,13 @@
 // making sure the samples are evenly distributed as
 // new entries are added and removed.
 class ObjectSampler : public CHeapObj<mtTracing> {
+  friend class EventEmitter;
+  friend class JfrRecorderService;
   friend class LeakProfiler;
-  friend class ObjectSampleCheckpoint;
   friend class StartOperation;
   friend class StopOperation;
-  friend class EmitEventOperation;
+  friend class ObjectSampleCheckpoint;
+  friend class WriteObjectSampleStacktrace;
  private:
   SamplePriorityQueue* _priority_queue;
   SampleList* _list;
@@ -52,20 +57,33 @@
   size_t _total_allocated;
   size_t _threshold;
   size_t _size;
-  volatile int _tryLock;
   bool _dead_samples;
 
+  // Lifecycle
   explicit ObjectSampler(size_t size);
   ~ObjectSampler();
+  static bool create(size_t size);
+  static bool is_created();
+  static ObjectSampler* sampler();
+  static void destroy();
 
-  void add(HeapWord* object, size_t size, JavaThread* thread);
+  // For operations that require exclusive access (non-safepoint)
+  static ObjectSampler* acquire();
+  static void release();
+
+  // Stacktrace
+  static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread);
+  traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread);
+
+  // Sampling
+  static void sample(HeapWord* object, size_t size, JavaThread* thread);
+  void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread);
+  void scavenge();
   void remove_dead(ObjectSample* sample);
-  void scavenge();
 
   // Called by GC
-  void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+  static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 
- public:
   const ObjectSample* item_at(int index) const;
   ObjectSample* item_at(int index);
   int item_count() const;
--- a/src/hotspot/share/jfr/leakprofiler/startOperation.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/startOperation.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -25,35 +25,18 @@
 #ifndef SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
 #define SHARE_JFR_LEAKPROFILER_STARTOPERATION_HPP
 
-#include "jfr/recorder/jfrRecorder.hpp"
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "logging/log.hpp"
-#include "runtime/vmOperations.hpp"
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
 
-// Safepoint operation for starting leak profiler object sampler
-class StartOperation : public VM_Operation {
+// Safepoint operation for creating and starting the leak profiler object sampler
+class StartOperation : public OldObjectVMOperation {
  private:
-  jlong _sample_count;
+  int _sample_count;
  public:
-  StartOperation(jlong sample_count) :
-    _sample_count(sample_count) {
-  }
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
+  StartOperation(int sample_count) : _sample_count(sample_count) {}
 
   virtual void doit() {
-    assert(!LeakProfiler::is_running(), "invariant");
-    jint queue_size = JfrOptionSet::old_object_queue_size();
-    LeakProfiler::set_object_sampler(new ObjectSampler(queue_size));
-    log_trace(jfr, system)( "Object sampling started");
+    ObjectSampler::create(_sample_count);
   }
 };
 
--- a/src/hotspot/share/jfr/leakprofiler/stopOperation.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/leakprofiler/stopOperation.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -25,31 +25,14 @@
 #ifndef SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
 #define SHARE_JFR_LEAKPROFILER_STOPOPERATION_HPP
 
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "logging/log.hpp"
-#include "runtime/vmOperations.hpp"
-
-// Safepoint operation for stopping leak profiler object sampler
-class StopOperation : public VM_Operation {
- public:
-  StopOperation() {}
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
 
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
+// Safepoint operation for stopping and destroying the leak profiler object sampler
+class StopOperation : public OldObjectVMOperation {
+ public:
   virtual void doit() {
-    assert(LeakProfiler::is_running(), "invariant");
-    ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-    delete object_sampler;
-    LeakProfiler::set_object_sampler(NULL);
-    log_trace(jfr, system)( "Object sampling stopped");
+    ObjectSampler::destroy();
   }
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/vmOperation.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+
+#include "runtime/vmOperations.hpp"
+
+class OldObjectVMOperation : public VM_Operation {
+ public:
+  Mode evaluation_mode() const {
+    return _safepoint;
+  }
+
+  VMOp_Type type() const {
+    return VMOp_JFROldObject;
+  }
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -311,7 +311,7 @@
   if (LeakProfiler::is_running()) {
     JfrCheckpointWriter leakp_writer(false, true, Thread::current());
     type_set.write(writer, &leakp_writer);
-    ObjectSampleCheckpoint::install(leakp_writer, true, true);
+    ObjectSampleCheckpoint::install(leakp_writer, true);
     return;
   }
   type_set.write(writer, NULL);
@@ -319,10 +319,10 @@
 
 void TypeSet::serialize(JfrCheckpointWriter& writer) {
   TypeSetSerialization type_set(false);
-  if (LeakProfiler::is_suspended()) {
+  if (LeakProfiler::is_running()) {
     JfrCheckpointWriter leakp_writer(false, true, Thread::current());
     type_set.write(writer, &leakp_writer);
-    ObjectSampleCheckpoint::install(leakp_writer, false, true);
+    ObjectSampleCheckpoint::install(leakp_writer, false);
     return;
   }
   type_set.write(writer, NULL);
--- a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -194,9 +194,6 @@
     if (!validate_recording_options(thread)) {
       return false;
     }
-    if (!JfrJavaEventWriter::initialize()) {
-      return false;
-    }
     if (!JfrOptionSet::configure(thread)) {
       return false;
     }
@@ -246,6 +243,9 @@
   ResourceMark rm;
   HandleMark hm;
 
+  if (!create_java_event_writer()) {
+    return false;
+  }
   if (!create_jvmti_agent()) {
     return false;
   }
@@ -287,6 +287,10 @@
 static JfrOSInterface* _os_interface = NULL;
 static JfrThreadSampling* _thread_sampling = NULL;
 
+bool JfrRecorder::create_java_event_writer() {
+  return JfrJavaEventWriter::initialize();
+}
+
 bool JfrRecorder::create_jvmti_agent() {
   return JfrOptionSet::allow_retransforms() ? JfrJvmtiAgent::create() : true;
 }
--- a/src/hotspot/share/jfr/recorder/jfrRecorder.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/jfrRecorder.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -40,6 +40,7 @@
  private:
   static bool create_checkpoint_manager();
   static bool create_chunk_repository();
+  static bool create_java_event_writer();
   static bool create_jvmti_agent();
   static bool create_os_interface();
   static bool create_post_box();
--- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -24,7 +24,9 @@
 
 #include "precompiled.hpp"
 #include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
@@ -335,6 +337,7 @@
     open_new_chunk(true);
   }
   _checkpoint_manager.register_service_thread(Thread::current());
+  JfrMetadataEvent::lock();
 }
 
 void JfrRecorderService::open_new_chunk(bool vm_error) {
@@ -398,6 +401,11 @@
   write_stack_trace_checkpoint.process();
 }
 
+static void write_object_sample_stacktrace(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repository) {
+  WriteObjectSampleStacktrace object_sample_stacktrace(sampler, stack_trace_repository);
+  object_sample_stacktrace.process();
+}
+
 static void write_stringpool_checkpoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
   WriteStringPool write_string_pool(string_pool);
   WriteStringPoolCheckpoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
@@ -418,8 +426,9 @@
 //      write checkpoint epoch transition list->
 //        write stack trace checkpoint ->
 //          write string pool checkpoint ->
-//            write storage ->
-//              release stream lock
+//            write object sample stacktraces ->
+//              write storage ->
+//                release stream lock
 //
 void JfrRecorderService::pre_safepoint_write() {
   MutexLocker stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
@@ -428,6 +437,13 @@
   _checkpoint_manager.write_epoch_transition_mspace();
   write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, false);
   write_stringpool_checkpoint(_string_pool, _chunkwriter);
+  if (LeakProfiler::is_running()) {
+    // Exclusive access to the object sampler instance.
+    // The sampler is released (unlocked) later in post_safepoint_write.
+    ObjectSampler* const sampler = ObjectSampler::acquire();
+    assert(sampler != NULL, "invariant");
+    write_object_sample_stacktrace(sampler, _stack_trace_repository);
+  }
   _storage.write();
 }
 
@@ -436,16 +452,10 @@
   VMThread::execute(&safepoint_task);
 }
 
-static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_repository) {
-  WriteObjectSampleStacktrace object_sample_stacktrace(stack_trace_repository);
-  object_sample_stacktrace.process();
-}
-
 //
 // safepoint write sequence
 //
 //   lock stream lock ->
-//     write object sample stacktraces ->
 //       write stacktrace repository ->
 //         write string pool ->
 //           write safepoint dependent types ->
@@ -458,7 +468,6 @@
 void JfrRecorderService::safepoint_write() {
   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
   MutexLocker stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
-  write_object_sample_stacktrace(_stack_trace_repository);
   write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, true);
   write_stringpool_checkpoint_safepoint(_string_pool, _chunkwriter);
   _checkpoint_manager.write_safepoint_types();
@@ -478,13 +487,14 @@
 //
 // post-safepoint write sequence
 //
-//  lock stream lock ->
-//    write type set ->
-//      write checkpoints ->
-//        write metadata event ->
-//          write chunk header ->
-//            close chunk fd ->
-//              release stream lock
+//   write type set ->
+//     release object sampler ->
+//       lock stream lock ->
+//         write checkpoints ->
+//           write metadata event ->
+//             write chunk header ->
+//               close chunk fd ->
+//                 release stream lock
 //
 void JfrRecorderService::post_safepoint_write() {
   assert(_chunkwriter.is_valid(), "invariant");
@@ -493,6 +503,11 @@
   // already tagged artifacts for the previous epoch. We can accomplish this concurrently
   // with threads now tagging artifacts in relation to the new, now updated, epoch and remain outside of a safepoint.
   _checkpoint_manager.write_type_set();
+  if (LeakProfiler::is_running()) {
+    // The object sampler instance was exclusively acquired and locked in pre_safepoint_write.
+    // Note: There is a dependency on write_type_set() above; ensure the release happens after it.
+    ObjectSampler::release();
+  }
   MutexLocker stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
   // serialize any outstanding checkpoint memory
   _checkpoint_manager.write();
@@ -512,11 +527,9 @@
 void JfrRecorderService::finalize_current_chunk_on_vm_error() {
   assert(_chunkwriter.is_valid(), "invariant");
   pre_safepoint_write();
-  JfrMetadataEvent::lock();
   // Do not attempt safepoint dependent operations during emergency dump.
   // Optimistically write tagged artifacts.
   _checkpoint_manager.shift_epoch();
-  _checkpoint_manager.write_type_set();
   // update time
   _chunkwriter.time_stamp_chunk_now();
   post_safepoint_write();
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -164,7 +164,13 @@
 }
 
 traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
-  return instance().add_trace(stacktrace);
+  traceid tid = instance().add_trace(stacktrace);
+  if (tid == 0) {
+    stacktrace.resolve_linenos();
+    tid = instance().add_trace(stacktrace);
+  }
+  assert(tid != 0, "invariant");
+  return tid;
 }
 
 traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
@@ -187,54 +193,29 @@
   return instance().record_for((JavaThread*)thread, skip,frames, tl->stackdepth());
 }
 
-traceid JfrStackTraceRepository::record(Thread* thread, int skip, unsigned int* hash) {
-  assert(thread == Thread::current(), "invariant");
-  JfrThreadLocal* const tl = thread->jfr_thread_local();
-  assert(tl != NULL, "invariant");
-
-  if (tl->has_cached_stack_trace()) {
-    *hash = tl->cached_stack_trace_hash();
-    return tl->cached_stack_trace_id();
-  }
-  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
-    return 0;
-  }
-  JfrStackFrame* frames = tl->stackframes();
-  if (frames == NULL) {
-    // pending oom
-    return 0;
-  }
-  assert(frames != NULL, "invariant");
-  assert(tl->stackframes() == frames, "invariant");
-  return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth(), hash);
-}
-
 traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
   JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip)) {
-    return 0;
-  }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  return tid;
+  return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
+}
+
+traceid JfrStackTraceRepository::add(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  return add(*stacktrace);
 }
 
-traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames, unsigned int* hash) {
-  assert(hash != NULL && *hash == 0, "invariant");
-  JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip, true)) {
-    return 0;
+bool JfrStackTraceRepository::fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip) {
+  assert(thread == Thread::current(), "invariant");
+  assert(stacktrace != NULL, "invariant");
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  const unsigned int cached_stacktrace_hash = tl->cached_stack_trace_hash();
+  if (cached_stacktrace_hash != 0) {
+    stacktrace->set_hash(cached_stacktrace_hash);
+    return true;
   }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  *hash = stacktrace._hash;
-  return tid;
+  return stacktrace->record_safe(thread, skip, true);
 }
 
 size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
@@ -363,7 +344,7 @@
   return trace;
 }
 
-void JfrStackFrame::resolve_lineno() {
+void JfrStackFrame::resolve_lineno() const {
   assert(_method, "no method pointer");
   assert(_line == 0, "already have linenumber");
   _line = _method->line_number_from_bci(_bci);
@@ -375,7 +356,7 @@
   _frames[frame_pos] = frame;
 }
 
-void JfrStackTrace::resolve_linenos() {
+void JfrStackTrace::resolve_linenos() const {
   for(unsigned int i = 0; i < _nr_of_frames; i++) {
     _frames[i].resolve_lineno();
   }
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -36,9 +36,9 @@
 
 class JfrStackFrame {
  private:
-  const Method* _method;
+  mutable const Method* _method;
   traceid _methodid;
-  int _line;
+  mutable int _line;
   int _bci;
   u1 _type;
 
@@ -58,7 +58,7 @@
   bool equals(const JfrStackFrame& rhs) const;
   void write(JfrChunkWriter& cw) const;
   void write(JfrCheckpointWriter& cpw) const;
-  void resolve_lineno();
+  void resolve_lineno() const;
 };
 
 class JfrStackTrace : public StackObj {
@@ -70,7 +70,7 @@
   unsigned int _hash;
   const u4 _max_frames;
   bool _reached_root;
-  bool _lineno;
+  mutable bool _lineno;
 
  public:
   JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
@@ -82,9 +82,10 @@
                                                         _lineno(false) {}
   bool record_thread(JavaThread& thread, frame& frame);
   bool record_safe(JavaThread* thread, int skip, bool leakp = false);
-  void resolve_linenos();
+  void resolve_linenos() const;
   void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
   void set_hash(unsigned int hash) { _hash = hash; }
+  unsigned int hash() const { return _hash; }
   void set_frame(u4 frame_pos, JfrStackFrame& frame);
   void set_reached_root(bool reached_root) { _reached_root = reached_root; }
   bool full_stacktrace() const { return _reached_root; }
@@ -128,23 +129,26 @@
   traceid _next_id;
   u4 _entries;
 
-  size_t write_impl(JfrChunkWriter& cw, bool clear);
+  traceid add_trace(const JfrStackTrace& stacktrace);
+  static traceid add(const JfrStackTrace* stacktrace, JavaThread* thread);
   traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
-  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
-  traceid add_trace(const JfrStackTrace& stacktrace);
+
+  size_t write_impl(JfrChunkWriter& cw, bool clear);
   const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
-
   static void write_metadata(JfrCheckpointWriter& cpw);
 
+  static bool fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip);
+
   JfrStackTraceRepository();
-  static JfrStackTraceRepository& instance();
- public:
   static JfrStackTraceRepository* create();
   bool initialize();
   static void destroy();
+
+  static JfrStackTraceRepository& instance();
+
+ public:
   static traceid add(const JfrStackTrace& stacktrace);
   static traceid record(Thread* thread, int skip = 0);
-  static traceid record(Thread* thread, int skip, unsigned int* hash);
   traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
   size_t write(JfrChunkWriter& cw, bool clear);
   size_t clear();
--- a/src/hotspot/share/jfr/support/jfrFlush.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/support/jfrFlush.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -48,10 +48,12 @@
 
 template <typename Event>
 class JfrConditionalFlush {
+ protected:
+  bool _enabled;
  public:
   typedef JfrBuffer Type;
-  JfrConditionalFlush(Thread* t) {
-    if (jfr_is_event_enabled(Event::eventId)) {
+  JfrConditionalFlush(Thread* t) : _enabled(jfr_is_event_enabled(Event::eventId)) {
+    if (_enabled) {
       jfr_conditional_flush(Event::eventId, sizeof(Event), t);
     }
   }
@@ -63,7 +65,7 @@
   bool _owner;
  public:
   JfrConditionalFlushWithStacktrace(Thread* t) : JfrConditionalFlush<Event>(t), _t(t), _owner(false) {
-    if (Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
+    if (this->_enabled && Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
       _owner = jfr_save_stacktrace(t);
     }
   }
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -150,9 +150,7 @@
 
 JfrStackFrame* JfrThreadLocal::install_stackframes() const {
   assert(_stackframes == NULL, "invariant");
-  _stackdepth = (u4)JfrOptionSet::stackdepth();
-  guarantee(_stackdepth > 0, "Stackdepth must be > 0");
-  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, _stackdepth, mtTracing);
+  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
   return _stackframes;
 }
 
@@ -163,3 +161,7 @@
 ByteSize JfrThreadLocal::java_event_writer_offset() {
   return in_ByteSize(offset_of(JfrThreadLocal, _java_event_writer));
 }
+
+u4 JfrThreadLocal::stackdepth() const {
+  return _stackdepth != 0 ? _stackdepth : (u4)JfrOptionSet::stackdepth();
+}
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -113,9 +113,7 @@
     _stackframes = frames;
   }
 
-  u4 stackdepth() const {
-    return _stackdepth;
-  }
+  u4 stackdepth() const;
 
   void set_stackdepth(u4 depth) {
     _stackdepth = depth;
--- a/src/hotspot/share/jfr/writers/jfrJavaEventWriter.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/writers/jfrJavaEventWriter.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -135,8 +135,7 @@
 bool JfrJavaEventWriter::initialize() {
   static bool initialized = false;
   if (!initialized) {
-    Thread* thread = Thread::current();
-    initialized = setup_event_writer_offsets(thread);
+    initialized = setup_event_writer_offsets(Thread::current());
   }
   return initialized;
 }
@@ -155,6 +154,7 @@
   // large enough to accommodate the "requested size".
   const bool is_valid = buffer->free_size() >= (size_t)(used + requested);
   u1* const new_current_position = is_valid ? buffer->pos() + used : buffer->pos();
+  assert(start_pos_offset != invalid_offset, "invariant");
   w->long_field_put(start_pos_offset, (jlong)buffer->pos());
   w->long_field_put(current_pos_offset, (jlong)new_current_position);
   // only update java writer if underlying memory changed
--- a/src/hotspot/share/jfr/writers/jfrJavaEventWriter.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/jfr/writers/jfrJavaEventWriter.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -33,13 +33,14 @@
 
 class JfrJavaEventWriter : AllStatic {
   friend class JfrCheckpointThreadClosure;
+  friend class JfrJavaEventWriterNotificationClosure;
   friend class JfrJavaEventWriterNotifyOperation;
-  friend class JfrJavaEventWriterNotificationClosure;
+  friend class JfrRecorder;
  private:
+  static bool initialize();
   static void notify(JavaThread* jt);
 
  public:
-  static bool initialize();
   static void notify();
   static jobject event_writer(Thread* t);
   static jobject new_event_writer(TRAPS);
--- a/src/hotspot/share/memory/metaspace.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/memory/metaspace.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -471,9 +471,36 @@
   return cm->chunk_free_list_summary();
 }
 
-void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
-  log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
-                          prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
+void MetaspaceUtils::print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values) {
+  const metaspace::MetaspaceSizesSnapshot meta_values;
+
+  if (Metaspace::using_class_space()) {
+    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
+                            HEAP_CHANGE_FORMAT" "
+                            HEAP_CHANGE_FORMAT,
+                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
+                                                    pre_meta_values.used(),
+                                                    pre_meta_values.committed(),
+                                                    meta_values.used(),
+                                                    meta_values.committed()),
+                            HEAP_CHANGE_FORMAT_ARGS("NonClass",
+                                                    pre_meta_values.non_class_used(),
+                                                    pre_meta_values.non_class_committed(),
+                                                    meta_values.non_class_used(),
+                                                    meta_values.non_class_committed()),
+                            HEAP_CHANGE_FORMAT_ARGS("Class",
+                                                    pre_meta_values.class_used(),
+                                                    pre_meta_values.class_committed(),
+                                                    meta_values.class_used(),
+                                                    meta_values.class_committed()));
+  } else {
+    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT,
+                            HEAP_CHANGE_FORMAT_ARGS("Metaspace",
+                                                    pre_meta_values.used(),
+                                                    pre_meta_values.committed(),
+                                                    meta_values.used(),
+                                                    meta_values.committed()));
+  }
 }
 
 void MetaspaceUtils::print_on(outputStream* out) {
--- a/src/hotspot/share/memory/metaspace.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/memory/metaspace.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -28,6 +28,7 @@
 #include "memory/memRegion.hpp"
 #include "memory/metaspaceChunkFreeListSummary.hpp"
 #include "memory/virtualspace.hpp"
+#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
 #include "utilities/exceptions.hpp"
 
 // Metaspace
@@ -410,8 +411,8 @@
   static bool has_chunk_free_list(Metaspace::MetadataType mdtype);
   static MetaspaceChunkFreeListSummary chunk_free_list_summary(Metaspace::MetadataType mdtype);
 
-  // Print change in used metadata.
-  static void print_metaspace_change(size_t prev_metadata_used);
+  // Log change in used metadata.
+  static void print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values);
   static void print_on(outputStream * out);
 
   // Prints an ASCII representation of the given space.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, Twitter, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "memory/metaspace.hpp"
+#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
+
+namespace metaspace {
+
+MetaspaceSizesSnapshot::MetaspaceSizesSnapshot()
+    : _used(MetaspaceUtils::used_bytes()),
+      _committed(MetaspaceUtils::committed_bytes()),
+      _non_class_used(MetaspaceUtils::used_bytes(Metaspace::NonClassType)),
+      _non_class_committed(MetaspaceUtils::committed_bytes(Metaspace::NonClassType)),
+      _class_used(MetaspaceUtils::used_bytes(Metaspace::ClassType)),
+      _class_committed(MetaspaceUtils::committed_bytes(Metaspace::ClassType)) { }
+
+} // namespace metaspace
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, Twitter, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_METASPACESIZESSNAPSHOT_HPP
+#define SHARE_MEMORY_METASPACE_METASPACESIZESSNAPSHOT_HPP
+
+namespace metaspace {
+
+class MetaspaceSizesSnapshot {
+public:
+  MetaspaceSizesSnapshot();
+
+  size_t used() const { return _used; }
+  size_t committed() const { return _committed; }
+  size_t non_class_used() const { return _non_class_used; }
+  size_t non_class_committed() const { return _non_class_committed; }
+  size_t class_used() const { return _class_used; }
+  size_t class_committed() const { return _class_committed; }
+
+private:
+  const size_t _used;
+  const size_t _committed;
+  const size_t _non_class_used;
+  const size_t _non_class_committed;
+  const size_t _class_used;
+  const size_t _class_committed;
+};
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_METASPACESIZESSNAPSHOT_HPP
--- a/src/hotspot/share/opto/lcm.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/opto/lcm.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -170,7 +170,6 @@
     case Op_LoadI:
     case Op_LoadL:
     case Op_LoadP:
-    case Op_LoadBarrierSlowReg:
     case Op_LoadN:
     case Op_LoadS:
     case Op_LoadKlass:
--- a/src/hotspot/share/opto/loopnode.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/opto/loopnode.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -2494,9 +2494,11 @@
       }
     }
   }
-  // Add data (x1.5) and control (x1.0) count to estimate iff both are > 0.
+  // Add data and control count (x2.0) to estimate iff both are > 0. This is
+  // a rather pessimistic estimate for the most part, in particular for some
+  // complex loops, but it is still not large enough to capture all loops.
   if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) {
-    estimate += ctrl_edge_out_cnt + data_edge_out_cnt + data_edge_out_cnt / 2;
+    estimate += 2 * (ctrl_edge_out_cnt + data_edge_out_cnt);
   }
 
   return estimate;
@@ -4292,7 +4294,6 @@
     case Op_LoadL:
     case Op_LoadS:
     case Op_LoadP:
-    case Op_LoadBarrierSlowReg:
     case Op_LoadN:
     case Op_LoadRange:
     case Op_LoadD_unaligned:
--- a/src/hotspot/share/opto/node.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/opto/node.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -675,6 +675,7 @@
       DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
         DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
         DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
+      DEFINE_CLASS_ID(LoadBarrierSlowReg, Type, 7)
 
     DEFINE_CLASS_ID(Proj,  Node, 3)
       DEFINE_CLASS_ID(CatchProj, Proj, 0)
@@ -688,7 +689,6 @@
     DEFINE_CLASS_ID(Mem,   Node, 4)
       DEFINE_CLASS_ID(Load,  Mem, 0)
         DEFINE_CLASS_ID(LoadVector,  Load, 0)
-        DEFINE_CLASS_ID(LoadBarrierSlowReg, Load, 1)
       DEFINE_CLASS_ID(Store, Mem, 1)
         DEFINE_CLASS_ID(StoreVector, Store, 0)
       DEFINE_CLASS_ID(LoadStore, Mem, 2)
--- a/src/hotspot/share/opto/vectornode.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/opto/vectornode.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -297,7 +297,6 @@
   case Op_LoadI:   case Op_LoadL:
   case Op_LoadF:   case Op_LoadD:
   case Op_LoadP:   case Op_LoadN:
-  case Op_LoadBarrierSlowReg:
     *start = 0;
     *end   = 0; // no vector operands
     break;
--- a/src/hotspot/share/runtime/safepoint.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/runtime/safepoint.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -35,6 +35,7 @@
 #include "code/scopeDesc.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/gcLocker.hpp"
+#include "gc/shared/oopStorage.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "interpreter/interpreter.hpp"
@@ -643,6 +644,12 @@
       }
     }
 
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP)) {
+      // Don't bother reporting event or time for this very short operation.
+      // To have any utility we would also need to report whether cleanup was actually needed.
+      OopStorage::trigger_cleanup_if_needed();
+    }
+
     _subtasks.all_tasks_completed(_num_workers);
   }
 };
--- a/src/hotspot/share/runtime/safepoint.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/runtime/safepoint.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -77,6 +77,7 @@
     SAFEPOINT_CLEANUP_STRING_TABLE_REHASH,
     SAFEPOINT_CLEANUP_CLD_PURGE,
     SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE,
+    SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP,
     // Leave this one last.
     SAFEPOINT_CLEANUP_NUM_TASKS
   };
--- a/src/hotspot/share/runtime/serviceThread.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/runtime/serviceThread.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -83,27 +83,9 @@
   }
 }
 
-static bool needs_oopstorage_cleanup(OopStorage* const* storages,
-                                     bool* needs_cleanup,
-                                     size_t size) {
-  bool any_needs_cleanup = false;
+static void cleanup_oopstorages(OopStorage* const* storages, size_t size) {
   for (size_t i = 0; i < size; ++i) {
-    assert(!needs_cleanup[i], "precondition");
-    if (storages[i]->needs_delete_empty_blocks()) {
-      needs_cleanup[i] = true;
-      any_needs_cleanup = true;
-    }
-  }
-  return any_needs_cleanup;
-}
-
-static void cleanup_oopstorages(OopStorage* const* storages,
-                                const bool* needs_cleanup,
-                                size_t size) {
-  for (size_t i = 0; i < size; ++i) {
-    if (needs_cleanup[i]) {
-      storages[i]->delete_empty_blocks();
-    }
+    storages[i]->delete_empty_blocks();
   }
 }
 
@@ -126,7 +108,6 @@
     bool resolved_method_table_work = false;
     bool protection_domain_table_work = false;
     bool oopstorage_work = false;
-    bool oopstorages_cleanup[oopstorage_count] = {}; // Zero (false) initialize.
     JvmtiDeferredEvent jvmti_event;
     {
       // Need state transition ThreadBlockInVM so that this thread
@@ -152,10 +133,7 @@
               (symboltable_work = SymbolTable::has_work()) |
               (resolved_method_table_work = ResolvedMethodTable::has_work()) |
               (protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
-              (oopstorage_work = needs_oopstorage_cleanup(oopstorages,
-                                                          oopstorages_cleanup,
-                                                          oopstorage_count)))
-
+              (oopstorage_work = OopStorage::has_cleanup_work_and_reset()))
              == 0) {
         // Wait until notified that there is some work to do.
         ml.wait();
@@ -199,7 +177,7 @@
     }
 
     if (oopstorage_work) {
-      cleanup_oopstorages(oopstorages, oopstorages_cleanup, oopstorage_count);
+      cleanup_oopstorages(oopstorages, oopstorage_count);
     }
   }
 }
--- a/src/hotspot/share/runtime/vmOperations.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/runtime/vmOperations.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -128,6 +128,7 @@
   template(ScavengeMonitors)                      \
   template(PrintMetadata)                         \
   template(GTestExecuteAtSafepoint)               \
+  template(JFROldObject)                          \
 
 class VM_Operation: public CHeapObj<mtInternal> {
  public:
--- a/src/hotspot/share/services/diagnosticCommand.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/services/diagnosticCommand.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -880,7 +880,7 @@
     return "High: Switches the VM into Java debug mode.";
   }
   static const JavaPermission permission() {
-    JavaPermission p = { "java.lang.management.ManagementPermission", "monitor", NULL };
+    JavaPermission p = { "java.lang.management.ManagementPermission", "control", NULL };
     return p;
   }
   static int num_arguments() { return 0; }
--- a/src/hotspot/share/utilities/globalDefinitions.hpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp	Wed Jul 03 17:46:04 2019 -0400
@@ -308,6 +308,13 @@
   return s;
 }
 
+// Memory size transition formatting.
+
+#define HEAP_CHANGE_FORMAT "%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)->" SIZE_FORMAT "K(" SIZE_FORMAT "K)"
+
+#define HEAP_CHANGE_FORMAT_ARGS(_name_, _prev_used_, _prev_capacity_, _used_, _capacity_) \
+  (_name_), (_prev_used_) / K, (_prev_capacity_) / K, (_used_) / K, (_capacity_) / K
+
 //----------------------------------------------------------------------------------------------------
 // VM type definitions
 
--- a/src/java.management/share/classes/javax/management/remote/package.html	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/java.management/share/classes/javax/management/remote/package.html	Wed Jul 03 17:46:04 2019 -0400
@@ -67,7 +67,7 @@
         the optional part of the <em>JMX Remote API</em>
         are not included in the <em>Java SE Platform</em> 
         but are available from the <em>JMX Remote API 
-	<a href="http://java.sun.com/products/JavaManagement/download.html">
+	<a href="https://www.oracle.com/technetwork/java/javasebusiness/downloads/java-archive-downloads-java-plat-419418.html">
 	Reference Implementation</a></em>.</p>
 
 
--- a/src/java.security.jgss/share/classes/sun/security/krb5/Checksum.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/Checksum.java	Wed Jul 03 17:46:04 2019 -0400
@@ -197,6 +197,26 @@
             usage);
     }
 
+    // ===============  ATTENTION! Use with care  ==================
+    // According to https://tools.ietf.org/html/rfc3961#section-6.1,
+    // an unkeyed checksum should only be used "in limited circumstances
+    // where the lack of a key does not provide a window for an attack,
+    // preferably as part of an encrypted message".
+    public boolean verifyAnyChecksum(byte[] data, EncryptionKey key,
+            int usage)
+            throws KdcErrException, KrbCryptoException {
+        CksumType cksumEngine = CksumType.getInstance(cksumType);
+        if (!cksumEngine.isSafe()) {
+            return cksumEngine.verifyChecksum(data, checksum);
+        } else {
+            return cksumEngine.verifyKeyedChecksum(data,
+                    data.length,
+                    key.getBytes(),
+                    checksum,
+                    usage);
+        }
+    }
+
     /*
     public Checksum(byte[] data) throws KdcErrException, KrbCryptoException {
         this(Checksum.CKSUMTYPE_DEFAULT, data);
--- a/src/java.security.jgss/share/classes/sun/security/krb5/KrbKdcRep.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/KrbKdcRep.java	Wed Jul 03 17:46:04 2019 -0400
@@ -158,8 +158,10 @@
                             Checksum repCksum = new Checksum(
                                     new DerInputStream(
                                             pa.getValue()).getDerValue());
+                            // The checksum is inside encKDCRepPart so we don't
+                            // care if it's keyed or not.
                             repPaReqEncPaRepValid =
-                                    repCksum.verifyKeyedChecksum(
+                                    repCksum.verifyAnyChecksum(
                                             req.asn1Encode(), replyKey,
                                             KeyUsage.KU_AS_REQ);
                         } catch (Exception e) {
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/CksumType.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/CksumType.java	Wed Jul 03 17:46:04 2019 -0400
@@ -156,6 +156,11 @@
     public abstract byte[] calculateKeyedChecksum(byte[] data, int size,
         byte[] key, int usage) throws KrbCryptoException;
 
+    public boolean verifyChecksum(byte[] data, byte[] checksum)
+            throws KrbCryptoException {
+        throw new UnsupportedOperationException("Not supported");
+    }
+
     public abstract boolean verifyKeyedChecksum(byte[] data, int size,
         byte[] key, byte[] checksum, int usage) throws KrbCryptoException;
 
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/RsaMd5CksumType.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/RsaMd5CksumType.java	Wed Jul 03 17:46:04 2019 -0400
@@ -101,4 +101,14 @@
         return false;
     }
 
+    @Override
+    public boolean verifyChecksum(byte[] data, byte[] checksum)
+            throws KrbCryptoException {
+        try {
+            byte[] calculated = MessageDigest.getInstance("MD5").digest(data);
+            return CksumType.isChecksumEqual(calculated, checksum);
+        } catch (Exception e) {
+            return false;
+        }
+    }
 }
--- a/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/ToHTMLStream.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/ToHTMLStream.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  */
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -40,7 +40,7 @@
  * because it is used from another package.
  *
  * @xsl.usage internal
- * @LastModified: Sept 2018
+ * @LastModified: July 2019
  */
 public final class ToHTMLStream extends ToStream
 {
@@ -719,7 +719,7 @@
     public final void endDocument() throws org.xml.sax.SAXException
     {
         if (m_doIndent) {
-            flushCharactersBuffer();
+            flushCharactersBuffer(false);
         }
         flushPending();
         if (m_doIndent && !m_isprevtext)
@@ -782,7 +782,7 @@
         if (m_doIndent) {
             // will add extra one if having namespace but no matter
             m_childNodeNum++;
-            flushCharactersBuffer();
+            flushCharactersBuffer(false);
         }
         ElemContext elemContext = m_elemContext;
 
@@ -923,7 +923,7 @@
         throws org.xml.sax.SAXException
     {
         if (m_doIndent) {
-            flushCharactersBuffer();
+            flushCharactersBuffer(false);
         }
         // deal with any pending issues
         if (m_cdataTagOpen)
@@ -1645,7 +1645,7 @@
     {
         if (m_doIndent) {
             m_childNodeNum++;
-            flushCharactersBuffer();
+            flushCharactersBuffer(false);
         }
         // Process any pending starDocument and startElement first.
         flushPending();
--- a/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/ToStream.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/ToStream.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  */
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -51,7 +51,7 @@
  * serializers (xml, html, text ...) that write output to a stream.
  *
  * @xsl.usage internal
- * @LastModified: Sept 2018
+ * @LastModified: July 2019
  */
 abstract public class ToStream extends SerializerBase {
 
@@ -1231,7 +1231,7 @@
                 m_elemContext.m_startTagOpen = false;
             }
 
-            if (!m_cdataTagOpen && shouldIndent())
+            if (!m_cdataTagOpen && shouldIndentForText())
                 indent();
 
             boolean writeCDataBrackets =
@@ -1270,6 +1270,7 @@
                     closeCDATA();
             }
 
+            m_isprevtext = true;
             // time to fire off CDATA event
             if (m_tracer != null)
                 super.fireCDATAEvent(ch, old_start, length);
@@ -1536,11 +1537,13 @@
     }
 
     /**
-     * Used to flush the buffered characters when indentation is on, this method
-     * will be called when the next node is traversed.
+     * Flushes the buffered characters when indentation is on. This method
+     * is called before the next node is traversed.
      *
+     * @param isText indicates whether the node to be traversed is text
+     * @throws org.xml.sax.SAXException
      */
-    final protected void flushCharactersBuffer() throws SAXException {
+    final protected void flushCharactersBuffer(boolean isText) throws SAXException {
         try {
             if (shouldFormatOutput() && m_charactersBuffer.isAnyCharactersBuffered()) {
                 if (m_elemContext.m_isCdataSection) {
@@ -1553,7 +1556,9 @@
                     return;
                 }
 
-                m_childNodeNum++;
+                if (!isText) {
+                    m_childNodeNum++;
+                }
                 boolean skipBeginningNewlines = false;
                 if (shouldIndentForText()) {
                     indent();
@@ -1846,7 +1851,7 @@
 
         if (m_doIndent) {
             m_childNodeNum++;
-            flushCharactersBuffer();
+            flushCharactersBuffer(false);
         }
 
         if (m_needToCallStartDocument)
@@ -2117,7 +2122,7 @@
             return;
 
         if (m_doIndent) {
-            flushCharactersBuffer();
+            flushCharactersBuffer(false);
         }
         // namespaces declared at the current depth are no longer valid
         // so get rid of them
@@ -2309,7 +2314,7 @@
             return;
         if (m_doIndent) {
             m_childNodeNum++;
-            flushCharactersBuffer();
+            flushCharactersBuffer(false);
         }
         if (m_elemContext.m_startTagOpen)
         {
@@ -2491,8 +2496,7 @@
     public void startCDATA() throws org.xml.sax.SAXException
     {
         if (m_doIndent) {
-            m_childNodeNum++;
-            flushCharactersBuffer();
+            flushCharactersBuffer(true);
         }
 
         m_cdataStartCalled = true;
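
The new boolean on flushCharactersBuffer distinguishes a flush triggered by an upcoming text/CDATA node (which should not bump m_childNodeNum) from one triggered by an element or other node. A minimal way to exercise the affected indentation-plus-CDATA path from user code is an identity transform over input that mixes character data and a CDATA section; the document below is illustrative, and the exact indented output depends on the serializer shipped with the JDK.

import java.io.StringReader;
import java.io.StringWriter;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

public class IndentCdataExample {
    public static void main(String[] args) throws Exception {
        // Text content followed by a CDATA section, serialized with indentation on.
        String xml = "<root><note>text before</note><data><![CDATA[raw <content> & more]]></data></root>";
        Transformer t = TransformerFactory.newInstance().newTransformer();
        t.setOutputProperty(OutputKeys.INDENT, "yes");
        StringWriter out = new StringWriter();
        t.transform(new StreamSource(new StringReader(xml)), new StreamResult(out));
        System.out.println(out);
    }
}
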
--- a/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/ToXMLStream.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/ToXMLStream.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  */
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -40,6 +40,7 @@
  * be viewed as internal or package private, this is not an API.
  *
  * @xsl.usage internal
+ * @LastModified: July 2019
  */
 public final class ToXMLStream extends ToStream
 {
@@ -200,7 +201,7 @@
     public void endDocument() throws org.xml.sax.SAXException
     {
         if (m_doIndent) {
-            flushCharactersBuffer();
+            flushCharactersBuffer(false);
         }
         flushPending();
         if (m_doIndent && !m_isprevtext)
@@ -267,7 +268,7 @@
 
         if (m_doIndent) {
             m_childNodeNum++;
-            flushCharactersBuffer();
+            flushCharactersBuffer(false);
         }
         flushPending();
 
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java	Wed Jul 03 17:46:04 2019 -0400
@@ -775,9 +775,9 @@
 
     public JCExpression unannotatedType(boolean allowVar) {
         JCExpression result = term(TYPE);
-        Name restrictedTypeName;
-
-        if (!allowVar && (restrictedTypeName = restrictedTypeName(result, true)) != null) {
+        Name restrictedTypeName = restrictedTypeName(result, !allowVar);
+
+        if (restrictedTypeName != null && (!allowVar || restrictedTypeName != names.var)) {
             syntaxError(result.pos, Errors.RestrictedTypeNotAllowedHere(restrictedTypeName));
         }
 
--- a/test/hotspot/jtreg/ProblemList-aot.txt	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/hotspot/jtreg/ProblemList-aot.txt	Wed Jul 03 17:46:04 2019 -0400
@@ -72,3 +72,11 @@
 serviceability/sa/TestRevPtrsForInvokeDynamic.java      8216181   generic-all
 serviceability/sa/TestType.java                         8216181   generic-all
 serviceability/sa/TestUniverse.java                     8216181   generic-all
+
+compiler/intrinsics/sha/sanity/TestSHA256MultiBlockIntrinsics.java  8167430 generic-all
+compiler/intrinsics/sha/sanity/TestSHA1Intrinsics.java              8167430 generic-all
+compiler/intrinsics/sha/sanity/TestSHA512Intrinsics.java            8167430 generic-all
+compiler/intrinsics/sha/sanity/TestSHA256Intrinsics.java            8167430 generic-all
+compiler/intrinsics/sha/sanity/TestSHA1MultiBlockIntrinsics.java    8167430 generic-all
+compiler/intrinsics/sha/sanity/TestSHA512MultiBlockIntrinsics.java  8167430 generic-all
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/InvocationTests/invocationC1Tests.java	Wed Jul 03 17:46:04 2019 -0400
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8226956
+ * @summary Run invocation tests against C1 compiler
+ * @library /test/lib
+ * @modules java.base/jdk.internal.org.objectweb.asm
+ *          java.base/jdk.internal.misc
+ * @compile shared/AbstractGenerator.java shared/AccessCheck.java shared/AccessType.java
+ *          shared/Caller.java shared/ExecutorGenerator.java shared/Utils.java
+ *          shared/ByteArrayClassLoader.java shared/Checker.java shared/GenericClassGenerator.java
+ * @compile invokespecial/Checker.java invokespecial/ClassGenerator.java invokespecial/Generator.java
+ *          invokevirtual/Checker.java invokevirtual/ClassGenerator.java invokevirtual/Generator.java
+ *          invokeinterface/Checker.java invokeinterface/ClassGenerator.java invokeinterface/Generator.java
+ *
+ * @run main/othervm/timeout=1800 invocationC1Tests
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.compiler.InMemoryJavaCompiler;
+
+public class invocationC1Tests {
+
+    public static void runTest(String whichTests, String classFileVersion) throws Exception {
+        System.out.println("\nC1 invocation tests, Tests: " + whichTests +
+                           ", class file version: " + classFileVersion);
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(false, "-Xmx128M",
+            "-Xcomp", "-XX:TieredStopAtLevel=1", whichTests,
+            "--classfile_version=" + classFileVersion);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        try {
+            output.shouldContain("EXECUTION STATUS: PASSED");
+            output.shouldHaveExitValue(0);
+        } catch (Throwable e) {
+            System.out.println(
+                "\nNote that an entry such as 'B.m/C.m' in the failure chart means that" +
+                " the test case failed because method B.m was invoked but the test " +
+                "expected method C.m to be invoked. Similarly, a result such as 'AME/C.m'" +
+                " means that an AbstractMethodError exception was thrown but the test" +
+                " case expected method C.m to be invoked.");
+            System.out.println(
+                "\nAlso note that passing --dump to invoke*.Generator will" +
+                " dump the generated classes (for debugging purposes).\n");
+            System.exit(1);
+        }
+    }
+
+    public static void main(String args[]) throws Throwable {
+        // Get current major class file version and test with it.
+        byte klassbuf[] = InMemoryJavaCompiler.compile("blah", "public class blah { }");
+        int major_version = klassbuf[6] << 8 | klassbuf[7];
+        runTest("invokespecial.Generator", String.valueOf(major_version));
+        runTest("invokeinterface.Generator", String.valueOf(major_version));
+
+        // Uncomment this test once JDK-8226588 is fixed
+        // runTest("invokevirtual.Generator", String.valueOf(major_version));
+    }
+}
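
Both new tests derive the --classfile_version argument from bytes 6 and 7 of a freshly compiled class, i.e. the big-endian major version that follows the 0xCAFEBABE magic and the minor version in the class file header. A minimal standalone sketch of the same lookup against a class file on disk is shown below; the file name is illustrative, and DataInputStream stands in for the test library's InMemoryJavaCompiler.

import java.io.DataInputStream;
import java.io.FileInputStream;

public class ClassFileVersionExample {
    public static void main(String[] args) throws Exception {
        try (DataInputStream in = new DataInputStream(new FileInputStream("blah.class"))) {
            int magic = in.readInt();            // 0xCAFEBABE
            int minor = in.readUnsignedShort();  // bytes 4-5
            int major = in.readUnsignedShort();  // bytes 6-7, what the tests pass as --classfile_version
            System.out.printf("magic=%08x major=%d minor=%d%n", magic, major, minor);
        }
    }
}

In the tests themselves the same two bytes are picked straight out of the in-memory klassbuf array, so no file round-trip is needed.
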
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/InvocationTests/invocationGraalTests.java	Wed Jul 03 17:46:04 2019 -0400
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8226956
+ * @requires vm.jvmci
+ * @summary Run invocation tests against Graal compiler
+ * @library /test/lib
+ * @modules java.base/jdk.internal.org.objectweb.asm
+ *          java.base/jdk.internal.misc
+ * @compile shared/AbstractGenerator.java shared/AccessCheck.java shared/AccessType.java
+ *          shared/Caller.java shared/ExecutorGenerator.java shared/Utils.java
+ *          shared/ByteArrayClassLoader.java shared/Checker.java shared/GenericClassGenerator.java
+ * @compile invokespecial/Checker.java invokespecial/ClassGenerator.java invokespecial/Generator.java
+ *          invokevirtual/Checker.java invokevirtual/ClassGenerator.java invokevirtual/Generator.java
+ *          invokeinterface/Checker.java invokeinterface/ClassGenerator.java invokeinterface/Generator.java
+ *
+ * @run main/othervm/timeout=1800 invocationGraalTests
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.compiler.InMemoryJavaCompiler;
+
+public class invocationGraalTests {
+
+    public static void runTest(String whichTests, String classFileVersion) throws Exception {
+        System.out.println("\nGraal invocation tests, Tests: " + whichTests +
+                           ", class file version: " + classFileVersion);
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(false, "-Xmx128M",
+            "-XX:+UnlockExperimentalVMOptions", "-XX:+EnableJVMCI", "-XX:+UseJVMCICompiler",
+            whichTests, "--classfile_version=" + classFileVersion);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        try {
+            output.shouldContain("EXECUTION STATUS: PASSED");
+            output.shouldHaveExitValue(0);
+        } catch (Throwable e) {
+            System.out.println(
+                "\nNote that an entry such as 'B.m/C.m' in the failure chart means that" +
+                " the test case failed because method B.m was invoked but the test " +
+                "expected method C.m to be invoked. Similarly, a result such as 'AME/C.m'" +
+                " means that an AbstractMethodError exception was thrown but the test" +
+                " case expected method C.m to be invoked.");
+            System.out.println(
+                "\nAlso note that passing --dump to invoke*.Generator will" +
+                " dump the generated classes (for debugging purposes).\n");
+            System.exit(1);
+        }
+    }
+
+    public static void main(String args[]) throws Throwable {
+        // Get current major class file version and test with it.
+        byte klassbuf[] = InMemoryJavaCompiler.compile("blah", "public class blah { }");
+        int major_version = klassbuf[6] << 8 | klassbuf[7];
+        runTest("invokevirtual.Generator", String.valueOf(major_version));
+        runTest("invokespecial.Generator", String.valueOf(major_version));
+        runTest("invokeinterface.Generator", String.valueOf(major_version));
+    }
+}
--- a/test/hotspot/jtreg/runtime/InvocationTests/invokeinterface/Checker.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/hotspot/jtreg/runtime/InvocationTests/invokeinterface/Checker.java	Wed Jul 03 17:46:04 2019 -0400
@@ -67,15 +67,13 @@
                 int modifiers = method.getModifiers();
 
                 // Check whether obtained method is public and isn't abstract
-                if ( Modifier.isPublic(modifiers))
-                {
+                if ( Modifier.isPublic(modifiers)) {
                     if (Modifier.isAbstract(modifiers)) {
                         return "java.lang.AbstractMethodError";
                     } else {
-                        return String.format("%s.%s"
-                            , method.getDeclaringClass().getSimpleName()
-                            , methodName
-                            );
+                        return String.format("%s.%s",
+                            method.getDeclaringClass().getSimpleName(),
+                            methodName);
                     }
                 } else {
                     // IAE is thrown when located method isn't PUBLIC
--- a/test/hotspot/jtreg/runtime/InvocationTests/invokespecial/Checker.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/hotspot/jtreg/runtime/InvocationTests/invokespecial/Checker.java	Wed Jul 03 17:46:04 2019 -0400
@@ -34,16 +34,12 @@
     }
 
     public String check (Class callerClass) {
-        /*
-         * If objectref is null, the invokespecial instruction throws a NullPointerException.
-         */
+        // If objectref is null, the invokespecial instruction throws a NullPointerException.
         if (dynamicTargetClass == null) {
             return "java.lang.NullPointerException";
         }
 
-        /*
-         * TODO: find a citiation from spec for this case
-         */
+        // TODO: find a citation from spec for this case
         Method resolvedMethod;
         try {
             // May throw VerifyError
@@ -56,15 +52,13 @@
             return "java.lang.NoSuchMethodError";
         }
 
-       /*
-        * If:
-        *   - the resolved method is protected (4.7)
-        *   - it is a member of a superclass of the current class
-        *   - the method is not declared in the same run-time package (5.3) as the current class
-        * then:
-        *   the class of objectref must be either the current class or a subclass of the
-        * current class.
-        */
+       // If:
+       //   - the resolved method is protected (4.7)
+       //   - it is a member of a superclass of the current class
+       //   - the method is not declared in the same run-time package (5.3) as the current class
+       // then:
+       //   the class of objectref must be either the current class or a subclass of the
+       // current class.
 
         if (Modifier.isProtected(resolvedMethod.getModifiers())) {
             Method methodInSuperclass = getMethodInHierarchy(resolvedMethod.getDeclaringClass().getSuperclass());
--- a/test/hotspot/jtreg/runtime/InvocationTests/invokevirtual/Checker.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/hotspot/jtreg/runtime/InvocationTests/invokevirtual/Checker.java	Wed Jul 03 17:46:04 2019 -0400
@@ -88,7 +88,7 @@
             return null;
         }
 
-        // Dynamic target doesn't have desired method, so check it's superclass
+        // Dynamic target doesn't have desired method, so check its superclass
         if (dynamicTargetMethod == null) {
             return getOverriddenMethod(staticTarget, dynamicTarget.getSuperclass());
         } else {
@@ -109,11 +109,9 @@
             String staticTargetPkg = getClassPackageName(staticTarget);
             String dynamicTargetPkg = getClassPackageName(dynamicTarget);
 
-            if ( isPublic || isProtected
-                 || ( !isPublic && !isProtected && !isPrivate
-                      && staticTargetPkg.equals(dynamicTargetPkg)
-                    ))
-            {
+            if (isPublic || isProtected
+                || (!isPublic && !isProtected && !isPrivate
+                    && staticTargetPkg.equals(dynamicTargetPkg))) {
                 return dynamicTargetMethod;
             }
         }
--- a/test/hotspot/jtreg/runtime/InvocationTests/shared/AbstractGenerator.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/hotspot/jtreg/runtime/InvocationTests/shared/AbstractGenerator.java	Wed Jul 03 17:46:04 2019 -0400
@@ -31,9 +31,6 @@
 import java.util.List;
 import java.util.ArrayList;
 
-/**
- *
- */
 public abstract class AbstractGenerator {
     protected final boolean dumpClasses;
     protected final boolean executeTests;
--- a/test/hotspot/jtreg/runtime/InvocationTests/shared/AccessCheck.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/hotspot/jtreg/runtime/InvocationTests/shared/AccessCheck.java	Wed Jul 03 17:46:04 2019 -0400
@@ -26,8 +26,7 @@
 
 import static jdk.internal.org.objectweb.asm.Opcodes.*;
 
-/**
- *
+/*
  * @author vi158347
  */
 public class AccessCheck {
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/unit/FollowReferences/followref003/followref003.cpp	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/unit/FollowReferences/followref003/followref003.cpp	Wed Jul 03 17:46:04 2019 -0400
@@ -810,7 +810,7 @@
             }
             break;
 
-        case JVMTI_REFERENCE_ARRAY_ELEMENT:
+        case JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT:
         case JVMTI_HEAP_REFERENCE_JNI_GLOBAL:
         case JVMTI_HEAP_REFERENCE_SYSTEM_CLASS:
         case JVMTI_HEAP_REFERENCE_MONITOR:
--- a/test/jaxp/javax/xml/jaxp/unittest/common/prettyprint/PrettyPrintTest.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/jaxp/javax/xml/jaxp/unittest/common/prettyprint/PrettyPrintTest.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 
 /*
  * @test
- * @bug 6439439 8087303 8174025
+ * @bug 6439439 8087303 8174025 8223291
  * @library /javax/xml/jaxp/libs /javax/xml/jaxp/unittest
  * @run testng/othervm -DrunSecMngr=true common.prettyprint.PrettyPrintTest
  * @run testng/othervm common.prettyprint.PrettyPrintTest
@@ -382,7 +382,6 @@
     private Document toXmlDocument(String xmlString) throws Exception {
         InputSource xmlInputSource = new InputSource(new StringReader(xmlString));
         DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
-        dbf.setValidating(true);
         DocumentBuilder xmlDocumentBuilder = dbf.newDocumentBuilder();
         Document node = xmlDocumentBuilder.parse(xmlInputSource);
         return node;
--- a/test/jaxp/javax/xml/jaxp/unittest/common/prettyprint/xmltest1.out	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/jaxp/javax/xml/jaxp/unittest/common/prettyprint/xmltest1.out	Wed Jul 03 17:46:04 2019 -0400
@@ -1,3 +1,1 @@
-<a>
-    <![CDATA[ ]]>
-</a>
+<a><![CDATA[ ]]></a>
--- a/test/jaxp/javax/xml/jaxp/unittest/common/prettyprint/xmltest2.out	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/jaxp/javax/xml/jaxp/unittest/common/prettyprint/xmltest2.out	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,3 @@
-<a>
-    <![CDATA[  abc def 
+<a><![CDATA[  abc def 
 line2 &a 
- test]]>
-</a>
+ test]]></a>
--- a/test/jaxp/javax/xml/jaxp/unittest/common/prettyprint/xmltest8.out	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/jaxp/javax/xml/jaxp/unittest/common/prettyprint/xmltest8.out	Wed Jul 03 17:46:04 2019 -0400
@@ -1,6 +1,5 @@
 <root>
-         t
-    <![CDATA[ ]]>
+     t<![CDATA[ ]]>
     t   
     
     <child1/>
--- a/test/jaxp/javax/xml/jaxp/unittest/transform/OutputPropertiesTest.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/jaxp/javax/xml/jaxp/unittest/transform/OutputPropertiesTest.java	Wed Jul 03 17:46:04 2019 -0400
@@ -24,21 +24,79 @@
 package transform;
 
 import java.io.StringReader;
+import java.io.StringWriter;
 import java.util.Properties;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.transform.OutputKeys;
 import javax.xml.transform.Templates;
+import javax.xml.transform.Transformer;
 import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.stream.StreamResult;
 import javax.xml.transform.stream.StreamSource;
 import org.testng.Assert;
+import org.testng.annotations.DataProvider;
 import org.testng.annotations.Test;
+import org.w3c.dom.Document;
+import org.xml.sax.InputSource;
 
 /*
  * @test
- * @bug 8219705
+ * @bug 8219705 8223291
  * @library /javax/xml/jaxp/libs /javax/xml/jaxp/unittest
  * @run testng transform.OutputPropertiesTest
  * @summary Verifies the output properties are set correctly
  */
 public class OutputPropertiesTest {
+    /*
+       DataProvider: for testing indentation
+       Data: xml, expected result
+     */
+    @DataProvider(name = "Indentation")
+    public Object[][] getData() {
+        String mix = "\n" +
+                "        abc\n" +
+                "        mix\n" +
+                "        xyz\n" +
+                "    ";
+        return new Object[][]{
+            {"abc<![CDATA[data]]>xyz", "abcdataxyz"},
+            {"abc<![CDATA[ & ]]>xyz", "abc & xyz"},
+            {"<![CDATA[data]]>", "data"},
+            {"abc<mix>mix</mix>xyz", mix}
+        };
+    }
+
+
+    /**
+     * bug 8223291
+     * Verifies that no extra indentation is added for CDATA.
+     * @param xml the xml content to be tested
+     * @param expected the expected result
+     * @throws Exception
+     */
+    @Test(dataProvider = "Indentation")
+    public void testIndentation(String xml, String expected) throws Exception
+    {
+        StreamSource source = new StreamSource(new StringReader("<foo><bar>" + xml + "</bar></foo>"));
+        StreamResult result = new StreamResult(new StringWriter());
+
+        Transformer tform = TransformerFactory.newInstance().newTransformer();
+        tform.setOutputProperty(OutputKeys.INDENT, "yes");
+        tform.transform(source, result);
+
+        String xml1 = result.getWriter().toString();
+
+        Document document = DocumentBuilderFactory.newInstance()
+            .newDocumentBuilder()
+            .parse(new InputSource(new StringReader(xml1)));
+
+        String resultData = document.getElementsByTagName("bar")
+            .item(0)
+            .getTextContent();
+
+        Assert.assertEquals(resultData, expected);
+    }
+
     @Test
     public void testOutputProperties() throws Exception {
         String xslData = "<?xml version='1.0'?>"
@@ -70,4 +128,5 @@
                     prNames[i] + ": actual: " + value + ", expected: " + prValues[i]);
         }
     }
+
 }
--- a/test/jdk/javax/net/ssl/SSLSession/SessionTimeOutTests.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/jdk/javax/net/ssl/SSLSession/SessionTimeOutTests.java	Wed Jul 03 17:46:04 2019 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -283,7 +283,7 @@
             }
             System.out.print(sess + "      " + lifetime);
             if (((timeout == 0) || (lifetime < timeout)) &&
-                                  (isTimedout == "YES")) {
+                                  (isTimedout.equals("YES"))) {
                 isTimedout = "Invalidated before timeout";
             }
 
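
The SessionTimeOutTests hunk above replaces a reference comparison of strings (isTimedout == "YES") with a value comparison via equals(). As a minimal standalone sketch of that distinction (illustrative only, not part of this changeset):

    public class StringEqualityDemo {
        public static void main(String[] args) {
            String a = "YES";
            String b = new String("YES");    // same contents, distinct object
            System.out.println(a == b);      // false: compares object references
            System.out.println(a.equals(b)); // true: compares character sequences
        }
    }

String literals are interned, so == can appear to work until a value is produced at runtime; equals() compares the character sequences and makes the intent explicit.
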
--- a/test/langtools/tools/javac/switchexpr/WarnWrongYieldTest.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/langtools/tools/javac/switchexpr/WarnWrongYieldTest.java	Wed Jul 03 17:46:04 2019 -0400
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 8223305
+ * @bug 8223305 8226522
  * @summary Verify correct warnings w.r.t. yield
  * @compile/ref=WarnWrongYieldTest.out -source ${jdk.version} -XDrawDiagnostics -XDshould-stop.at=ATTR WarnWrongYieldTest.java
  */
@@ -159,4 +159,12 @@
         //OK - yield is a variable:
         yield[0] = 5;
     }
+
+    private void lambda() {
+        SAM s = (yield y) -> {};
+    }
+
+    interface SAM {
+        public void m(yield o);
+    }
 }
--- a/test/langtools/tools/javac/switchexpr/WarnWrongYieldTest.out	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/langtools/tools/javac/switchexpr/WarnWrongYieldTest.out	Wed Jul 03 17:46:04 2019 -0400
@@ -11,8 +11,11 @@
 WarnWrongYieldTest.java:118:9: compiler.warn.invalid.yield
 WarnWrongYieldTest.java:123:22: compiler.warn.invalid.yield
 WarnWrongYieldTest.java:152:24: compiler.warn.invalid.yield
+WarnWrongYieldTest.java:164:18: compiler.warn.restricted.type.not.allowed.preview: yield, 13
+WarnWrongYieldTest.java:168:23: compiler.warn.restricted.type.not.allowed.preview: yield, 13
 WarnWrongYieldTest.java:34:28: compiler.warn.illegal.ref.to.restricted.type: yield
 WarnWrongYieldTest.java:45:5: compiler.warn.illegal.ref.to.restricted.type: yield
+WarnWrongYieldTest.java:168:23: compiler.warn.illegal.ref.to.restricted.type: yield
 WarnWrongYieldTest.java:72:9: compiler.warn.illegal.ref.to.restricted.type: yield
 WarnWrongYieldTest.java:75:9: compiler.warn.illegal.ref.to.restricted.type: yield
 WarnWrongYieldTest.java:75:24: compiler.warn.illegal.ref.to.restricted.type: yield
@@ -22,4 +25,5 @@
 WarnWrongYieldTest.java:84:27: compiler.warn.illegal.ref.to.restricted.type: yield
 WarnWrongYieldTest.java:84:43: compiler.warn.illegal.ref.to.restricted.type: yield
 WarnWrongYieldTest.java:153:24: compiler.warn.illegal.ref.to.restricted.type: yield
-24 warnings
+WarnWrongYieldTest.java:164:18: compiler.warn.illegal.ref.to.restricted.type: yield
+28 warnings
\ No newline at end of file
--- a/test/langtools/tools/javac/switchexpr/WrongYieldTest.java	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/langtools/tools/javac/switchexpr/WrongYieldTest.java	Wed Jul 03 17:46:04 2019 -0400
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 8223305
+ * @bug 8223305 8226522
  * @summary Ensure proper errors are returned for yields.
  * @compile/fail/ref=WrongYieldTest.out --enable-preview -source ${jdk.version} -XDrawDiagnostics -XDshould-stop.at=ATTR WrongYieldTest.java
  */
@@ -222,4 +222,12 @@
         //OK - yield is a variable:
         yield[0] = 5;
     }
+
+    private void lambda() {
+        SAM s = (yield y) -> {};
+    }
+
+    interface SAM {
+        public void m(Object o);
+    }
 }
--- a/test/langtools/tools/javac/switchexpr/WrongYieldTest.out	Wed Jul 03 17:36:06 2019 -0400
+++ b/test/langtools/tools/javac/switchexpr/WrongYieldTest.out	Wed Jul 03 17:46:04 2019 -0400
@@ -1,11 +1,13 @@
 WrongYieldTest.java:39:11: compiler.err.restricted.type.not.allowed: yield, 13
 WrongYieldTest.java:45:5: compiler.err.restricted.type.not.allowed.here: yield
+WrongYieldTest.java:123:15: compiler.err.restricted.type.not.allowed.here: yield
 WrongYieldTest.java:136:9: compiler.err.invalid.yield
 WrongYieldTest.java:146:9: compiler.err.invalid.yield
 WrongYieldTest.java:151:9: compiler.err.invalid.yield
 WrongYieldTest.java:161:9: compiler.err.invalid.yield
 WrongYieldTest.java:166:22: compiler.err.invalid.yield
 WrongYieldTest.java:215:24: compiler.err.invalid.yield
+WrongYieldTest.java:227:18: compiler.err.restricted.type.not.allowed.here: yield
 WrongYieldTest.java:34:24: compiler.err.illegal.ref.to.restricted.type: yield
 WrongYieldTest.java:95:9: compiler.err.no.switch.expression
 WrongYieldTest.java:95:15: compiler.err.cant.resolve.location: kindname.variable, y1, , , (compiler.misc.location: kindname.class, t.WrongYieldTest, null)
@@ -26,4 +28,4 @@
 WrongYieldTest.java:216:24: compiler.err.illegal.ref.to.restricted.type: yield
 - compiler.note.preview.filename: WrongYieldTest.java
 - compiler.note.preview.recompile
-26 errors
+28 errors