8223136: Move compressed oops functions to CompressedOops class
author: stefank
Thu, 09 May 2019 14:26:03 +0200
changeset 54780 f8d182aedc92
parent 54779 e0bd4c7a176e
child 54781 f3f07c76d3d1
8223136: Move compressed oops functions to CompressedOops class Reviewed-by: coleenp, lkorinth
make/hotspot/src/native/dtrace/generateJvmOffsets.cpp
src/hotspot/cpu/aarch64/aarch64.ad
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp
src/hotspot/cpu/ppc/ppc.ad
src/hotspot/cpu/ppc/relocInfo_ppc.cpp
src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp
src/hotspot/cpu/s390/macroAssembler_s390.cpp
src/hotspot/cpu/s390/s390.ad
src/hotspot/cpu/sparc/macroAssembler_sparc.cpp
src/hotspot/cpu/sparc/relocInfo_sparc.cpp
src/hotspot/cpu/sparc/sparc.ad
src/hotspot/cpu/sparc/vm_version_sparc.cpp
src/hotspot/cpu/x86/macroAssembler_x86.cpp
src/hotspot/cpu/x86/relocInfo_x86.cpp
src/hotspot/cpu/x86/x86_64.ad
src/hotspot/share/aot/aotCodeHeap.cpp
src/hotspot/share/aot/aotLoader.cpp
src/hotspot/share/asm/assembler.cpp
src/hotspot/share/classfile/compactHashtable.cpp
src/hotspot/share/classfile/stringTable.cpp
src/hotspot/share/compiler/oopMap.cpp
src/hotspot/share/compiler/oopMap.hpp
src/hotspot/share/gc/shared/gcConfiguration.cpp
src/hotspot/share/gc/shared/gcConfiguration.hpp
src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp
src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp
src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
src/hotspot/share/jvmci/jvmciCodeInstaller.cpp
src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp
src/hotspot/share/memory/filemap.cpp
src/hotspot/share/memory/filemap.hpp
src/hotspot/share/memory/heapShared.hpp
src/hotspot/share/memory/metaspace.cpp
src/hotspot/share/memory/metaspaceShared.cpp
src/hotspot/share/memory/universe.cpp
src/hotspot/share/memory/universe.hpp
src/hotspot/share/memory/virtualspace.cpp
src/hotspot/share/oops/compressedOops.cpp
src/hotspot/share/oops/compressedOops.hpp
src/hotspot/share/oops/compressedOops.inline.hpp
src/hotspot/share/oops/cpCache.cpp
src/hotspot/share/oops/instanceRefKlass.inline.hpp
src/hotspot/share/oops/klass.cpp
src/hotspot/share/oops/klass.hpp
src/hotspot/share/oops/klass.inline.hpp
src/hotspot/share/oops/oop.cpp
src/hotspot/share/oops/oop.hpp
src/hotspot/share/oops/oop.inline.hpp
src/hotspot/share/opto/lcm.cpp
src/hotspot/share/opto/machnode.cpp
src/hotspot/share/opto/matcher.cpp
src/hotspot/share/opto/matcher.hpp
src/hotspot/share/prims/whitebox.cpp
src/hotspot/share/runtime/os.cpp
src/hotspot/share/runtime/stackValue.cpp
src/hotspot/share/runtime/vmStructs.cpp
src/hotspot/share/utilities/vmError.cpp
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/CompressedKlassPointers.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/CompressedOops.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java
--- a/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Thu May 09 14:26:03 2019 +0200
@@ -41,6 +41,7 @@
 #include <proc_service.h>
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/heap.hpp"
+#include "oops/compressedOops.hpp"
 #include "runtime/vmStructs.hpp"
 
 typedef enum GEN_variant {
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Thu May 09 14:26:03 2019 +0200
@@ -1962,7 +1962,7 @@
   st->print_cr("# MachUEPNode");
   if (UseCompressedClassPointers) {
     st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
-    if (Universe::narrow_klass_shift() != 0) {
+    if (CompressedKlassPointers::shift() != 0) {
       st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     }
   } else {
@@ -2183,7 +2183,7 @@
 // Implicit_null_check optimization moves the Decode along with the
 // memory operation back up before the NullCheck.
 bool Matcher::narrow_oop_use_complex_address() {
-  return Universe::narrow_oop_shift() == 0;
+  return CompressedOops::shift() == 0;
 }
 
 bool Matcher::narrow_klass_use_complex_address() {
@@ -2194,12 +2194,12 @@
 
 bool Matcher::const_oop_prefer_decode() {
   // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
-  return Universe::narrow_oop_base() == NULL;
+  return CompressedOops::base() == NULL;
 }
 
 bool Matcher::const_klass_prefer_decode() {
   // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
-  return Universe::narrow_klass_base() == NULL;
+  return CompressedKlassPointers::base() == NULL;
 }
 
 // Is it better to copy float constants, or load them directly from
@@ -5044,7 +5044,7 @@
 
 operand indirectN(iRegN reg)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(DecodeN reg);
   op_cost(0);
@@ -5059,7 +5059,7 @@
 
 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
 %{
-  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
+  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
   op_cost(0);
@@ -5074,7 +5074,7 @@
 
 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
 %{
-  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
+  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) (LShiftL lreg scale));
   op_cost(0);
@@ -5089,7 +5089,7 @@
 
 operand indIndexI2LN(iRegN reg, iRegI ireg)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) (ConvI2L ireg));
   op_cost(0);
@@ -5104,7 +5104,7 @@
 
 operand indIndexN(iRegN reg, iRegL lreg)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) lreg);
   op_cost(0);
@@ -5119,7 +5119,7 @@
 
 operand indOffIN(iRegN reg, immIOffset off)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) off);
   op_cost(0);
@@ -5134,7 +5134,7 @@
 
 operand indOffLN(iRegN reg, immLoffset off)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) off);
   op_cost(0);
@@ -7039,8 +7039,8 @@
 instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
 %{
   match(Set mem (StoreN mem zero));
-  predicate(Universe::narrow_oop_base() == NULL &&
-            Universe::narrow_klass_base() == NULL &&
+  predicate(CompressedOops::base() == NULL &&
+            CompressedKlassPointers::base() == NULL &&
             (!needs_releasing_store(n)));
 
   ins_cost(INSN_COST);
@@ -7822,7 +7822,7 @@
 // in case of 32bit oops (heap < 4Gb).
 instruct convN2I(iRegINoSp dst, iRegN src)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 
   ins_cost(INSN_COST);
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu May 09 14:26:03 2019 +0200
@@ -2131,7 +2131,7 @@
   if (CheckCompressedOops) {
     Label ok;
     push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
-    cmpptr(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+    cmpptr(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
     br(Assembler::EQ, ok);
     stop(msg);
     bind(ok);
@@ -2264,9 +2264,9 @@
 {
   if (UseCompressedOops) {
     if (Universe::is_fully_initialized()) {
-      mov(rheapbase, Universe::narrow_ptrs_base());
+      mov(rheapbase, CompressedOops::ptrs_base());
     } else {
-      lea(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+      lea(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
       ldr(rheapbase, Address(rheapbase));
     }
   }
@@ -3709,11 +3709,11 @@
 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
   if (UseCompressedClassPointers) {
     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
-    if (Universe::narrow_klass_base() == NULL) {
-      cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift());
+    if (CompressedKlassPointers::base() == NULL) {
+      cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
       return;
-    } else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
-               && Universe::narrow_klass_shift() == 0) {
+    } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
+               && CompressedKlassPointers::shift() == 0) {
       // Only the bottom 32 bits matter
       cmpw(trial_klass, tmp);
       return;
@@ -3754,9 +3754,9 @@
   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
 #endif
   verify_oop(s, "broken oop in encode_heap_oop");
-  if (Universe::narrow_oop_base() == NULL) {
-    if (Universe::narrow_oop_shift() != 0) {
-      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  if (CompressedOops::base() == NULL) {
+    if (CompressedOops::shift() != 0) {
+      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
       lsr(d, s, LogMinObjAlignmentInBytes);
     } else {
       mov(d, s);
@@ -3787,11 +3787,11 @@
   }
 #endif
   verify_oop(r, "broken oop in encode_heap_oop_not_null");
-  if (Universe::narrow_oop_base() != NULL) {
+  if (CompressedOops::base() != NULL) {
     sub(r, r, rheapbase);
   }
-  if (Universe::narrow_oop_shift() != 0) {
-    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  if (CompressedOops::shift() != 0) {
+    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
     lsr(r, r, LogMinObjAlignmentInBytes);
   }
 }
@@ -3809,12 +3809,12 @@
   verify_oop(src, "broken oop in encode_heap_oop_not_null2");
 
   Register data = src;
-  if (Universe::narrow_oop_base() != NULL) {
+  if (CompressedOops::base() != NULL) {
     sub(dst, src, rheapbase);
     data = dst;
   }
-  if (Universe::narrow_oop_shift() != 0) {
-    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  if (CompressedOops::shift() != 0) {
+    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
     lsr(dst, data, LogMinObjAlignmentInBytes);
     data = dst;
   }
@@ -3826,9 +3826,9 @@
 #ifdef ASSERT
   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
 #endif
-  if (Universe::narrow_oop_base() == NULL) {
-    if (Universe::narrow_oop_shift() != 0 || d != s) {
-      lsl(d, s, Universe::narrow_oop_shift());
+  if (CompressedOops::base() == NULL) {
+    if (CompressedOops::shift() != 0 || d != s) {
+      lsl(d, s, CompressedOops::shift());
     }
   } else {
     Label done;
@@ -3847,15 +3847,15 @@
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_oop_shift() != 0) {
-    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
-    if (Universe::narrow_oop_base() != NULL) {
+  if (CompressedOops::shift() != 0) {
+    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
+    if (CompressedOops::base() != NULL) {
       add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
     } else {
       add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
     }
   } else {
-    assert (Universe::narrow_oop_base() == NULL, "sanity");
+    assert (CompressedOops::base() == NULL, "sanity");
   }
 }
 
@@ -3865,15 +3865,15 @@
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_oop_shift() != 0) {
-    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
-    if (Universe::narrow_oop_base() != NULL) {
+  if (CompressedOops::shift() != 0) {
+    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
+    if (CompressedOops::base() != NULL) {
       add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
     } else {
       add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
     }
   } else {
-    assert (Universe::narrow_oop_base() == NULL, "sanity");
+    assert (CompressedOops::base() == NULL, "sanity");
     if (dst != src) {
       mov(dst, src);
     }
@@ -3881,9 +3881,9 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
-  if (Universe::narrow_klass_base() == NULL) {
-    if (Universe::narrow_klass_shift() != 0) {
-      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+  if (CompressedKlassPointers::base() == NULL) {
+    if (CompressedKlassPointers::shift() != 0) {
+      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
       lsr(dst, src, LogKlassAlignmentInBytes);
     } else {
       if (dst != src) mov(dst, src);
@@ -3892,17 +3892,17 @@
   }
 
   if (use_XOR_for_compressed_class_base) {
-    if (Universe::narrow_klass_shift() != 0) {
-      eor(dst, src, (uint64_t)Universe::narrow_klass_base());
+    if (CompressedKlassPointers::shift() != 0) {
+      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
       lsr(dst, dst, LogKlassAlignmentInBytes);
     } else {
-      eor(dst, src, (uint64_t)Universe::narrow_klass_base());
+      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
     }
     return;
   }
 
-  if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
-      && Universe::narrow_klass_shift() == 0) {
+  if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
+      && CompressedKlassPointers::shift() == 0) {
     movw(dst, src);
     return;
   }
@@ -3913,10 +3913,10 @@
 
   Register rbase = dst;
   if (dst == src) rbase = rheapbase;
-  mov(rbase, (uint64_t)Universe::narrow_klass_base());
+  mov(rbase, (uint64_t)CompressedKlassPointers::base());
   sub(dst, src, rbase);
-  if (Universe::narrow_klass_shift() != 0) {
-    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+  if (CompressedKlassPointers::shift() != 0) {
+    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
     lsr(dst, dst, LogKlassAlignmentInBytes);
   }
   if (dst == src) reinit_heapbase();
@@ -3930,9 +3930,9 @@
   Register rbase = dst;
   assert (UseCompressedClassPointers, "should only be used for compressed headers");
 
-  if (Universe::narrow_klass_base() == NULL) {
-    if (Universe::narrow_klass_shift() != 0) {
-      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+  if (CompressedKlassPointers::base() == NULL) {
+    if (CompressedKlassPointers::shift() != 0) {
+      assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
       lsl(dst, src, LogKlassAlignmentInBytes);
     } else {
       if (dst != src) mov(dst, src);
@@ -3941,20 +3941,20 @@
   }
 
   if (use_XOR_for_compressed_class_base) {
-    if (Universe::narrow_klass_shift() != 0) {
+    if (CompressedKlassPointers::shift() != 0) {
       lsl(dst, src, LogKlassAlignmentInBytes);
-      eor(dst, dst, (uint64_t)Universe::narrow_klass_base());
+      eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
     } else {
-      eor(dst, src, (uint64_t)Universe::narrow_klass_base());
+      eor(dst, src, (uint64_t)CompressedKlassPointers::base());
     }
     return;
   }
 
-  if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
-      && Universe::narrow_klass_shift() == 0) {
+  if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
+      && CompressedKlassPointers::shift() == 0) {
     if (dst != src)
       movw(dst, src);
-    movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32);
+    movk(dst, (uint64_t)CompressedKlassPointers::base() >> 32, 32);
     return;
   }
 
@@ -3962,9 +3962,9 @@
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   if (dst == src) rbase = rheapbase;
-  mov(rbase, (uint64_t)Universe::narrow_klass_base());
-  if (Universe::narrow_klass_shift() != 0) {
-    assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+  mov(rbase, (uint64_t)CompressedKlassPointers::base());
+  if (CompressedKlassPointers::shift() != 0) {
+    assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
     add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes);
   } else {
     add(dst, rbase, src);
@@ -4003,7 +4003,7 @@
   InstructionMark im(this);
   RelocationHolder rspec = metadata_Relocation::spec(index);
   code_section()->relocate(inst_mark(), rspec);
-  narrowKlass nk = Klass::encode_klass(k);
+  narrowKlass nk = CompressedKlassPointers::encode(k);
   movz(dst, (nk >> 16), 16);
   movk(dst, nk & 0xffff);
 }
--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp	Thu May 09 14:26:03 2019 +0200
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "oops/compressedOops.hpp"
 #include "runtime/jniHandles.hpp"
 
 #define __ masm->
@@ -83,7 +84,7 @@
         __ beq(CCR0, *L_handle_null);
         __ decode_heap_oop_not_null(dst);
       } else if (not_null) { // Guaranteed to be not null.
-        Register narrowOop = (tmp1 != noreg && Universe::narrow_oop_base_disjoint()) ? tmp1 : dst;
+        Register narrowOop = (tmp1 != noreg && CompressedOops::base_disjoint()) ? tmp1 : dst;
         __ lwz(narrowOop, ind_or_offs, base);
         __ decode_heap_oop_not_null(dst, narrowOop);
       } else { // Any oop.
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Thu May 09 14:26:03 2019 +0200
@@ -32,6 +32,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_ppc.hpp"
+#include "oops/compressedOops.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/icache.hpp"
@@ -3119,13 +3120,13 @@
 
 Register MacroAssembler::encode_klass_not_null(Register dst, Register src) {
   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
-  if (Universe::narrow_klass_base() != 0) {
+  if (CompressedKlassPointers::base() != 0) {
     // Use dst as temp if it is free.
-    sub_const_optimized(dst, current, Universe::narrow_klass_base(), R0);
+    sub_const_optimized(dst, current, CompressedKlassPointers::base(), R0);
     current = dst;
   }
-  if (Universe::narrow_klass_shift() != 0) {
-    srdi(dst, current, Universe::narrow_klass_shift());
+  if (CompressedKlassPointers::shift() != 0) {
+    srdi(dst, current, CompressedKlassPointers::shift());
     current = dst;
   }
   return current;
@@ -3153,7 +3154,7 @@
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
   if (!UseCompressedClassPointers) return 0;
   int num_instrs = 1;  // shift or move
-  if (Universe::narrow_klass_base() != 0) num_instrs = 7;  // shift + load const + add
+  if (CompressedKlassPointers::base() != 0) num_instrs = 7;  // shift + load const + add
   return num_instrs * BytesPerInstWord;
 }
 
@@ -3161,13 +3162,13 @@
   assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
   if (src == noreg) src = dst;
   Register shifted_src = src;
-  if (Universe::narrow_klass_shift() != 0 ||
-      Universe::narrow_klass_base() == 0 && src != dst) {  // Move required.
+  if (CompressedKlassPointers::shift() != 0 ||
+      CompressedKlassPointers::base() == 0 && src != dst) {  // Move required.
     shifted_src = dst;
-    sldi(shifted_src, src, Universe::narrow_klass_shift());
+    sldi(shifted_src, src, CompressedKlassPointers::shift());
   }
-  if (Universe::narrow_klass_base() != 0) {
-    add_const_optimized(dst, shifted_src, Universe::narrow_klass_base(), R0);
+  if (CompressedKlassPointers::base() != 0) {
+    add_const_optimized(dst, shifted_src, CompressedKlassPointers::base(), R0);
   }
 }
 
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp	Thu May 09 14:26:03 2019 +0200
@@ -378,19 +378,19 @@
 
 inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register src) {
   Register current = (src != noreg) ? src : d; // Oop to be compressed is in d if no src provided.
-  if (Universe::narrow_oop_base_overlaps()) {
-    sub_const_optimized(d, current, Universe::narrow_oop_base(), R0);
+  if (CompressedOops::base_overlaps()) {
+    sub_const_optimized(d, current, CompressedOops::base(), R0);
     current = d;
   }
-  if (Universe::narrow_oop_shift() != 0) {
-    rldicl(d, current, 64-Universe::narrow_oop_shift(), 32);  // Clears the upper bits.
+  if (CompressedOops::shift() != 0) {
+    rldicl(d, current, 64-CompressedOops::shift(), 32);  // Clears the upper bits.
     current = d;
   }
   return current; // Encoded oop is in this register.
 }
 
 inline Register MacroAssembler::encode_heap_oop(Register d, Register src) {
-  if (Universe::narrow_oop_base() != NULL) {
+  if (CompressedOops::base() != NULL) {
     if (VM_Version::has_isel()) {
       cmpdi(CCR0, src, 0);
       Register co = encode_heap_oop_not_null(d, src);
@@ -410,20 +410,20 @@
 }
 
 inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register src) {
-  if (Universe::narrow_oop_base_disjoint() && src != noreg && src != d &&
-      Universe::narrow_oop_shift() != 0) {
-    load_const_optimized(d, Universe::narrow_oop_base(), R0);
-    rldimi(d, src, Universe::narrow_oop_shift(), 32-Universe::narrow_oop_shift());
+  if (CompressedOops::base_disjoint() && src != noreg && src != d &&
+      CompressedOops::shift() != 0) {
+    load_const_optimized(d, CompressedOops::base(), R0);
+    rldimi(d, src, CompressedOops::shift(), 32-CompressedOops::shift());
     return d;
   }
 
   Register current = (src != noreg) ? src : d; // Compressed oop is in d if no src provided.
-  if (Universe::narrow_oop_shift() != 0) {
-    sldi(d, current, Universe::narrow_oop_shift());
+  if (CompressedOops::shift() != 0) {
+    sldi(d, current, CompressedOops::shift());
     current = d;
   }
-  if (Universe::narrow_oop_base() != NULL) {
-    add_const_optimized(d, current, Universe::narrow_oop_base(), R0);
+  if (CompressedOops::base() != NULL) {
+    add_const_optimized(d, current, CompressedOops::base(), R0);
     current = d;
   }
   return current; // Decoded oop is in this register.
@@ -432,7 +432,7 @@
 inline void MacroAssembler::decode_heap_oop(Register d) {
   Label isNull;
   bool use_isel = false;
-  if (Universe::narrow_oop_base() != NULL) {
+  if (CompressedOops::base() != NULL) {
     cmpwi(CCR0, d, 0);
     if (VM_Version::has_isel()) {
       use_isel = true;
--- a/src/hotspot/cpu/ppc/ppc.ad	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/ppc/ppc.ad	Thu May 09 14:26:03 2019 +0200
@@ -2378,7 +2378,7 @@
 /* TODO: PPC port
 // Make a new machine dependent decode node (with its operands).
 MachTypeNode *Matcher::make_decode_node() {
-  assert(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0,
+  assert(CompressedOops::base() == NULL && CompressedOops::shift() == 0,
          "This method is only implemented for unscaled cOops mode so far");
   MachTypeNode *decode = new decodeN_unscaledNode();
   decode->set_opnd_array(0, new iRegPdstOper());
@@ -2429,12 +2429,12 @@
 
 bool Matcher::const_oop_prefer_decode() {
   // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
-  return Universe::narrow_oop_base() == NULL;
+  return CompressedOops::base() == NULL;
 }
 
 bool Matcher::const_klass_prefer_decode() {
   // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
-  return Universe::narrow_klass_base() == NULL;
+  return CompressedKlassPointers::base() == NULL;
 }
 
 // Is it better to copy float constants, or load them directly from memory?
@@ -4151,7 +4151,7 @@
   // opcodes. This simplifies the register allocator.
   c_return_value %{
     assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
-            (ideal_reg == Op_RegN && Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0),
+            (ideal_reg == Op_RegN && CompressedOops::base() == NULL && CompressedOops::shift() == 0),
             "only return normal values");
     // enum names from opcodes.hpp:    Op_Node Op_Set Op_RegN       Op_RegI       Op_RegP       Op_RegF       Op_RegD       Op_RegL
     static int typeToRegLo[Op_RegL+1] = { 0,   0,     R3_num,   R3_num,   R3_num,   F1_num,   F1_num,   R3_num };
@@ -4162,7 +4162,7 @@
   // Location of compiled Java return values.  Same as C
   return_value %{
     assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
-            (ideal_reg == Op_RegN && Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0),
+            (ideal_reg == Op_RegN && CompressedOops::base() == NULL && CompressedOops::shift() == 0),
             "only return normal values");
     // enum names from opcodes.hpp:    Op_Node Op_Set Op_RegN       Op_RegI       Op_RegP       Op_RegF       Op_RegD       Op_RegL
     static int typeToRegLo[Op_RegL+1] = { 0,   0,     R3_num,   R3_num,   R3_num,   F1_num,   F1_num,   R3_num };
@@ -4978,7 +4978,7 @@
 // Operands to remove register moves in unscaled mode.
 // Match read/write registers with an EncodeP node if neither shift nor add are required.
 operand iRegP2N(iRegPsrc reg) %{
-  predicate(false /* TODO: PPC port MatchDecodeNodes*/&& Universe::narrow_oop_shift() == 0);
+  predicate(false /* TODO: PPC port MatchDecodeNodes*/&& CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(bits64_reg_ro));
   match(EncodeP reg);
   format %{ "$reg" %}
@@ -4994,7 +4994,7 @@
 %}
 
 operand iRegN2P_klass(iRegNsrc reg) %{
-  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
+  predicate(CompressedKlassPointers::base() == NULL && CompressedKlassPointers::shift() == 0);
   constraint(ALLOC_IN_RC(bits32_reg_ro));
   match(DecodeNKlass reg);
   format %{ "$reg" %}
@@ -5063,7 +5063,7 @@
 %}
 
 operand indirectNarrow_klass(iRegNsrc reg) %{
-  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
+  predicate(CompressedKlassPointers::base() == NULL && CompressedKlassPointers::shift() == 0);
   constraint(ALLOC_IN_RC(bits64_reg_ro));
   match(DecodeNKlass reg);
   op_cost(100);
@@ -5092,7 +5092,7 @@
 %}
 
 operand indOffset16Narrow_klass(iRegNsrc reg, immL16 offset) %{
-  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
+  predicate(CompressedKlassPointers::base() == NULL && CompressedKlassPointers::shift() == 0);
   constraint(ALLOC_IN_RC(bits64_reg_ro));
   match(AddP (DecodeNKlass reg) offset);
   op_cost(100);
@@ -5121,7 +5121,7 @@
 %}
 
 operand indOffset16NarrowAlg4_klass(iRegNsrc reg, immL16Alg4 offset) %{
-  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
+  predicate(CompressedKlassPointers::base() == NULL && CompressedKlassPointers::shift() == 0);
   constraint(ALLOC_IN_RC(bits64_reg_ro));
   match(AddP (DecodeNKlass reg) offset);
   op_cost(100);
@@ -5850,7 +5850,7 @@
 // Load Compressed Pointer and decode it if narrow_oop_shift == 0.
 instruct loadN2P_unscaled(iRegPdst dst, memory mem) %{
   match(Set dst (DecodeN (LoadN mem)));
-  predicate(_kids[0]->_leaf->as_Load()->is_unordered() && Universe::narrow_oop_shift() == 0);
+  predicate(_kids[0]->_leaf->as_Load()->is_unordered() && CompressedOops::shift() == 0);
   ins_cost(MEMORY_REF_COST);
 
   format %{ "LWZ     $dst, $mem \t// DecodeN (unscaled)" %}
@@ -5861,7 +5861,7 @@
 
 instruct loadN2P_klass_unscaled(iRegPdst dst, memory mem) %{
   match(Set dst (DecodeNKlass (LoadNKlass mem)));
-  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0 &&
+  predicate(CompressedKlassPointers::base() == NULL && CompressedKlassPointers::shift() == 0 &&
             _kids[0]->_leaf->as_Load()->is_unordered());
   ins_cost(MEMORY_REF_COST);
 
@@ -6327,7 +6327,7 @@
   format %{ "LoadConst $dst, heapbase" %}
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
-    __ load_const_optimized($dst$$Register, Universe::narrow_oop_base(), R0);
+    __ load_const_optimized($dst$$Register, CompressedOops::base(), R0);
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -6375,7 +6375,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_addis);
-    intptr_t Csrc = Klass::encode_klass((Klass *)$src$$constant);
+    intptr_t Csrc = CompressedKlassPointers::encode((Klass *)$src$$constant);
     __ lis($dst$$Register, (int)(short)((Csrc >> 16) & 0xffff));
   %}
   ins_pipe(pipe_class_default);
@@ -6407,7 +6407,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_ori);
-    intptr_t Csrc = Klass::encode_klass((Klass *)$src1$$constant);
+    intptr_t Csrc = CompressedKlassPointers::encode((Klass *)$src1$$constant);
     assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
     int klass_index = __ oop_recorder()->find_index((Klass *)$src1$$constant);
     RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -6435,7 +6435,7 @@
     nodes->push(m1);
 
     MachNode *m2 = m1;
-    if (!Assembler::is_uimm((jlong)Klass::encode_klass((Klass *)op_src->constant()), 31)) {
+    if (!Assembler::is_uimm((jlong)CompressedKlassPointers::encode((Klass *)op_src->constant()), 31)) {
       // Value might be 1-extended. Mask out these bits.
       m2 = new loadConNKlass_maskNode();
       m2->add_req(NULL, m1);
@@ -6949,7 +6949,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
-    __ srdi($dst$$Register, $src$$Register, Universe::narrow_oop_shift() & 0x3f);
+    __ srdi($dst$$Register, $src$$Register, CompressedOops::shift() & 0x3f);
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -6963,7 +6963,7 @@
   format %{ "SUB     $dst, $src, oop_base \t// encode" %}
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
-    __ sub_const_optimized($dst$$Register, $src$$Register, Universe::narrow_oop_base(), R0);
+    __ sub_const_optimized($dst$$Register, $src$$Register, CompressedOops::base(), R0);
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -6981,7 +6981,7 @@
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     Label done;
     __ beq($crx$$CondRegister, done);
-    __ sub_const_optimized($dst$$Register, $src1$$Register, Universe::narrow_oop_base(), R0);
+    __ sub_const_optimized($dst$$Register, $src1$$Register, CompressedOops::base(), R0);
     __ bind(done);
   %}
   ins_pipe(pipe_class_default);
@@ -7006,13 +7006,13 @@
 // Disjoint narrow oop base.
 instruct encodeP_Disjoint(iRegNdst dst, iRegPsrc src) %{
   match(Set dst (EncodeP src));
-  predicate(Universe::narrow_oop_base_disjoint());
+  predicate(CompressedOops::base_disjoint());
 
   format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with disjoint base" %}
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
-    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_oop_shift(), 32);
+    __ rldicl($dst$$Register, $src$$Register, 64-CompressedOops::shift(), 32);
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -7022,8 +7022,8 @@
   match(Set dst (EncodeP src));
   effect(TEMP crx);
   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull &&
-            Universe::narrow_oop_shift() != 0 &&
-            Universe::narrow_oop_base_overlaps());
+            CompressedOops::shift() != 0 &&
+            CompressedOops::base_overlaps());
 
   format %{ "EncodeP $dst, $crx, $src \t// postalloc expanded" %}
   postalloc_expand( postalloc_expand_encode_oop(dst, src, crx));
@@ -7033,8 +7033,8 @@
 instruct encodeP_not_null_Ex(iRegNdst dst, iRegPsrc src) %{
   match(Set dst (EncodeP src));
   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull &&
-            Universe::narrow_oop_shift() != 0 &&
-            Universe::narrow_oop_base_overlaps());
+            CompressedOops::shift() != 0 &&
+            CompressedOops::base_overlaps());
 
   format %{ "EncodeP $dst, $src\t// $src != Null, postalloc expanded" %}
   postalloc_expand( postalloc_expand_encode_oop_not_null(dst, src) );
@@ -7044,14 +7044,14 @@
 // TODO: This is the same as encodeP_shift. Merge!
 instruct encodeP_not_null_base_null(iRegNdst dst, iRegPsrc src) %{
   match(Set dst (EncodeP src));
-  predicate(Universe::narrow_oop_shift() != 0 &&
-            Universe::narrow_oop_base() ==0);
+  predicate(CompressedOops::shift() != 0 &&
+            CompressedOops::base() ==0);
 
   format %{ "SRDI    $dst, $src, #3 \t// encodeP, $src != NULL" %}
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
-    __ srdi($dst$$Register, $src$$Register, Universe::narrow_oop_shift() & 0x3f);
+    __ srdi($dst$$Register, $src$$Register, CompressedOops::shift() & 0x3f);
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -7060,7 +7060,7 @@
 // shift == 0, base == 0
 instruct encodeP_narrow_oop_shift_0(iRegNdst dst, iRegPsrc src) %{
   match(Set dst (EncodeP src));
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
 
   format %{ "MR      $dst, $src \t// Ptr->Narrow" %}
   // variable size, 0 or 4.
@@ -7083,7 +7083,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
-    __ sldi($dst$$Register, $src$$Register, Universe::narrow_oop_shift());
+    __ sldi($dst$$Register, $src$$Register, CompressedOops::shift());
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -7097,7 +7097,7 @@
   format %{ "ADD     $dst, $src, heapbase \t// DecodeN, add oop base" %}
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
-    __ add_const_optimized($dst$$Register, $src$$Register, Universe::narrow_oop_base(), R0);
+    __ add_const_optimized($dst$$Register, $src$$Register, CompressedOops::base(), R0);
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -7118,7 +7118,7 @@
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     Label done;
     __ beq($crx$$CondRegister, done);
-    __ add_const_optimized($dst$$Register, $src$$Register, Universe::narrow_oop_base(), R0);
+    __ add_const_optimized($dst$$Register, $src$$Register, CompressedOops::base(), R0);
     __ bind(done);
   %}
   ins_pipe(pipe_class_default);
@@ -7147,8 +7147,8 @@
   match(Set dst (DecodeN src));
   predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
              n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
-            Universe::narrow_oop_shift() != 0 &&
-            Universe::narrow_oop_base() != 0);
+            CompressedOops::shift() != 0 &&
+            CompressedOops::base() != 0);
   ins_cost(4 * DEFAULT_COST); // Should be more expensive than decodeN_Disjoint_isel_Ex.
   effect(TEMP crx);
 
@@ -7159,14 +7159,14 @@
 // shift != 0, base == 0
 instruct decodeN_nullBase(iRegPdst dst, iRegNsrc src) %{
   match(Set dst (DecodeN src));
-  predicate(Universe::narrow_oop_shift() != 0 &&
-            Universe::narrow_oop_base() == 0);
+  predicate(CompressedOops::shift() != 0 &&
+            CompressedOops::base() == 0);
 
   format %{ "SLDI    $dst, $src, #3 \t// DecodeN (zerobased)" %}
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
-    __ sldi($dst$$Register, $src$$Register, Universe::narrow_oop_shift());
+    __ sldi($dst$$Register, $src$$Register, CompressedOops::shift());
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -7183,7 +7183,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldimi);
-    __ rldimi($dst$$Register, $src$$Register, Universe::narrow_oop_shift(), 32-Universe::narrow_oop_shift());
+    __ rldimi($dst$$Register, $src$$Register, CompressedOops::shift(), 32-CompressedOops::shift());
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -7197,7 +7197,7 @@
   effect(TEMP_DEF dst);
   predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
              n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
-            Universe::narrow_oop_base_disjoint());
+            CompressedOops::base_disjoint());
   ins_cost(DEFAULT_COST);
 
   format %{ "MOV     $dst, heapbase \t\n"
@@ -7227,7 +7227,7 @@
   effect(TEMP_DEF dst, TEMP crx);
   predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
              n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
-            Universe::narrow_oop_base_disjoint() && VM_Version::has_isel());
+            CompressedOops::base_disjoint() && VM_Version::has_isel());
   ins_cost(3 * DEFAULT_COST);
 
   format %{ "DecodeN  $dst, $src \t// decode with disjoint base using isel" %}
@@ -7276,8 +7276,8 @@
   match(Set dst (DecodeN src));
   predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
              n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
-            Universe::narrow_oop_shift() != 0 &&
-            Universe::narrow_oop_base() != 0);
+            CompressedOops::shift() != 0 &&
+            CompressedOops::base() != 0);
   ins_cost(2 * DEFAULT_COST);
 
   format %{ "DecodeN $dst, $src \t// $src != NULL, postalloc expanded" %}
@@ -7287,7 +7287,7 @@
 // Compressed OOPs with narrow_oop_shift == 0.
 instruct decodeN_unscaled(iRegPdst dst, iRegNsrc src) %{
   match(Set dst (DecodeN src));
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   ins_cost(DEFAULT_COST);
 
   format %{ "MR      $dst, $src \t// DecodeN (unscaled)" %}
@@ -7302,7 +7302,7 @@
 // Convert compressed oop into int for vectors alignment masking.
 instruct decodeN2I_unscaled(iRegIdst dst, iRegNsrc src) %{
   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   ins_cost(DEFAULT_COST);
 
   format %{ "MR      $dst, $src \t// (int)DecodeN (unscaled)" %}
@@ -7328,7 +7328,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
-    __ srdi($dst$$Register, $src$$Register, Universe::narrow_klass_shift());
+    __ srdi($dst$$Register, $src$$Register, CompressedKlassPointers::shift());
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -7351,13 +7351,13 @@
 // Disjoint narrow oop base.
 instruct encodePKlass_Disjoint(iRegNdst dst, iRegPsrc src) %{
   match(Set dst (EncodePKlass src));
-  predicate(false /* TODO: PPC port Universe::narrow_klass_base_disjoint()*/);
+  predicate(false /* TODO: PPC port CompressedKlassPointers::base_disjoint()*/);
 
   format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with disjoint base" %}
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
-    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_klass_shift(), 32);
+    __ rldicl($dst$$Register, $src$$Register, 64-CompressedKlassPointers::shift(), 32);
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -7392,13 +7392,13 @@
 // shift != 0, base != 0
 instruct encodePKlass_not_null_ExEx(iRegNdst dst, iRegPsrc src) %{
   match(Set dst (EncodePKlass src));
-  //predicate(Universe::narrow_klass_shift() != 0 &&
-  //          true /* TODO: PPC port Universe::narrow_klass_base_overlaps()*/);
+  //predicate(CompressedKlassPointers::shift() != 0 &&
+  //          true /* TODO: PPC port CompressedKlassPointers::base_overlaps()*/);
 
   //format %{ "EncodePKlass $dst, $src\t// $src != Null, postalloc expanded" %}
   ins_cost(DEFAULT_COST*2);  // Don't count constant.
   expand %{
-    immL baseImm %{ (jlong)(intptr_t)Universe::narrow_klass_base() %}
+    immL baseImm %{ (jlong)(intptr_t)CompressedKlassPointers::base() %}
     iRegLdst base;
     loadConL_Ex(base, baseImm);
     encodePKlass_not_null_Ex(dst, base, src);
@@ -7417,7 +7417,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
-    __ sldi($dst$$Register, $src$$Register, Universe::narrow_klass_shift());
+    __ sldi($dst$$Register, $src$$Register, CompressedKlassPointers::shift());
   %}
   ins_pipe(pipe_class_default);
 %}
@@ -7470,8 +7470,8 @@
 // src != 0, shift != 0, base != 0
 instruct decodeNKlass_notNull_addBase_ExEx(iRegPdst dst, iRegNsrc src) %{
   match(Set dst (DecodeNKlass src));
-  // predicate(Universe::narrow_klass_shift() != 0 &&
-  //           Universe::narrow_klass_base() != 0);
+  // predicate(CompressedKlassPointers::shift() != 0 &&
+  //           CompressedKlassPointers::base() != 0);
 
   //format %{ "DecodeNKlass $dst, $src \t// $src != NULL, expanded" %}
 
@@ -7479,7 +7479,7 @@
   expand %{
     // We add first, then we shift. Like this, we can get along with one register less.
     // But we have to load the base pre-shifted.
-    immL baseImm %{ (jlong)((intptr_t)Universe::narrow_klass_base() >> Universe::narrow_klass_shift()) %}
+    immL baseImm %{ (jlong)((intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift()) %}
     iRegLdst base;
     loadConL_Ex(base, baseImm);
     decodeNKlass_notNull_addBase_Ex(dst, base, src);
--- a/src/hotspot/cpu/ppc/relocInfo_ppc.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/ppc/relocInfo_ppc.cpp	Thu May 09 14:26:03 2019 +0200
@@ -58,7 +58,7 @@
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
              "how to encode else?");
       narrowOop no = (type() == relocInfo::oop_type) ?
-          CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
+          CompressedOops::encode((oop)x) : CompressedKlassPointers::encode((Klass*)x);
       nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
     }
   } else {
--- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp	Thu May 09 14:26:03 2019 +0200
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "oops/compressedOops.hpp"
 
 #define __ masm->
 
@@ -79,7 +80,7 @@
     if (UseCompressedOops && in_heap) {
       if (val == noreg) {
         __ clear_mem(addr, 4);
-      } else if (Universe::narrow_oop_mode() == Universe::UnscaledNarrowOop) {
+      } else if (CompressedOops::mode() == CompressedOops::UnscaledNarrowOop) {
         __ z_st(val, addr);
       } else {
         Register tmp = (tmp1 != Z_R1) ? tmp1 : tmp2; // Avoid tmp == Z_R1 (see oop_encoder).
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Thu May 09 14:26:03 2019 +0200
@@ -1171,7 +1171,7 @@
 // Load narrow klass constant, compression required.
 void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
   assert(UseCompressedClassPointers, "must be on to call this method");
-  narrowKlass encoded_k = Klass::encode_klass(k);
+  narrowKlass encoded_k = CompressedKlassPointers::encode(k);
   load_const_32to64(t, encoded_k, false /*sign_extend*/);
 }
 
@@ -1189,7 +1189,7 @@
 // Compare narrow oop in reg with narrow oop constant, no decompression.
 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
   assert(UseCompressedClassPointers, "must be on to call this method");
-  narrowKlass encoded_k = Klass::encode_klass(klass2);
+  narrowKlass encoded_k = CompressedKlassPointers::encode(klass2);
 
   Assembler::z_clfi(klass1, encoded_k);
 }
@@ -1285,7 +1285,7 @@
 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
 
-  narrowKlass nk = Klass::encode_klass(k);
+  narrowKlass nk = CompressedKlassPointers::encode(k);
   return patch_load_const_32to64(pos, nk);
 }
 
@@ -1303,7 +1303,7 @@
 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
 
-  narrowKlass nk = Klass::encode_klass(k);
+  narrowKlass nk = CompressedKlassPointers::encode(k);
   return patch_compare_immediate_32(pos, nk);
 }
 
@@ -3606,8 +3606,8 @@
 // Klass oop manipulations if compressed.
 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
-  address  base    = Universe::narrow_klass_base();
-  int      shift   = Universe::narrow_klass_shift();
+  address  base    = CompressedKlassPointers::base();
+  int      shift   = CompressedKlassPointers::shift();
   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
 
   BLOCK_COMMENT("cKlass encoder {");
@@ -3655,8 +3655,8 @@
 // when (Universe::heap() != NULL). Hence, if the instructions
 // it generates change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  address  base    = Universe::narrow_klass_base();
-  int shift_size   = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */
+  address  base    = CompressedKlassPointers::base();
+  int shift_size   = CompressedKlassPointers::shift() == 0 ? 0 : 6; /* sllg */
   int addbase_size = 0;
   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
 
@@ -3685,8 +3685,8 @@
 // This variant of decode_klass_not_null() must generate predictable code!
 // The code must only depend on globally known parameters.
 void MacroAssembler::decode_klass_not_null(Register dst) {
-  address  base    = Universe::narrow_klass_base();
-  int      shift   = Universe::narrow_klass_shift();
+  address  base    = CompressedKlassPointers::base();
+  int      shift   = CompressedKlassPointers::shift();
   int      beg_off = offset();
   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
 
@@ -3728,8 +3728,8 @@
 //  1) the size of the generated instructions may vary
 //  2) the result is (potentially) stored in a register different from the source.
 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
-  address base  = Universe::narrow_klass_base();
-  int     shift = Universe::narrow_klass_shift();
+  address base  = CompressedKlassPointers::base();
+  int     shift = CompressedKlassPointers::shift();
   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
 
   BLOCK_COMMENT("cKlass decoder {");
@@ -3829,8 +3829,8 @@
   BLOCK_COMMENT("compare klass ptr {");
 
   if (UseCompressedClassPointers) {
-    const int shift = Universe::narrow_klass_shift();
-    address   base  = Universe::narrow_klass_base();
+    const int shift = CompressedKlassPointers::shift();
+    address   base  = CompressedKlassPointers::base();
 
     assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
     assert_different_registers(Rop1, Z_R0);
@@ -3963,8 +3963,8 @@
   Register Rindex = mem.indexOrR0();
   int64_t  disp   = mem.disp();
 
-  const int shift = Universe::narrow_oop_shift();
-  address   base  = Universe::narrow_oop_base();
+  const int shift = CompressedOops::shift();
+  address   base  = CompressedOops::base();
 
   assert(UseCompressedOops, "must be on to call this method");
   assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
@@ -4075,9 +4075,9 @@
 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
                                  Register Rbase, int pow2_offset, bool only32bitValid) {
 
-  const address oop_base  = Universe::narrow_oop_base();
-  const int     oop_shift = Universe::narrow_oop_shift();
-  const bool    disjoint  = Universe::narrow_oop_base_disjoint();
+  const address oop_base  = CompressedOops::base();
+  const int     oop_shift = CompressedOops::shift();
+  const bool    disjoint  = CompressedOops::base_disjoint();
 
   assert(UseCompressedOops, "must be on to call this method");
   assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
@@ -4210,9 +4210,9 @@
 //  - avoid Z_R1 for Rdst if Rdst == Rbase.
 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
 
-  const address oop_base  = Universe::narrow_oop_base();
-  const int     oop_shift = Universe::narrow_oop_shift();
-  const bool    disjoint  = Universe::narrow_oop_base_disjoint();
+  const address oop_base  = CompressedOops::base();
+  const int     oop_shift = CompressedOops::shift();
+  const bool    disjoint  = CompressedOops::base_disjoint();
 
   assert(UseCompressedOops, "must be on to call this method");
   assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
--- a/src/hotspot/cpu/s390/s390.ad	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/s390/s390.ad	Thu May 09 14:26:03 2019 +0200
@@ -1644,7 +1644,7 @@
 
 // Set this as clone_shift_expressions.
 bool Matcher::narrow_oop_use_complex_address() {
-  if (Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0) return true;
+  if (CompressedOops::base() == NULL && CompressedOops::shift() == 0) return true;
   return false;
 }
 
@@ -1657,12 +1657,12 @@
 
 bool Matcher::const_oop_prefer_decode() {
   // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
-  return Universe::narrow_oop_base() == NULL;
+  return CompressedOops::base() == NULL;
 }
 
 bool Matcher::const_klass_prefer_decode() {
   // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
-  return Universe::narrow_klass_base() == NULL;
+  return CompressedKlassPointers::base() == NULL;
 }
 
 // Is it better to copy float constants, or load them directly from memory?
@@ -3507,7 +3507,7 @@
 // Operands to remove register moves in unscaled mode.
 // Match read/write registers with an EncodeP node if neither shift nor add are required.
 operand iRegP2N(iRegP reg) %{
-  predicate(Universe::narrow_oop_shift() == 0 && _leaf->as_EncodeP()->in(0) == NULL);
+  predicate(CompressedOops::shift() == 0 && _leaf->as_EncodeP()->in(0) == NULL);
   constraint(ALLOC_IN_RC(z_memory_ptr_reg));
   match(EncodeP reg);
   format %{ "$reg" %}
@@ -3515,7 +3515,7 @@
 %}
 
 operand iRegN2P(iRegN reg) %{
-  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0 &&
+  predicate(CompressedOops::base() == NULL && CompressedOops::shift() == 0 &&
             _leaf->as_DecodeN()->in(0) == NULL);
   constraint(ALLOC_IN_RC(z_memory_ptr_reg));
   match(DecodeN reg);
@@ -4784,7 +4784,7 @@
 
 instruct decodeLoadN(iRegP dst, memory mem) %{
   match(Set dst (DecodeN (LoadN mem)));
-  predicate(false && (Universe::narrow_oop_base()==NULL)&&(Universe::narrow_oop_shift()==0));
+  predicate(false && (CompressedOops::base()==NULL)&&(CompressedOops::shift()==0));
   ins_cost(MEMORY_REF_COST);
   size(Z_DISP3_SIZE);
   format %{ "DecodeLoadN  $dst,$mem\t# (cOop Load+Decode)" %}
@@ -4795,7 +4795,7 @@
 
 instruct decodeLoadNKlass(iRegP dst, memory mem) %{
   match(Set dst (DecodeNKlass (LoadNKlass mem)));
-  predicate(false && (Universe::narrow_klass_base()==NULL)&&(Universe::narrow_klass_shift()==0));
+  predicate(false && (CompressedKlassPointers::base()==NULL)&&(CompressedKlassPointers::shift()==0));
   ins_cost(MEMORY_REF_COST);
   size(Z_DISP3_SIZE);
   format %{ "DecodeLoadNKlass  $dst,$mem\t# (load/decode NKlass)" %}
@@ -4823,7 +4823,7 @@
 instruct decodeN(iRegP dst, iRegN src, flagsReg cr) %{
   match(Set dst (DecodeN src));
   effect(KILL cr);
-  predicate(Universe::narrow_oop_base() == NULL || !ExpandLoadingBaseDecode);
+  predicate(CompressedOops::base() == NULL || !ExpandLoadingBaseDecode);
   ins_cost(MEMORY_REF_COST+3 * DEFAULT_COST + BRANCH_COST);
   // TODO: s390 port size(VARIABLE_SIZE);
   format %{ "decodeN  $dst,$src\t# (decode cOop)" %}
@@ -4847,7 +4847,7 @@
   effect(KILL cr);
   predicate((n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull ||
              n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
-            (Universe::narrow_oop_base()== NULL || !ExpandLoadingBaseDecode_NN));
+            (CompressedOops::base()== NULL || !ExpandLoadingBaseDecode_NN));
   ins_cost(MEMORY_REF_COST+2 * DEFAULT_COST);
   // TODO: s390 port size(VARIABLE_SIZE);
   format %{ "decodeN  $dst,$src\t# (decode cOop NN)" %}
@@ -4876,7 +4876,7 @@
     format %{ "decodeN  $dst = ($src == 0) ? NULL : ($src << 3) + $base + pow2_offset\t# (decode cOop)" %}
     ins_encode %{
       __ oop_decoder($dst$$Register, $src$$Register, true, $base$$Register,
-                     (jlong)MacroAssembler::get_oop_base_pow2_offset((uint64_t)(intptr_t)Universe::narrow_oop_base()));
+                     (jlong)MacroAssembler::get_oop_base_pow2_offset((uint64_t)(intptr_t)CompressedOops::base()));
     %}
     ins_pipe(pipe_class_dummy);
   %}
@@ -4890,7 +4890,7 @@
     format %{ "decodeN  $dst = ($src << 3) + $base + pow2_offset\t# (decode cOop)" %}
     ins_encode %{
       __ oop_decoder($dst$$Register, $src$$Register, false, $base$$Register,
-                     (jlong)MacroAssembler::get_oop_base_pow2_offset((uint64_t)(intptr_t)Universe::narrow_oop_base()));
+                     (jlong)MacroAssembler::get_oop_base_pow2_offset((uint64_t)(intptr_t)CompressedOops::base()));
     %}
     ins_pipe(pipe_class_dummy);
   %}
@@ -4898,11 +4898,11 @@
 // Decoder for heapbased mode peeling off loading the base.
 instruct decodeN_Ex(iRegP dst, iRegN src, flagsReg cr) %{
   match(Set dst (DecodeN src));
-  predicate(Universe::narrow_oop_base() != NULL && ExpandLoadingBaseDecode);
+  predicate(CompressedOops::base() != NULL && ExpandLoadingBaseDecode);
   ins_cost(MEMORY_REF_COST+3 * DEFAULT_COST + BRANCH_COST);
   // TODO: s390 port size(VARIABLE_SIZE);
   expand %{
-    immL baseImm %{ (jlong)(intptr_t)Universe::narrow_oop_base() %}
+    immL baseImm %{ (jlong)(intptr_t)CompressedOops::base() %}
     iRegL base;
     loadBase(base, baseImm);
     decodeN_base(dst, src, base, cr);
@@ -4914,11 +4914,11 @@
   match(Set dst (DecodeN src));
   predicate((n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull ||
              n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
-            Universe::narrow_oop_base() != NULL && ExpandLoadingBaseDecode_NN);
+            CompressedOops::base() != NULL && ExpandLoadingBaseDecode_NN);
   ins_cost(MEMORY_REF_COST+2 * DEFAULT_COST);
   // TODO: s390 port size(VARIABLE_SIZE);
   expand %{
-    immL baseImm %{ (jlong)(intptr_t)Universe::narrow_oop_base() %}
+    immL baseImm %{ (jlong)(intptr_t)CompressedOops::base() %}
     iRegL base;
     loadBase(base, baseImm);
     decodeN_NN_base(dst, src, base, cr);
@@ -4932,8 +4932,8 @@
   match(Set dst (EncodeP src));
   effect(KILL cr);
   predicate((n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull) &&
-            (Universe::narrow_oop_base() == 0 ||
-             Universe::narrow_oop_base_disjoint() ||
+            (CompressedOops::base() == 0 ||
+             CompressedOops::base_disjoint() ||
              !ExpandLoadingBaseEncode));
   ins_cost(MEMORY_REF_COST+3 * DEFAULT_COST);
   // TODO: s390 port size(VARIABLE_SIZE);
@@ -4955,8 +4955,8 @@
   match(Set dst (EncodeP src));
   effect(KILL cr);
   predicate((n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull) &&
-            (Universe::narrow_oop_base() == 0 ||
-             Universe::narrow_oop_base_disjoint() ||
+            (CompressedOops::base() == 0 ||
+             CompressedOops::base_disjoint() ||
              !ExpandLoadingBaseEncode_NN));
   ins_cost(MEMORY_REF_COST+3 * DEFAULT_COST);
   // TODO: s390 port size(VARIABLE_SIZE);
@@ -4975,7 +4975,7 @@
     format %{ "encodeP  $dst = ($src>>3) +$base + pow2_offset\t# (encode cOop)" %}
     ins_encode %{
       jlong offset = -(jlong)MacroAssembler::get_oop_base_pow2_offset
-        (((uint64_t)(intptr_t)Universe::narrow_oop_base()) >> Universe::narrow_oop_shift());
+        (((uint64_t)(intptr_t)CompressedOops::base()) >> CompressedOops::shift());
       __ oop_encoder($dst$$Register, $src$$Register, true, $base$$Register, offset);
     %}
     ins_pipe(pipe_class_dummy);
@@ -4998,11 +4998,11 @@
   match(Set dst (EncodeP src));
   effect(KILL cr);
   predicate((n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull) &&
-            (Universe::narrow_oop_base_overlaps() && ExpandLoadingBaseEncode));
+            (CompressedOops::base_overlaps() && ExpandLoadingBaseEncode));
   ins_cost(MEMORY_REF_COST+3 * DEFAULT_COST);
   // TODO: s390 port size(VARIABLE_SIZE);
   expand %{
-    immL baseImm %{ ((jlong)(intptr_t)Universe::narrow_oop_base()) >> Universe::narrow_oop_shift() %}
+    immL baseImm %{ ((jlong)(intptr_t)CompressedOops::base()) >> CompressedOops::shift() %}
     immL_0 zero %{ (0) %}
     flagsReg ccr;
     iRegL base;
@@ -5018,12 +5018,12 @@
   match(Set dst (EncodeP src));
   effect(KILL cr);
   predicate((n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull) &&
-            (Universe::narrow_oop_base_overlaps() && ExpandLoadingBaseEncode_NN));
+            (CompressedOops::base_overlaps() && ExpandLoadingBaseEncode_NN));
   ins_cost(MEMORY_REF_COST+3 * DEFAULT_COST);
   // TODO: s390 port size(VARIABLE_SIZE);
   expand %{
-    immL baseImm %{ (jlong)(intptr_t)Universe::narrow_oop_base() %}
-    immL pow2_offset %{ -(jlong)MacroAssembler::get_oop_base_pow2_offset(((uint64_t)(intptr_t)Universe::narrow_oop_base())) %}
+    immL baseImm %{ (jlong)(intptr_t)CompressedOops::base() %}
+    immL pow2_offset %{ -(jlong)MacroAssembler::get_oop_base_pow2_offset(((uint64_t)(intptr_t)CompressedOops::base())) %}
     immL_0 zero %{ 0 %}
     flagsReg ccr;
     iRegL base;
@@ -6209,7 +6209,7 @@
 
 instruct addP_regN_reg_imm12(iRegP dst, iRegP_N2P src1, iRegL src2, uimmL12 con) %{
   match(Set dst (AddP (AddP src1 src2) con));
-  predicate( PreferLAoverADD && Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
+  predicate( PreferLAoverADD && CompressedOops::base() == NULL && CompressedOops::shift() == 0);
   ins_cost(DEFAULT_COST_LOW);
   size(4);
   format %{ "LA      $dst,$con($src1,$src2)\t # ptr d12(x,b)" %}
@@ -6231,7 +6231,7 @@
 
 instruct addP_regN_reg_imm20(iRegP dst, iRegP_N2P src1, iRegL src2, immL20 con) %{
   match(Set dst (AddP (AddP src1 src2) con));
-  predicate( PreferLAoverADD && Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
+  predicate( PreferLAoverADD && CompressedOops::base() == NULL && CompressedOops::shift() == 0);
   ins_cost(DEFAULT_COST);
   // TODO: s390 port size(FIXED_SIZE);
   format %{ "LAY     $dst,$con($src1,$src2)\t # ptr d20(x,b)" %}
@@ -8555,7 +8555,7 @@
 // Don't use LTGFR which performs sign extend.
 instruct compP_decode_reg_imm0(flagsReg cr, iRegN op1, immP0 op2) %{
   match(Set cr (CmpP (DecodeN op1) op2));
-  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::base() == NULL && CompressedOops::shift() == 0);
   ins_cost(DEFAULT_COST_LOW);
   size(2);
   format %{ "LTR    $op1, $op1\t # ptr" %}
@@ -11024,4 +11024,3 @@
 
 // ============================================================================
 // TYPE PROFILING RULES
-
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu May 09 14:26:03 2019 +0200
@@ -33,6 +33,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/accessDecorators.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/klass.inline.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
@@ -1015,7 +1016,7 @@
   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  narrowOop encoded_k = Klass::encode_klass(k);
+  narrowOop encoded_k = CompressedKlassPointers::encode(k);
 
   assert_not_delayed();
   // Relocation with special format (see relocInfo_sparc.hpp).
@@ -3295,9 +3296,9 @@
 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
   assert (UseCompressedOops, "must be compressed");
   assert (Universe::heap() != NULL, "java heap should be initialized");
-  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
   verify_oop(src);
-  if (Universe::narrow_oop_base() == NULL) {
+  if (CompressedOops::base() == NULL) {
     srlx(src, LogMinObjAlignmentInBytes, dst);
     return;
   }
@@ -3323,9 +3324,9 @@
 void MacroAssembler::encode_heap_oop_not_null(Register r) {
   assert (UseCompressedOops, "must be compressed");
   assert (Universe::heap() != NULL, "java heap should be initialized");
-  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
   verify_oop(r);
-  if (Universe::narrow_oop_base() != NULL)
+  if (CompressedOops::base() != NULL)
     sub(r, G6_heapbase, r);
   srlx(r, LogMinObjAlignmentInBytes, r);
 }
@@ -3333,9 +3334,9 @@
 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
   assert (UseCompressedOops, "must be compressed");
   assert (Universe::heap() != NULL, "java heap should be initialized");
-  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
   verify_oop(src);
-  if (Universe::narrow_oop_base() == NULL) {
+  if (CompressedOops::base() == NULL) {
     srlx(src, LogMinObjAlignmentInBytes, dst);
   } else {
     sub(src, G6_heapbase, dst);
@@ -3347,9 +3348,9 @@
 void  MacroAssembler::decode_heap_oop(Register src, Register dst) {
   assert (UseCompressedOops, "must be compressed");
   assert (Universe::heap() != NULL, "java heap should be initialized");
-  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
   sllx(src, LogMinObjAlignmentInBytes, dst);
-  if (Universe::narrow_oop_base() != NULL) {
+  if (CompressedOops::base() != NULL) {
     Label done;
     bpr(rc_nz, true, Assembler::pt, dst, done);
     delayed() -> add(dst, G6_heapbase, dst); // annuled if not taken
@@ -3364,9 +3365,9 @@
   // Also do not verify_oop as this is called by verify_oop.
   assert (UseCompressedOops, "must be compressed");
   assert (Universe::heap() != NULL, "java heap should be initialized");
-  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
   sllx(r, LogMinObjAlignmentInBytes, r);
-  if (Universe::narrow_oop_base() != NULL)
+  if (CompressedOops::base() != NULL)
     add(r, G6_heapbase, r);
 }
 
@@ -3375,26 +3376,26 @@
   // pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   assert (UseCompressedOops, "must be compressed");
-  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
   sllx(src, LogMinObjAlignmentInBytes, dst);
-  if (Universe::narrow_oop_base() != NULL)
+  if (CompressedOops::base() != NULL)
     add(dst, G6_heapbase, dst);
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
   assert (UseCompressedClassPointers, "must be compressed");
-  if (Universe::narrow_klass_base() != NULL) {
+  if (CompressedKlassPointers::base() != NULL) {
     assert(r != G6_heapbase, "bad register choice");
-    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+    set((intptr_t)CompressedKlassPointers::base(), G6_heapbase);
     sub(r, G6_heapbase, r);
-    if (Universe::narrow_klass_shift() != 0) {
-      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+    if (CompressedKlassPointers::shift() != 0) {
+      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
       srlx(r, LogKlassAlignmentInBytes, r);
     }
     reinit_heapbase();
   } else {
-    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
-    srlx(r, Universe::narrow_klass_shift(), r);
+    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong");
+    srlx(r, CompressedKlassPointers::shift(), r);
   }
 }
 
@@ -3403,16 +3404,16 @@
     encode_klass_not_null(src);
   } else {
     assert (UseCompressedClassPointers, "must be compressed");
-    if (Universe::narrow_klass_base() != NULL) {
-      set((intptr_t)Universe::narrow_klass_base(), dst);
+    if (CompressedKlassPointers::base() != NULL) {
+      set((intptr_t)CompressedKlassPointers::base(), dst);
       sub(src, dst, dst);
-      if (Universe::narrow_klass_shift() != 0) {
+      if (CompressedKlassPointers::shift() != 0) {
         srlx(dst, LogKlassAlignmentInBytes, dst);
       }
     } else {
       // shift src into dst
-      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
-      srlx(src, Universe::narrow_klass_shift(), dst);
+      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong");
+      srlx(src, CompressedKlassPointers::shift(), dst);
     }
   }
 }
@@ -3423,11 +3424,11 @@
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
   assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   int num_instrs = 1;  // shift src,dst or add
-  if (Universe::narrow_klass_base() != NULL) {
+  if (CompressedKlassPointers::base() != NULL) {
     // set + add + set
-    num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) +
-                  insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
-    if (Universe::narrow_klass_shift() != 0) {
+    num_instrs += insts_for_internal_set((intptr_t)CompressedKlassPointers::base()) +
+                  insts_for_internal_set((intptr_t)CompressedOops::ptrs_base());
+    if (CompressedKlassPointers::shift() != 0) {
       num_instrs += 1;  // sllx
     }
   }
@@ -3440,16 +3441,16 @@
   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   // pd_code_size_limit.
   assert (UseCompressedClassPointers, "must be compressed");
-  if (Universe::narrow_klass_base() != NULL) {
+  if (CompressedKlassPointers::base() != NULL) {
     assert(r != G6_heapbase, "bad register choice");
-    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
-    if (Universe::narrow_klass_shift() != 0)
+    set((intptr_t)CompressedKlassPointers::base(), G6_heapbase);
+    if (CompressedKlassPointers::shift() != 0)
       sllx(r, LogKlassAlignmentInBytes, r);
     add(r, G6_heapbase, r);
     reinit_heapbase();
   } else {
-    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
-    sllx(r, Universe::narrow_klass_shift(), r);
+    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong");
+    sllx(r, CompressedKlassPointers::shift(), r);
   }
 }
 
@@ -3460,21 +3461,21 @@
     // Do not add assert code to this unless you change vtableStubs_sparc.cpp
     // pd_code_size_limit.
     assert (UseCompressedClassPointers, "must be compressed");
-    if (Universe::narrow_klass_base() != NULL) {
-      if (Universe::narrow_klass_shift() != 0) {
+    if (CompressedKlassPointers::base() != NULL) {
+      if (CompressedKlassPointers::shift() != 0) {
         assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
-        set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+        set((intptr_t)CompressedKlassPointers::base(), G6_heapbase);
         sllx(src, LogKlassAlignmentInBytes, dst);
         add(dst, G6_heapbase, dst);
         reinit_heapbase();
       } else {
-        set((intptr_t)Universe::narrow_klass_base(), dst);
+        set((intptr_t)CompressedKlassPointers::base(), dst);
         add(src, dst, dst);
       }
     } else {
       // shift/mov src into dst.
-      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
-      sllx(src, Universe::narrow_klass_shift(), dst);
+      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong");
+      sllx(src, CompressedKlassPointers::shift(), dst);
     }
   }
 }
@@ -3482,9 +3483,9 @@
 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
-      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
+      set((intptr_t)CompressedOops::ptrs_base(), G6_heapbase);
     } else {
-      AddressLiteral base(Universe::narrow_ptrs_base_addr());
+      AddressLiteral base(CompressedOops::ptrs_base_addr());
       load_ptr_contents(base, G6_heapbase);
     }
   }
--- a/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Thu May 09 14:26:03 2019 +0200
@@ -98,7 +98,7 @@
     guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
     if (format() != 0) {
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
-      jint np = type() == relocInfo::oop_type ? CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
+      jint np = type() == relocInfo::oop_type ? CompressedOops::encode((oop)x) : CompressedKlassPointers::encode((Klass*)x);
       inst &= ~Assembler::hi22(-1);
       inst |=  Assembler::hi22((intptr_t)np);
       if (verify_only) {
--- a/src/hotspot/cpu/sparc/sparc.ad	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/sparc/sparc.ad	Thu May 09 14:26:03 2019 +0200
@@ -1579,15 +1579,15 @@
   if (UseCompressedClassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
-    if (Universe::narrow_klass_base() != 0) {
-      st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
-      if (Universe::narrow_klass_shift() != 0) {
-        st->print_cr("\tSLL    R_G5,Universe::narrow_klass_shift,R_G5");
+    if (CompressedKlassPointers::base() != 0) {
+      st->print_cr("\tSET    CompressedKlassPointers::base,R_G6_heap_base");
+      if (CompressedKlassPointers::shift() != 0) {
+        st->print_cr("\tSLL    R_G5,CompressedKlassPointers::shift,R_G5");
       }
       st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
-      st->print_cr("\tSET    Universe::narrow_ptrs_base,R_G6_heap_base");
+      st->print_cr("\tSET    CompressedOops::ptrs_base,R_G6_heap_base");
     } else {
-      st->print_cr("\tSLL    R_G5,Universe::narrow_klass_shift,R_G5");
+      st->print_cr("\tSLL    R_G5,CompressedKlassPointers::shift,R_G5");
     }
   } else {
     st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
@@ -1827,14 +1827,14 @@
 bool Matcher::const_oop_prefer_decode() {
   // TODO: Check if loading ConP from TOC in heap-based mode is better:
   // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
-  // return Universe::narrow_oop_base() == NULL;
+  // return CompressedOops::base() == NULL;
   return true;
 }
 
 bool Matcher::const_klass_prefer_decode() {
   // TODO: Check if loading ConP from TOC in heap-based mode is better:
   // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
-  // return Universe::narrow_klass_base() == NULL;
+  // return CompressedKlassPointers::base() == NULL;
   return true;
 }
 
@@ -6251,7 +6251,7 @@
   ins_encode %{
     __ encode_heap_oop($src$$Register, $dst$$Register);
   %}
-  ins_avoid_back_to_back(Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE);
+  ins_avoid_back_to_back(CompressedOops::base() == NULL ? AVOID_NONE : AVOID_BEFORE);
   ins_pipe(ialu_reg);
 %}
 
--- a/src/hotspot/cpu/sparc/vm_version_sparc.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/sparc/vm_version_sparc.cpp	Thu May 09 14:26:03 2019 +0200
@@ -28,6 +28,7 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
@@ -84,8 +85,8 @@
 
   // 32-bit oops don't make sense for the 64-bit VM on SPARC since the 32-bit
   // VM has the same registers and smaller objects.
-  Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
-  Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+  CompressedOops::set_shift(LogMinObjAlignmentInBytes);
+  CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
 
 #ifdef COMPILER2
   if (has_fast_ind_br() && FLAG_IS_DEFAULT(UseJumpTables)) {
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu May 09 14:26:03 2019 +0200
@@ -34,6 +34,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/accessDecorators.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
@@ -5278,7 +5279,7 @@
   if (CheckCompressedOops) {
     Label ok;
     push(rscratch1); // cmpptr trashes rscratch1
-    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+    cmpptr(r12_heapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
     jcc(Assembler::equal, ok);
     STOP(msg);
     bind(ok);
@@ -5293,9 +5294,9 @@
   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
 #endif
   verify_oop(r, "broken oop in encode_heap_oop");
-  if (Universe::narrow_oop_base() == NULL) {
-    if (Universe::narrow_oop_shift() != 0) {
-      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  if (CompressedOops::base() == NULL) {
+    if (CompressedOops::shift() != 0) {
+      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
       shrq(r, LogMinObjAlignmentInBytes);
     }
     return;
@@ -5318,11 +5319,11 @@
   }
 #endif
   verify_oop(r, "broken oop in encode_heap_oop_not_null");
-  if (Universe::narrow_oop_base() != NULL) {
+  if (CompressedOops::base() != NULL) {
     subq(r, r12_heapbase);
   }
-  if (Universe::narrow_oop_shift() != 0) {
-    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  if (CompressedOops::shift() != 0) {
+    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
     shrq(r, LogMinObjAlignmentInBytes);
   }
 }
@@ -5342,11 +5343,11 @@
   if (dst != src) {
     movq(dst, src);
   }
-  if (Universe::narrow_oop_base() != NULL) {
+  if (CompressedOops::base() != NULL) {
     subq(dst, r12_heapbase);
   }
-  if (Universe::narrow_oop_shift() != 0) {
-    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  if (CompressedOops::shift() != 0) {
+    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
     shrq(dst, LogMinObjAlignmentInBytes);
   }
 }
@@ -5355,9 +5356,9 @@
 #ifdef ASSERT
   verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
 #endif
-  if (Universe::narrow_oop_base() == NULL) {
-    if (Universe::narrow_oop_shift() != 0) {
-      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  if (CompressedOops::base() == NULL) {
+    if (CompressedOops::shift() != 0) {
+      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
       shlq(r, LogMinObjAlignmentInBytes);
     }
   } else {
@@ -5377,14 +5378,14 @@
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_oop_shift() != 0) {
-    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  if (CompressedOops::shift() != 0) {
+    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
     shlq(r, LogMinObjAlignmentInBytes);
-    if (Universe::narrow_oop_base() != NULL) {
+    if (CompressedOops::base() != NULL) {
       addq(r, r12_heapbase);
     }
   } else {
-    assert (Universe::narrow_oop_base() == NULL, "sanity");
+    assert (CompressedOops::base() == NULL, "sanity");
   }
 }
 
@@ -5395,8 +5396,8 @@
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_oop_shift() != 0) {
-    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+  if (CompressedOops::shift() != 0) {
+    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
     if (LogMinObjAlignmentInBytes == Address::times_8) {
       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
     } else {
@@ -5404,12 +5405,12 @@
         movq(dst, src);
       }
       shlq(dst, LogMinObjAlignmentInBytes);
-      if (Universe::narrow_oop_base() != NULL) {
+      if (CompressedOops::base() != NULL) {
         addq(dst, r12_heapbase);
       }
     }
   } else {
-    assert (Universe::narrow_oop_base() == NULL, "sanity");
+    assert (CompressedOops::base() == NULL, "sanity");
     if (dst != src) {
       movq(dst, src);
     }
@@ -5417,17 +5418,17 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
-  if (Universe::narrow_klass_base() != NULL) {
+  if (CompressedKlassPointers::base() != NULL) {
     // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
     assert(r != r12_heapbase, "Encoding a klass in r12");
-    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+    mov64(r12_heapbase, (int64_t)CompressedKlassPointers::base());
     subq(r, r12_heapbase);
   }
-  if (Universe::narrow_klass_shift() != 0) {
-    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+  if (CompressedKlassPointers::shift() != 0) {
+    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
     shrq(r, LogKlassAlignmentInBytes);
   }
-  if (Universe::narrow_klass_base() != NULL) {
+  if (CompressedKlassPointers::base() != NULL) {
     reinit_heapbase();
   }
 }
@@ -5436,15 +5437,15 @@
   if (dst == src) {
     encode_klass_not_null(src);
   } else {
-    if (Universe::narrow_klass_base() != NULL) {
-      mov64(dst, (int64_t)Universe::narrow_klass_base());
+    if (CompressedKlassPointers::base() != NULL) {
+      mov64(dst, (int64_t)CompressedKlassPointers::base());
       negq(dst);
       addq(dst, src);
     } else {
       movptr(dst, src);
     }
-    if (Universe::narrow_klass_shift() != 0) {
-      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+    if (CompressedKlassPointers::shift() != 0) {
+      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
       shrq(dst, LogKlassAlignmentInBytes);
     }
   }
@@ -5456,9 +5457,9 @@
 // generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
   assert (UseCompressedClassPointers, "only for compressed klass ptrs");
-  if (Universe::narrow_klass_base() != NULL) {
+  if (CompressedKlassPointers::base() != NULL) {
     // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
-    return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
+    return (CompressedKlassPointers::shift() == 0 ? 20 : 24);
   } else {
     // longest load decode klass function, mov64, leaq
     return 16;
@@ -5474,13 +5475,13 @@
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_klass_shift() != 0) {
-    assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+  if (CompressedKlassPointers::shift() != 0) {
+    assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
     shlq(r, LogKlassAlignmentInBytes);
   }
   // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
-  if (Universe::narrow_klass_base() != NULL) {
-    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+  if (CompressedKlassPointers::base() != NULL) {
+    mov64(r12_heapbase, (int64_t)CompressedKlassPointers::base());
     addq(r, r12_heapbase);
     reinit_heapbase();
   }
@@ -5495,9 +5496,9 @@
     // Cannot assert, unverified entry point counts instructions (see .ad file)
     // vtableStubs also counts instructions in pd_code_size_limit.
     // Also do not verify_oop as this is called by verify_oop.
-    mov64(dst, (int64_t)Universe::narrow_klass_base());
-    if (Universe::narrow_klass_shift() != 0) {
-      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+    mov64(dst, (int64_t)CompressedKlassPointers::base());
+    if (CompressedKlassPointers::shift() != 0) {
+      assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
       assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
       leaq(dst, Address(dst, src, Address::times_8, 0));
     } else {
@@ -5529,7 +5530,7 @@
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
+  mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 }
 
 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
@@ -5537,7 +5538,7 @@
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
+  mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 }
 
 void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
@@ -5563,7 +5564,7 @@
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
+  Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 }
 
 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
@@ -5571,19 +5572,19 @@
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
+  Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
 }
 
 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
-      if (Universe::narrow_oop_base() == NULL) {
+      if (CompressedOops::base() == NULL) {
         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
       } else {
-        mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
+        mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
       }
     } else {
-      movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+      movptr(r12_heapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
     }
   }
 }
--- a/src/hotspot/cpu/x86/relocInfo_x86.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/x86/relocInfo_x86.cpp	Thu May 09 14:26:03 2019 +0200
@@ -58,9 +58,9 @@
     }
   } else {
       if (verify_only) {
-        guarantee(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
+        guarantee(*(uint32_t*) disp == CompressedKlassPointers::encode((Klass*)x), "instructions must match");
       } else {
-        *(int32_t*) disp = Klass::encode_klass((Klass*)x);
+        *(int32_t*) disp = CompressedKlassPointers::encode((Klass*)x);
       }
     }
   } else {
--- a/src/hotspot/cpu/x86/x86_64.ad	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/cpu/x86/x86_64.ad	Thu May 09 14:26:03 2019 +0200
@@ -1662,7 +1662,7 @@
   // TODO: Either support matching DecodeNKlass (heap-based) in operand
   //       or condisider the following:
   // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
-  //return Universe::narrow_klass_base() == NULL;
+  //return CompressedKlassPointers::base() == NULL;
   return true;
 }
 
@@ -3914,9 +3914,9 @@
 
 // Indirect Narrow Oop Plus Offset Operand
 // Note: x86 architecture doesn't support "scale * index + offset" without a base
-// we can't free r12 even with Universe::narrow_oop_base() == NULL.
+// we can't free r12 even with CompressedOops::base() == NULL.
 operand indCompressedOopOffset(rRegN reg, immL32 off) %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
+  predicate(UseCompressedOops && (CompressedOops::shift() == Address::times_8));
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) off);
 
@@ -3933,7 +3933,7 @@
 // Indirect Memory Operand
 operand indirectNarrow(rRegN reg)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(DecodeN reg);
 
@@ -3949,7 +3949,7 @@
 // Indirect Memory Plus Short Offset Operand
 operand indOffset8Narrow(rRegN reg, immL8 off)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) off);
 
@@ -3965,7 +3965,7 @@
 // Indirect Memory Plus Long Offset Operand
 operand indOffset32Narrow(rRegN reg, immL32 off)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) off);
 
@@ -3981,7 +3981,7 @@
 // Indirect Memory Plus Index Register Plus Offset Operand
 operand indIndexOffsetNarrow(rRegN reg, rRegL lreg, immL32 off)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (AddP (DecodeN reg) lreg) off);
 
@@ -3998,7 +3998,7 @@
 // Indirect Memory Plus Index Register Plus Offset Operand
 operand indIndexNarrow(rRegN reg, rRegL lreg)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) lreg);
 
@@ -4015,7 +4015,7 @@
 // Indirect Memory Times Scale Plus Index Register
 operand indIndexScaleNarrow(rRegN reg, rRegL lreg, immI2 scale)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) (LShiftL lreg scale));
 
@@ -4032,7 +4032,7 @@
 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
 operand indIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
 
@@ -4050,7 +4050,7 @@
 operand indPosIndexOffsetNarrow(rRegN reg, immL32 off, rRegI idx)
 %{
   constraint(ALLOC_IN_RC(ptr_reg));
-  predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->as_Type()->type()->is_long()->_lo >= 0);
+  predicate(CompressedOops::shift() == 0 && n->in(2)->in(3)->as_Type()->type()->is_long()->_lo >= 0);
   match(AddP (AddP (DecodeN reg) (ConvI2L idx)) off);
 
   op_cost(10);
@@ -4067,7 +4067,7 @@
 operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale)
 %{
   constraint(ALLOC_IN_RC(ptr_reg));
-  predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
+  predicate(CompressedOops::shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
   match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L idx) scale)) off);
 
   op_cost(10);
@@ -5752,7 +5752,7 @@
 // Load Effective Address which uses Narrow (32-bits) oop
 instruct leaPCompressedOopOffset(rRegP dst, indCompressedOopOffset mem)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
+  predicate(UseCompressedOops && (CompressedOops::shift() != 0));
   match(Set dst mem);
 
   ins_cost(110);
@@ -5764,7 +5764,7 @@
 
 instruct leaP8Narrow(rRegP dst, indOffset8Narrow mem)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   match(Set dst mem);
 
   ins_cost(110); // XXX
@@ -5776,7 +5776,7 @@
 
 instruct leaP32Narrow(rRegP dst, indOffset32Narrow mem)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   match(Set dst mem);
 
   ins_cost(110);
@@ -5788,7 +5788,7 @@
 
 instruct leaPIdxOffNarrow(rRegP dst, indIndexOffsetNarrow mem)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   match(Set dst mem);
 
   ins_cost(110);
@@ -5800,7 +5800,7 @@
 
 instruct leaPIdxScaleNarrow(rRegP dst, indIndexScaleNarrow mem)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   match(Set dst mem);
 
   ins_cost(110);
@@ -5812,7 +5812,7 @@
 
 instruct leaPIdxScaleOffNarrow(rRegP dst, indIndexScaleOffsetNarrow mem)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   match(Set dst mem);
 
   ins_cost(110);
@@ -5824,7 +5824,7 @@
 
 instruct leaPPosIdxOffNarrow(rRegP dst, indPosIndexOffsetNarrow mem)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   match(Set dst mem);
 
   ins_cost(110);
@@ -5836,7 +5836,7 @@
 
 instruct leaPPosIdxScaleOffNarrow(rRegP dst, indPosIndexScaleOffsetNarrow mem)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   match(Set dst mem);
 
   ins_cost(110);
@@ -6200,7 +6200,7 @@
 
 instruct storeImmP0(memory mem, immP0 zero)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
   match(Set mem (StoreP mem zero));
 
   ins_cost(125); // XXX
@@ -6250,7 +6250,7 @@
 
 instruct storeImmN0(memory mem, immN0 zero)
 %{
-  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
+  predicate(CompressedOops::base() == NULL && CompressedKlassPointers::base() == NULL);
   match(Set mem (StoreN mem zero));
 
   ins_cost(125); // XXX
@@ -6293,7 +6293,7 @@
 // Store Integer Immediate
 instruct storeImmI0(memory mem, immI0 zero)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
   match(Set mem (StoreI mem zero));
 
   ins_cost(125); // XXX
@@ -6318,7 +6318,7 @@
 // Store Long Immediate
 instruct storeImmL0(memory mem, immL0 zero)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
   match(Set mem (StoreL mem zero));
 
   ins_cost(125); // XXX
@@ -6343,7 +6343,7 @@
 // Store Short/Char Immediate
 instruct storeImmC0(memory mem, immI0 zero)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
   match(Set mem (StoreC mem zero));
 
   ins_cost(125); // XXX
@@ -6369,7 +6369,7 @@
 // Store Byte Immediate
 instruct storeImmB0(memory mem, immI0 zero)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
   match(Set mem (StoreB mem zero));
 
   ins_cost(125); // XXX
@@ -6394,7 +6394,7 @@
 // Store CMS card-mark Immediate
 instruct storeImmCM0_reg(memory mem, immI0 zero)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
   match(Set mem (StoreCM mem zero));
 
   ins_cost(125); // XXX
@@ -6432,7 +6432,7 @@
 // Store immediate Float value (it is faster than store from XMM register)
 instruct storeF0(memory mem, immF0 zero)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
   match(Set mem (StoreF mem zero));
 
   ins_cost(25); // XXX
@@ -6470,7 +6470,7 @@
 // Store immediate double 0.0 (it is faster than store from XMM register)
 instruct storeD0_imm(memory mem, immD0 src)
 %{
-  predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
+  predicate(!UseCompressedOops || (CompressedOops::base() != NULL));
   match(Set mem (StoreD mem src));
 
   ins_cost(50);
@@ -6482,7 +6482,7 @@
 
 instruct storeD0(memory mem, immD0 zero)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
   match(Set mem (StoreD mem zero));
 
   ins_cost(25); // XXX
@@ -6919,7 +6919,7 @@
 // in case of 32bit oops (heap < 4Gb).
 instruct convN2I(rRegI dst, rRegN src)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(CompressedOops::shift() == 0);
   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 
   format %{ "movl    $dst, $src\t# compressed ptr -> int" %}
@@ -11957,7 +11957,7 @@
 // any compare to a zero should be eq/neq.
 instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)
 %{
-  predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
+  predicate(!UseCompressedOops || (CompressedOops::base() != NULL));
   match(Set cr (CmpP (LoadP op) zero));
 
   ins_cost(500); // XXX
@@ -11970,7 +11970,7 @@
 
 instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
 %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
   match(Set cr (CmpP (LoadP mem) zero));
 
   format %{ "cmpq    R12, $mem\t# ptr (R12_heapbase==0)" %}
@@ -12052,7 +12052,7 @@
 
 instruct testN_mem(rFlagsReg cr, memory mem, immN0 zero)
 %{
-  predicate(Universe::narrow_oop_base() != NULL);
+  predicate(CompressedOops::base() != NULL);
   match(Set cr (CmpN (LoadN mem) zero));
 
   ins_cost(500); // XXX
@@ -12065,7 +12065,7 @@
 
 instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero)
 %{
-  predicate(Universe::narrow_oop_base() == NULL && (Universe::narrow_klass_base() == NULL));
+  predicate(CompressedOops::base() == NULL && (CompressedKlassPointers::base() == NULL));
   match(Set cr (CmpN (LoadN mem) zero));
 
   format %{ "cmpl    R12, $mem\t# compressed ptr (R12_heapbase==0)" %}
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Thu May 09 14:26:03 2019 +0200
@@ -35,6 +35,7 @@
 #include "jvmci/compilerRuntime.hpp"
 #include "jvmci/jvmciRuntime.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/method.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
@@ -577,8 +578,8 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_top_address", address, (heap->supports_inline_contig_alloc() ? heap->top_addr() : NULL));
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_end_address", address, (heap->supports_inline_contig_alloc() ? heap->end_addr() : NULL));
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page());
-    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_klass_base_address", address, Universe::narrow_klass_base());
-    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_oop_base_address", address, Universe::narrow_oop_base());
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_klass_base_address", address, CompressedKlassPointers::base());
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_oop_base_address", address, CompressedOops::base());
 #if INCLUDE_G1GC
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_log_of_heap_region_grain_bytes", int, HeapRegion::LogOfHRGrainBytes);
 #endif
--- a/src/hotspot/share/aot/aotLoader.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/aot/aotLoader.cpp	Thu May 09 14:26:03 2019 +0200
@@ -22,12 +22,12 @@
  */
 
 #include "precompiled.hpp"
-#include "jvm.h"
-
 #include "aot/aotCodeHeap.hpp"
 #include "aot/aotLoader.inline.hpp"
+#include "jvm.h"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/method.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.inline.hpp"
@@ -184,14 +184,14 @@
     // AOT sets shift values during heap and metaspace initialization.
     // Check shifts value to make sure thay did not change.
     if (UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
-      int oop_shift = Universe::narrow_oop_shift();
+      int oop_shift = CompressedOops::shift();
       FOR_ALL_AOT_LIBRARIES(lib) {
-        (*lib)->verify_flag((*lib)->config()->_narrowOopShift, oop_shift, "Universe::narrow_oop_shift");
+        (*lib)->verify_flag((*lib)->config()->_narrowOopShift, oop_shift, "CompressedOops::shift");
       }
       if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set
-        int klass_shift = Universe::narrow_klass_shift();
+        int klass_shift = CompressedKlassPointers::shift();
         FOR_ALL_AOT_LIBRARIES(lib) {
-          (*lib)->verify_flag((*lib)->config()->_narrowKlassShift, klass_shift, "Universe::narrow_klass_shift");
+          (*lib)->verify_flag((*lib)->config()->_narrowKlassShift, klass_shift, "CompressedKlassPointers::shift");
         }
       }
     }
@@ -225,10 +225,10 @@
   // This method is called from Universe::initialize_heap().
   if (UseAOT && libraries_count() > 0 &&
       UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
-    if (Universe::narrow_oop_shift() == 0) {
+    if (CompressedOops::shift() == 0) {
       // 0 is valid shift value for small heap but we can safely increase it
       // at this point when nobody used it yet.
-      Universe::set_narrow_oop_shift(AOTLib::narrow_oop_shift());
+      CompressedOops::set_shift(AOTLib::narrow_oop_shift());
     }
   }
 }
@@ -238,8 +238,8 @@
   if (UseAOT && libraries_count() > 0 &&
       UseCompressedOops && AOTLib::narrow_oop_shift_initialized() &&
       UseCompressedClassPointers) {
-    if (Universe::narrow_klass_shift() == 0) {
-      Universe::set_narrow_klass_shift(AOTLib::narrow_klass_shift());
+    if (CompressedKlassPointers::shift() == 0) {
+      CompressedKlassPointers::set_shift(AOTLib::narrow_klass_shift());
     }
   }
 }
--- a/src/hotspot/share/asm/assembler.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/asm/assembler.cpp	Thu May 09 14:26:03 2019 +0200
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "oops/compressedOops.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
@@ -315,12 +316,12 @@
   intptr_t cell_header_size = Universe::heap()->cell_header_size();
   size_t region_size = os::vm_page_size() + cell_header_size;
 #ifdef _LP64
-  if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
+  if (UseCompressedOops && CompressedOops::base() != NULL) {
     // A SEGV can legitimately happen in C2 code at address
     // (heap_base + offset) if  Matcher::narrow_oop_use_complex_address
     // is configured to allow narrow oops field loads to be implicitly
     // null checked
-    intptr_t start = ((intptr_t)Universe::narrow_oop_base()) - cell_header_size;
+    intptr_t start = ((intptr_t)CompressedOops::base()) - cell_header_size;
     intptr_t end = start + region_size;
     if (int_address >= start && int_address < end) {
       return true;
--- a/src/hotspot/share/classfile/compactHashtable.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/classfile/compactHashtable.cpp	Thu May 09 14:26:03 2019 +0200
@@ -30,7 +30,6 @@
 #include "memory/heapShared.inline.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
-#include "oops/compressedOops.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/numberSeq.hpp"
 #include <sys/stat.h>
--- a/src/hotspot/share/classfile/stringTable.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/classfile/stringTable.cpp	Thu May 09 14:26:03 2019 +0200
@@ -39,6 +39,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayOop.inline.hpp"
 #include "oops/weakHandle.inline.hpp"
--- a/src/hotspot/share/compiler/oopMap.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/compiler/oopMap.cpp	Thu May 09 14:26:03 2019 +0200
@@ -32,6 +32,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/signature.hpp"
@@ -346,11 +347,11 @@
         oop *derived_loc = loc;
         oop *base_loc    = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
         // Ignore NULL oops and decoded NULL narrow oops which
-        // equal to Universe::narrow_oop_base when a narrow oop
+        // are equal to CompressedOops::base() when a narrow oop
         // implicit null check is used in compiled code.
         // The narrow_oop_base could be NULL or be the address
         // of the page below heap depending on compressed oops mode.
-        if (base_loc != NULL && *base_loc != NULL && !Universe::is_narrow_oop_base(*base_loc)) {
+        if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
           derived_oop_fn(base_loc, derived_loc);
         }
         oms.next();
@@ -371,9 +372,9 @@
       guarantee(loc != NULL, "missing saved register");
       if ( omv.type() == OopMapValue::oop_value ) {
         oop val = *loc;
-        if (val == NULL || Universe::is_narrow_oop_base(val)) {
+        if (val == NULL || CompressedOops::is_base(val)) {
           // Ignore NULL oops and decoded NULL narrow oops which
-          // equal to Universe::narrow_oop_base when a narrow oop
+          // are equal to CompressedOops::base() when a narrow oop
           // implicit null check is used in compiled code.
           // The narrow_oop_base could be NULL or be the address
           // of the page below heap depending on compressed oops mode.
--- a/src/hotspot/share/compiler/oopMap.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/compiler/oopMap.hpp	Thu May 09 14:26:03 2019 +0200
@@ -237,7 +237,7 @@
   int heap_size() const;
 
   // Methods oops_do() and all_do() filter out NULL oops and
-  // oop == Universe::narrow_oop_base() before passing oops
+  // oop == CompressedOops::base() before passing oops
   // to closures.
 
   // Iterates through frame for a compiled method
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp	Thu May 09 14:26:03 2019 +0200
@@ -27,6 +27,7 @@
 #include "gc/shared/gcArguments.hpp"
 #include "gc/shared/gcConfiguration.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/debug.hpp"
@@ -143,8 +144,8 @@
   return UseCompressedOops;
 }
 
-Universe::NARROW_OOP_MODE GCHeapConfiguration::narrow_oop_mode() const {
-  return Universe::narrow_oop_mode();
+CompressedOops::Mode GCHeapConfiguration::narrow_oop_mode() const {
+  return CompressedOops::mode();
 }
 
 uint GCHeapConfiguration::object_alignment_in_bytes() const {
--- a/src/hotspot/share/gc/shared/gcConfiguration.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/gc/shared/gcConfiguration.hpp	Thu May 09 14:26:03 2019 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/shared/gcName.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class GCConfiguration {
@@ -63,7 +64,7 @@
   size_t min_size() const;
   size_t initial_size() const;
   bool uses_compressed_oops() const;
-  Universe::NARROW_OOP_MODE narrow_oop_mode() const;
+  CompressedOops::Mode narrow_oop_mode() const;
   uint object_alignment_in_bytes() const;
   int heap_address_size_in_bits() const;
 };
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp	Thu May 09 14:26:03 2019 +0200
@@ -32,6 +32,7 @@
 #include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Thu May 09 14:26:03 2019 +0200
@@ -67,6 +67,7 @@
 #include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
 
 #include "memory/metaspace.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Thu May 09 14:26:03 2019 +0200
@@ -39,6 +39,7 @@
 #include "gc/shenandoah/shenandoahControlThread.hpp"
 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/prefetch.inline.hpp"
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	Thu May 09 14:26:03 2019 +0200
@@ -43,6 +43,7 @@
 #include "gc/shenandoah/shenandoahVMOperations.hpp"
 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
 #include "memory/metaspace.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/copy.hpp"
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp	Thu May 09 14:26:03 2019 +0200
@@ -32,6 +32,7 @@
 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 template <class T, bool STRING_DEDUP, bool DEGEN>
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Thu May 09 14:26:03 2019 +0200
@@ -31,6 +31,7 @@
 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
 #include "gc/shenandoah/shenandoahVerifier.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/resourceArea.hpp"
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Thu May 09 14:26:03 2019 +0200
@@ -43,6 +43,7 @@
 #include "memory/metaspaceGCThresholdUpdater.hpp"
 #include "memory/referenceType.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
 #include "runtime/flags/jvmFlag.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/osThread.hpp"
@@ -258,11 +259,11 @@
 }
 
 void NarrowOopModeConstant::serialize(JfrCheckpointWriter& writer) {
-  static const u4 nof_entries = Universe::HeapBasedNarrowOop + 1;
+  static const u4 nof_entries = CompressedOops::HeapBasedNarrowOop + 1;
   writer.write_count(nof_entries);
   for (u4 i = 0; i < nof_entries; ++i) {
     writer.write_key(i);
-    writer.write(Universe::narrow_oop_mode_to_string((Universe::NARROW_OOP_MODE)i));
+    writer.write(CompressedOops::mode_to_string((CompressedOops::Mode)i));
   }
 }
 
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Thu May 09 14:26:03 2019 +0200
@@ -27,6 +27,7 @@
 #include "jvmci/jvmciCodeInstaller.hpp"
 #include "jvmci/jvmciCompilerToVM.hpp"
 #include "jvmci/jvmciRuntime.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -268,7 +269,7 @@
   int index = _oop_recorder->find_index(klass);
   section->relocate(dest, metadata_Relocation::spec(index));
   TRACE_jvmci_3("narrowKlass[%d of %d] = %s", index, _oop_recorder->metadata_count(), klass->name()->as_C_string());
-  return Klass::encode_klass(klass);
+  return CompressedKlassPointers::encode(klass);
 }
 #endif
 
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Thu May 09 14:26:03 2019 +0200
@@ -25,6 +25,7 @@
 #include "ci/ciUtilities.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/cardTable.hpp"
+#include "oops/compressedOops.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "jvmci/jvmciEnv.hpp"
 #include "jvmci/jvmciCompilerToVM.hpp"
@@ -100,10 +101,10 @@
 
   Universe_collectedHeap = Universe::heap();
   Universe_base_vtable_size = Universe::base_vtable_size();
-  Universe_narrow_oop_base = Universe::narrow_oop_base();
-  Universe_narrow_oop_shift = Universe::narrow_oop_shift();
-  Universe_narrow_klass_base = Universe::narrow_klass_base();
-  Universe_narrow_klass_shift = Universe::narrow_klass_shift();
+  Universe_narrow_oop_base = CompressedOops::base();
+  Universe_narrow_oop_shift = CompressedOops::shift();
+  Universe_narrow_klass_base = CompressedKlassPointers::base();
+  Universe_narrow_klass_shift = CompressedKlassPointers::shift();
   Universe_non_oop_bits = Universe::non_oop_word();
   Universe_verify_oop_mask = Universe::verify_oop_mask();
   Universe_verify_oop_bits = Universe::verify_oop_bits();
--- a/src/hotspot/share/memory/filemap.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/memory/filemap.cpp	Thu May 09 14:26:03 2019 +0200
@@ -184,12 +184,12 @@
   _alignment = alignment;
   _obj_alignment = ObjectAlignmentInBytes;
   _compact_strings = CompactStrings;
-  _narrow_oop_mode = Universe::narrow_oop_mode();
-  _narrow_oop_base = Universe::narrow_oop_base();
-  _narrow_oop_shift = Universe::narrow_oop_shift();
+  _narrow_oop_mode = CompressedOops::mode();
+  _narrow_oop_base = CompressedOops::base();
+  _narrow_oop_shift = CompressedOops::shift();
   _max_heap_size = MaxHeapSize;
-  _narrow_klass_base = Universe::narrow_klass_base();
-  _narrow_klass_shift = Universe::narrow_klass_shift();
+  _narrow_klass_base = CompressedKlassPointers::base();
+  _narrow_klass_shift = CompressedKlassPointers::shift();
   _shared_path_table_size = mapinfo->_shared_path_table_size;
   _shared_path_table = mapinfo->_shared_path_table;
   _shared_path_entry_size = mapinfo->_shared_path_entry_size;
@@ -638,7 +638,7 @@
     si->_file_offset = _file_offset;
   }
   if (HeapShared::is_heap_region(region)) {
-    assert((base - (char*)Universe::narrow_oop_base()) % HeapWordSize == 0, "Sanity");
+    assert((base - (char*)CompressedOops::base()) % HeapWordSize == 0, "Sanity");
     if (base != NULL) {
       si->_addr._offset = (intx)CompressedOops::encode_not_null((oop)base);
     } else {
@@ -976,19 +976,19 @@
   log_info(cds)("The current max heap size = " SIZE_FORMAT "M, HeapRegion::GrainBytes = " SIZE_FORMAT,
                 heap_reserved.byte_size()/M, HeapRegion::GrainBytes);
   log_info(cds)("    narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
-                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
+                p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
   log_info(cds)("    narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
-                Universe::narrow_oop_mode(), p2i(Universe::narrow_oop_base()), Universe::narrow_oop_shift());
+                CompressedOops::mode(), p2i(CompressedOops::base()), CompressedOops::shift());
 
-  if (narrow_klass_base() != Universe::narrow_klass_base() ||
-      narrow_klass_shift() != Universe::narrow_klass_shift()) {
+  if (narrow_klass_base() != CompressedKlassPointers::base() ||
+      narrow_klass_shift() != CompressedKlassPointers::shift()) {
     log_info(cds)("CDS heap data cannot be used because the archive was created with an incompatible narrow klass encoding mode.");
     return;
   }
 
-  if (narrow_oop_mode() != Universe::narrow_oop_mode() ||
-      narrow_oop_base() != Universe::narrow_oop_base() ||
-      narrow_oop_shift() != Universe::narrow_oop_shift()) {
+  if (narrow_oop_mode() != CompressedOops::mode() ||
+      narrow_oop_base() != CompressedOops::base() ||
+      narrow_oop_shift() != CompressedOops::shift()) {
     log_info(cds)("CDS heap data need to be relocated because the archive was created with an incompatible oop encoding mode.");
     _heap_pointers_need_patching = true;
   } else {
--- a/src/hotspot/share/memory/filemap.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/memory/filemap.hpp	Thu May 09 14:26:03 2019 +0200
@@ -30,6 +30,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
 #include "utilities/align.hpp"
 
 // Layout of the file:
@@ -100,7 +101,7 @@
   int    _narrow_oop_shift;         // compressed oop encoding shift
   bool    _compact_strings;         // value of CompactStrings
   uintx  _max_heap_size;            // java max heap size during dumping
-  Universe::NARROW_OOP_MODE _narrow_oop_mode; // compressed oop encoding mode
+  CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode
   int     _narrow_klass_shift;      // save narrow klass base and shift
   address _narrow_klass_base;
   char*   _misc_data_patching_start;
@@ -222,7 +223,7 @@
   void   invalidate();
   int    version()                    { return _header->_version; }
   size_t alignment()                  { return _header->_alignment; }
-  Universe::NARROW_OOP_MODE narrow_oop_mode() { return _header->_narrow_oop_mode; }
+  CompressedOops::Mode narrow_oop_mode() { return _header->_narrow_oop_mode; }
   address narrow_oop_base()    const  { return _header->_narrow_oop_base; }
   int     narrow_oop_shift()   const  { return _header->_narrow_oop_shift; }
   uintx   max_heap_size()      const  { return _header->_max_heap_size; }
--- a/src/hotspot/share/memory/heapShared.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/memory/heapShared.hpp	Thu May 09 14:26:03 2019 +0200
@@ -30,6 +30,7 @@
 #include "memory/allocation.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.hpp"
 #include "oops/typeArrayKlass.hpp"
@@ -321,7 +322,7 @@
   static void initialize_from_archived_subgraph(Klass* k) NOT_CDS_JAVA_HEAP_RETURN;
 
   // NarrowOops stored in the CDS archive may use a different encoding scheme
-  // than Universe::narrow_oop_{base,shift} -- see FileMapInfo::map_heap_regions_impl.
+  // than CompressedOops::{base,shift} -- see FileMapInfo::map_heap_regions_impl.
   // To decode them, do not use CompressedOops::decode_not_null. Use this
   // function instead.
   inline static oop decode_from_archive(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
--- a/src/hotspot/share/memory/metaspace.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/memory/metaspace.cpp	Thu May 09 14:26:03 2019 +0200
@@ -39,6 +39,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/metaspaceTracer.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
 #include "runtime/init.hpp"
 #include "runtime/orderAccess.hpp"
 #include "services/memTracker.hpp"
@@ -974,7 +975,7 @@
     }
   }
 
-  Universe::set_narrow_klass_base(lower_base);
+  CompressedKlassPointers::set_base(lower_base);
 
   // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
   // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
@@ -984,9 +985,9 @@
   // can be used at same time as AOT code.
   if (!UseSharedSpaces
       && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
-    Universe::set_narrow_klass_shift(0);
+    CompressedKlassPointers::set_shift(0);
   } else {
-    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+    CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
   }
   AOTLoader::set_narrow_klass_shift();
 }
@@ -1131,7 +1132,7 @@
 
 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
-               p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
+               p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
   if (_class_space_list != NULL) {
     address base = (address)_class_space_list->current_virtual_space()->bottom();
     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Thu May 09 14:26:03 2019 +0200
@@ -246,7 +246,7 @@
       // with the archived ones, so it must be done after all encodings are determined.
       mapinfo->map_heap_regions();
     }
-    Universe::set_narrow_klass_range(CompressedClassSpaceSize);
+    CompressedKlassPointers::set_range(CompressedClassSpaceSize);
 #endif // _LP64
   } else {
     assert(!mapinfo->is_open() && !UseSharedSpaces,
@@ -308,16 +308,16 @@
   _shared_rs = _shared_rs.first_part(max_archive_size);
 
   // Set up compress class pointers.
-  Universe::set_narrow_klass_base((address)_shared_rs.base());
+  CompressedKlassPointers::set_base((address)_shared_rs.base());
   // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
   // with AOT.
-  Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+  CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
   // Set the range of klass addresses to 4GB.
-  Universe::set_narrow_klass_range(cds_total);
+  CompressedKlassPointers::set_range(cds_total);
 
   Metaspace::initialize_class_space(tmp_class_space);
   log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
-                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
+                p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
 
   log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                 CompressedClassSpaceSize, p2i(tmp_class_space.base()));
--- a/src/hotspot/share/memory/universe.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/memory/universe.cpp	Thu May 09 14:26:03 2019 +0200
@@ -50,6 +50,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/constantPool.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceKlass.hpp"
@@ -154,11 +155,6 @@
 
 CollectedHeap*  Universe::_collectedHeap = NULL;
 
-NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
-NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
-address Universe::_narrow_ptrs_base;
-uint64_t Universe::_narrow_klass_range = (uint64_t(max_juint)+1);
-
 void Universe::basic_type_classes_do(void f(Klass*)) {
   for (int i = T_BOOLEAN; i < T_LONG+1; i++) {
     f(_typeArrayKlassObjs[i]);
@@ -670,7 +666,8 @@
     return status;
   }
 
-  Universe::initialize_compressed_oops();
+  CompressedOops::initialize();
+
   Universe::initialize_tlab();
 
   SystemDictionary::initialize_oop_storage();
@@ -742,55 +739,6 @@
   return status;
 }
 
-// Choose the heap base address and oop encoding mode
-// when compressed oops are used:
-// Unscaled  - Use 32-bits oops without encoding when
-//     NarrowOopHeapBaseMin + heap_size < 4Gb
-// ZeroBased - Use zero based compressed oops with encoding when
-//     NarrowOopHeapBaseMin + heap_size < 32Gb
-// HeapBased - Use compressed oops with heap base + encoding.
-void Universe::initialize_compressed_oops() {
-#ifdef _LP64
-  if (UseCompressedOops) {
-    // Subtract a page because something can get allocated at heap base.
-    // This also makes implicit null checking work, because the
-    // memory+1 page below heap_base needs to cause a signal.
-    // See needs_explicit_null_check.
-    // Only set the heap base for compressed oops because it indicates
-    // compressed oops for pstack code.
-    if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
-      // Didn't reserve heap below 4Gb.  Must shift.
-      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
-    }
-    if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
-      // Did reserve heap below 32Gb. Can use base == 0;
-      Universe::set_narrow_oop_base(0);
-    }
-    AOTLoader::set_narrow_oop_shift();
-
-    Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
-
-    LogTarget(Info, gc, heap, coops) lt;
-    if (lt.is_enabled()) {
-      ResourceMark rm;
-      LogStream ls(lt);
-      Universe::print_compressed_oops_mode(&ls);
-    }
-
-    // Tell tests in which mode we run.
-    Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
-                                                   narrow_oop_mode_to_string(narrow_oop_mode()),
-                                                   false));
-  }
-  // Universe::narrow_oop_base() is one page below the heap.
-  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
-         os::vm_page_size()) ||
-         Universe::narrow_oop_base() == NULL, "invalid value");
-  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
-         Universe::narrow_oop_shift() == 0, "invalid value");
-#endif
-}
-
 void Universe::initialize_tlab() {
   ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
   if (UseTLAB) {
@@ -800,26 +748,6 @@
   }
 }
 
-void Universe::print_compressed_oops_mode(outputStream* st) {
-  st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
-            p2i(Universe::heap()->base()), Universe::heap()->reserved_region().byte_size()/M);
-
-  st->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
-
-  if (Universe::narrow_oop_base() != 0) {
-    st->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));
-  }
-
-  if (Universe::narrow_oop_shift() != 0) {
-    st->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
-  }
-
-  if (!Universe::narrow_oop_use_implicit_null_checks()) {
-    st->print(", no protected page in front of the heap");
-  }
-  st->cr();
-}
-
 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 
   assert(alignment <= Arguments::conservative_max_heap_alignment(),
@@ -847,7 +775,7 @@
       // Universe::initialize_heap() will reset this to NULL if unscaled
       // or zero-based narrow oops are actually used.
       // Else heap start and base MUST differ, so that NULL can be encoded nonambigous.
-      Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base());
+      CompressedOops::set_base((address)total_rs.compressed_oop_base());
     }
 
     if (AllocateHeapAt != NULL) {
@@ -873,40 +801,6 @@
   _heap_used_at_last_gc     = heap()->used();
 }
 
-
-const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
-  switch (mode) {
-    case UnscaledNarrowOop:
-      return "32-bit";
-    case ZeroBasedNarrowOop:
-      return "Zero based";
-    case DisjointBaseNarrowOop:
-      return "Non-zero disjoint base";
-    case HeapBasedNarrowOop:
-      return "Non-zero based";
-    default:
-      ShouldNotReachHere();
-      return "";
-  }
-}
-
-
-Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
-  if (narrow_oop_base_disjoint()) {
-    return DisjointBaseNarrowOop;
-  }
-
-  if (narrow_oop_base() != 0) {
-    return HeapBasedNarrowOop;
-  }
-
-  if (narrow_oop_shift() != 0) {
-    return ZeroBasedNarrowOop;
-  }
-
-  return UnscaledNarrowOop;
-}
-
 void initialize_known_method(LatestMethodCache* method_cache,
                              InstanceKlass* ik,
                              const char* method,
--- a/src/hotspot/share/memory/universe.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/memory/universe.hpp	Thu May 09 14:26:03 2019 +0200
@@ -71,19 +71,6 @@
 };
 
 
-// For UseCompressedOops.
-struct NarrowPtrStruct {
-  // Base address for oop-within-java-object materialization.
-  // NULL if using wide oops or zero based narrow oops.
-  address _base;
-  // Number of shift bits for encoding/decoding narrow ptrs.
-  // 0 if using wide ptrs or zero based unscaled narrow ptrs,
-  // LogMinObjAlignmentInBytes/LogKlassAlignmentInBytes otherwise.
-  int     _shift;
-  // Generate code with implicit null checks for narrow ptrs.
-  bool    _use_implicit_null_checks;
-};
-
 enum VerifyOption {
       VerifyOption_Default = 0,
 
@@ -182,13 +169,6 @@
 
   static intptr_t _non_oop_bits;
 
-  // For UseCompressedOops.
-  static struct NarrowPtrStruct _narrow_oop;
-  // For UseCompressedClassPointers.
-  static struct NarrowPtrStruct _narrow_klass;
-  static address _narrow_ptrs_base;
-  // CompressedClassSpaceSize set to 1GB, but appear 3GB away from _narrow_ptrs_base during CDS dump.
-  static uint64_t _narrow_klass_range;
   // array of dummy objects used with +FullGCAlot
   debug_only(static objArrayOop _fullgc_alot_dummy_array;)
   // index of next entry to clear
@@ -214,7 +194,6 @@
   static size_t _heap_used_at_last_gc;
 
   static jint initialize_heap();
-  static void initialize_compressed_oops();
   static void initialize_tlab();
   static void initialize_basic_type_mirrors(TRAPS);
   static void fixup_mirrors(TRAPS);
@@ -232,23 +211,6 @@
     return m;
   }
 
-  static void     set_narrow_oop_base(address base) {
-    assert(UseCompressedOops, "no compressed oops?");
-    _narrow_oop._base    = base;
-  }
-  static void     set_narrow_klass_base(address base) {
-    assert(UseCompressedClassPointers, "no compressed klass ptrs?");
-    _narrow_klass._base   = base;
-  }
-  static void     set_narrow_klass_range(uint64_t range) {
-     assert(UseCompressedClassPointers, "no compressed klass ptrs?");
-     _narrow_klass_range = range;
-  }
-  static void     set_narrow_oop_use_implicit_null_checks(bool use) {
-    assert(UseCompressedOops, "no compressed ptrs?");
-    _narrow_oop._use_implicit_null_checks   = use;
-  }
-
   // Debugging
   static int _verify_count;                           // number of verifies done
 
@@ -372,69 +334,6 @@
   // The particular choice of collected heap.
   static CollectedHeap* heap() { return _collectedHeap; }
 
-  // For UseCompressedOops
-  // Narrow Oop encoding mode:
-  // 0 - Use 32-bits oops without encoding when
-  //     NarrowOopHeapBaseMin + heap_size < 4Gb
-  // 1 - Use zero based compressed oops with encoding when
-  //     NarrowOopHeapBaseMin + heap_size < 32Gb
-  // 2 - Use compressed oops with disjoint heap base if
-  //     base is 32G-aligned and base > 0. This allows certain
-  //     optimizations in encoding/decoding.
-  //     Disjoint: Bits used in base are disjoint from bits used
-  //     for oops ==> oop = (cOop << 3) | base.  One can disjoint
-  //     the bits of an oop into base and compressed oop.
-  // 3 - Use compressed oops with heap base + encoding.
-  enum NARROW_OOP_MODE {
-    UnscaledNarrowOop  = 0,
-    ZeroBasedNarrowOop = 1,
-    DisjointBaseNarrowOop = 2,
-    HeapBasedNarrowOop = 3,
-    AnyNarrowOopMode = 4
-  };
-  static NARROW_OOP_MODE narrow_oop_mode();
-  static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
-  static address  narrow_oop_base()                  { return  _narrow_oop._base; }
-  // Test whether bits of addr and possible offsets into the heap overlap.
-  static bool     is_disjoint_heap_base_address(address addr) {
-    return (((uint64_t)(intptr_t)addr) &
-            (((uint64_t)UCONST64(0xFFFFffffFFFFffff)) >> (32-LogMinObjAlignmentInBytes))) == 0;
-  }
-  // Check for disjoint base compressed oops.
-  static bool     narrow_oop_base_disjoint()        {
-    return _narrow_oop._base != NULL && is_disjoint_heap_base_address(_narrow_oop._base);
-  }
-  // Check for real heapbased compressed oops.
-  // We must subtract the base as the bits overlap.
-  // If we negate above function, we also get unscaled and zerobased.
-  static bool     narrow_oop_base_overlaps()          {
-    return _narrow_oop._base != NULL && !is_disjoint_heap_base_address(_narrow_oop._base);
-  }
-  static bool  is_narrow_oop_base(void* addr)             { return (narrow_oop_base() == (address)addr); }
-  static int      narrow_oop_shift()                      { return  _narrow_oop._shift; }
-  static bool     narrow_oop_use_implicit_null_checks()   { return  _narrow_oop._use_implicit_null_checks; }
-
-  // For UseCompressedClassPointers
-  static address  narrow_klass_base()                     { return  _narrow_klass._base; }
-  static uint64_t narrow_klass_range()                    { return  _narrow_klass_range; }
-  static int      narrow_klass_shift()                    { return  _narrow_klass._shift; }
-
-  static address* narrow_ptrs_base_addr()                 { return &_narrow_ptrs_base; }
-  static void     set_narrow_ptrs_base(address a)         { _narrow_ptrs_base = a; }
-  static address  narrow_ptrs_base()                      { return _narrow_ptrs_base; }
-
-  static void     print_compressed_oops_mode(outputStream* st);
-
-  // this is set in vm_version on sparc (and then reset in universe afaict)
-  static void     set_narrow_oop_shift(int shift)         {
-    _narrow_oop._shift   = shift;
-  }
-
-  static void     set_narrow_klass_shift(int shift)       {
-    assert(shift == 0 || shift == LogKlassAlignmentInBytes, "invalid shift for klass ptrs");
-    _narrow_klass._shift   = shift;
-  }
-
   // Reserve Java heap and determine CompressedOops mode
   static ReservedSpace reserve_heap(size_t heap_size, size_t alignment);
 
--- a/src/hotspot/share/memory/virtualspace.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/memory/virtualspace.cpp	Thu May 09 14:26:03 2019 +0200
@@ -26,6 +26,7 @@
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/virtualspace.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/os.inline.hpp"
@@ -309,9 +310,9 @@
                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
                                  p2i(_base),
                                  _noaccess_prefix);
-      assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
+      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
     } else {
-      Universe::set_narrow_oop_use_implicit_null_checks(false);
+      CompressedOops::set_use_implicit_null_checks(false);
     }
   }
 
@@ -578,7 +579,7 @@
     while (addresses[i] &&                                 // End of array not yet reached.
            ((_base == NULL) ||                             // No previous try succeeded.
             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
-             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
+             !CompressedOops::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
       char* const attach_point = addresses[i];
       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/compressedOops.cpp	Thu May 09 14:26:03 2019 +0200
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "aot/aotLoader.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "memory/memRegion.hpp"
+#include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "runtime/globals.hpp"
+
+// For UseCompressedOops.
+NarrowPtrStruct CompressedOops::_narrow_oop = { NULL, 0, true };
+
+address CompressedOops::_narrow_ptrs_base;
+
+// Choose the heap base address and oop encoding mode
+// when compressed oops are used:
+// Unscaled  - Use 32-bits oops without encoding when
+//     NarrowOopHeapBaseMin + heap_size < 4Gb
+// ZeroBased - Use zero based compressed oops with encoding when
+//     NarrowOopHeapBaseMin + heap_size < 32Gb
+// HeapBased - Use compressed oops with heap base + encoding.
+void CompressedOops::initialize() {
+#ifdef _LP64
+  if (UseCompressedOops) {
+    // Subtract a page because something can get allocated at heap base.
+    // This also makes implicit null checking work, because the
+    // memory+1 page below heap_base needs to cause a signal.
+    // See needs_explicit_null_check.
+    // Only set the heap base for compressed oops because it indicates
+    // compressed oops for pstack code.
+    if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
+      // Didn't reserve heap below 4Gb.  Must shift.
+      set_shift(LogMinObjAlignmentInBytes);
+    }
+    if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
+      // Did reserve heap below 32Gb. Can use base == 0;
+      set_base(0);
+    }
+    AOTLoader::set_narrow_oop_shift();
+
+    set_ptrs_base(base());
+
+    LogTarget(Info, gc, heap, coops) lt;
+    if (lt.is_enabled()) {
+      ResourceMark rm;
+      LogStream ls(lt);
+      print_mode(&ls);
+    }
+
+    // Tell tests in which mode we run.
+    Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
+                                                   mode_to_string(mode()),
+                                                   false));
+  }
+  // base() is one page below the heap.
+  assert((intptr_t)base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size()) ||
+         base() == NULL, "invalid value");
+  assert(shift() == LogMinObjAlignmentInBytes ||
+         shift() == 0, "invalid value");
+#endif
+}
+
+void CompressedOops::set_base(address base) {
+  assert(UseCompressedOops, "no compressed oops?");
+  _narrow_oop._base    = base;
+}
+
+void CompressedOops::set_shift(int shift) {
+  _narrow_oop._shift   = shift;
+}
+
+void CompressedOops::set_use_implicit_null_checks(bool use) {
+  assert(UseCompressedOops, "no compressed ptrs?");
+  _narrow_oop._use_implicit_null_checks   = use;
+}
+
+void CompressedOops::set_ptrs_base(address addr) {
+  _narrow_ptrs_base = addr;
+}
+
+CompressedOops::Mode CompressedOops::mode() {
+  if (base_disjoint()) {
+    return DisjointBaseNarrowOop;
+  }
+
+  if (base() != 0) {
+    return HeapBasedNarrowOop;
+  }
+
+  if (shift() != 0) {
+    return ZeroBasedNarrowOop;
+  }
+
+  return UnscaledNarrowOop;
+}
+
+const char* CompressedOops::mode_to_string(Mode mode) {
+  switch (mode) {
+    case UnscaledNarrowOop:
+      return "32-bit";
+    case ZeroBasedNarrowOop:
+      return "Zero based";
+    case DisjointBaseNarrowOop:
+      return "Non-zero disjoint base";
+    case HeapBasedNarrowOop:
+      return "Non-zero based";
+    default:
+      ShouldNotReachHere();
+      return "";
+  }
+}
+
+// Test whether bits of addr and possible offsets into the heap overlap.
+bool CompressedOops::is_disjoint_heap_base_address(address addr) {
+  return (((uint64_t)(intptr_t)addr) &
+          (((uint64_t)UCONST64(0xFFFFffffFFFFffff)) >> (32-LogMinObjAlignmentInBytes))) == 0;
+}
+
+// Check for disjoint base compressed oops.
+bool CompressedOops::base_disjoint() {
+  return _narrow_oop._base != NULL && is_disjoint_heap_base_address(_narrow_oop._base);
+}
+
+// Check for real heapbased compressed oops.
+// We must subtract the base as the bits overlap.
+// If we negate above function, we also get unscaled and zerobased.
+bool CompressedOops::base_overlaps() {
+  return _narrow_oop._base != NULL && !is_disjoint_heap_base_address(_narrow_oop._base);
+}
+
+void CompressedOops::print_mode(outputStream* st) {
+  st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
+            p2i(Universe::heap()->base()), Universe::heap()->reserved_region().byte_size()/M);
+
+  st->print(", Compressed Oops mode: %s", mode_to_string(mode()));
+
+  if (base() != 0) {
+    st->print(": " PTR_FORMAT, p2i(base()));
+  }
+
+  if (shift() != 0) {
+    st->print(", Oop shift amount: %d", shift());
+  }
+
+  if (!use_implicit_null_checks()) {
+    st->print(", no protected page in front of the heap");
+  }
+  st->cr();
+}
+
+// For UseCompressedClassPointers.
+NarrowPtrStruct CompressedKlassPointers::_narrow_klass = { NULL, 0, true };
+
+// CompressedClassSpaceSize set to 1GB, but appear 3GB away from _narrow_ptrs_base during CDS dump.
+uint64_t CompressedKlassPointers::_narrow_klass_range = (uint64_t(max_juint)+1);
+
+void CompressedKlassPointers::set_base(address base) {
+  assert(UseCompressedClassPointers, "no compressed klass ptrs?");
+  _narrow_klass._base   = base;
+}
+
+void CompressedKlassPointers::set_shift(int shift) {
+  assert(shift == 0 || shift == LogKlassAlignmentInBytes, "invalid shift for klass ptrs");
+  _narrow_klass._shift   = shift;
+}
+
+void CompressedKlassPointers::set_range(uint64_t range) {
+  assert(UseCompressedClassPointers, "no compressed klass ptrs?");
+  _narrow_klass_range = range;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/compressedOops.hpp	Thu May 09 14:26:03 2019 +0200
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_COMPRESSEDOOPS_HPP
+#define SHARE_OOPS_COMPRESSEDOOPS_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class outputStream;
+
+struct NarrowPtrStruct {
+  // Base address for oop-within-java-object materialization.
+  // NULL if using wide oops or zero based narrow oops.
+  address _base;
+  // Number of shift bits for encoding/decoding narrow ptrs.
+  // 0 if using wide ptrs or zero based unscaled narrow ptrs,
+  // LogMinObjAlignmentInBytes/LogKlassAlignmentInBytes otherwise.
+  int     _shift;
+  // Generate code with implicit null checks for narrow ptrs.
+  bool    _use_implicit_null_checks;
+};
+
+class CompressedOops : public AllStatic {
+  friend class VMStructs;
+
+  // For UseCompressedOops.
+  static NarrowPtrStruct _narrow_oop;
+
+  static address _narrow_ptrs_base;
+
+public:
+  // For UseCompressedOops
+  // Narrow Oop encoding mode:
+  // 0 - Use 32-bits oops without encoding when
+  //     NarrowOopHeapBaseMin + heap_size < 4Gb
+  // 1 - Use zero based compressed oops with encoding when
+  //     NarrowOopHeapBaseMin + heap_size < 32Gb
+  // 2 - Use compressed oops with disjoint heap base if
+  //     base is 32G-aligned and base > 0. This allows certain
+  //     optimizations in encoding/decoding.
+  //     Disjoint: Bits used in base are disjoint from bits used
+  //     for oops ==> oop = (cOop << 3) | base.  One can disjoint
+  //     the bits of an oop into base and compressed oop.
+  // 3 - Use compressed oops with heap base + encoding.
+  enum Mode {
+    UnscaledNarrowOop  = 0,
+    ZeroBasedNarrowOop = 1,
+    DisjointBaseNarrowOop = 2,
+    HeapBasedNarrowOop = 3,
+    AnyNarrowOopMode = 4
+  };
+
+  static void initialize();
+
+  static void set_base(address base);
+  static void set_shift(int shift);
+  static void set_use_implicit_null_checks(bool use);
+
+  static void set_ptrs_base(address addr);
+
+  static address  base()                     { return  _narrow_oop._base; }
+  static bool     is_base(void* addr)        { return (base() == (address)addr); }
+  static int      shift()                    { return  _narrow_oop._shift; }
+  static bool     use_implicit_null_checks() { return  _narrow_oop._use_implicit_null_checks; }
+
+  static address* ptrs_base_addr()           { return &_narrow_ptrs_base; }
+  static address  ptrs_base()                { return _narrow_ptrs_base; }
+
+  static Mode mode();
+  static const char* mode_to_string(Mode mode);
+
+  // Test whether bits of addr and possible offsets into the heap overlap.
+  static bool     is_disjoint_heap_base_address(address addr);
+
+  // Check for disjoint base compressed oops.
+  static bool     base_disjoint();
+
+  // Check for real heapbased compressed oops.
+  // We must subtract the base as the bits overlap.
+  // If we negate above function, we also get unscaled and zerobased.
+  static bool     base_overlaps();
+
+  static void     print_mode(outputStream* st);
+
+  static bool is_null(oop v)       { return v == NULL; }
+  static bool is_null(narrowOop v) { return v == 0; }
+
+  static inline oop decode_raw(narrowOop v);
+  static inline oop decode_not_null(narrowOop v);
+  static inline oop decode(narrowOop v);
+  static inline narrowOop encode_not_null(oop v);
+  static inline narrowOop encode(oop v);
+
+  // No conversions needed for these overloads
+  static oop decode_not_null(oop v)             { return v; }
+  static oop decode(oop v)                      { return v; }
+  static narrowOop encode_not_null(narrowOop v) { return v; }
+  static narrowOop encode(narrowOop v)          { return v; }
+};
+
+// For UseCompressedClassPointers.
+class CompressedKlassPointers : public AllStatic {
+  friend class VMStructs;
+
+  static NarrowPtrStruct _narrow_klass;
+
+  // CompressedClassSpaceSize set to 1GB, but appear 3GB away from _narrow_ptrs_base during CDS dump.
+  static uint64_t _narrow_klass_range;
+
+public:
+  static void set_base(address base);
+  static void set_shift(int shift);
+  static void set_range(uint64_t range);
+
+  static address  base()               { return  _narrow_klass._base; }
+  static uint64_t range()              { return  _narrow_klass_range; }
+  static int      shift()              { return  _narrow_klass._shift; }
+
+  static bool is_null(Klass* v)      { return v == NULL; }
+  static bool is_null(narrowKlass v) { return v == 0; }
+
+  static inline Klass* decode_raw(narrowKlass v);
+  static inline Klass* decode_not_null(narrowKlass v);
+  static inline Klass* decode(narrowKlass v);
+  static inline narrowKlass encode_not_null(Klass* v);
+  static inline narrowKlass encode(Klass* v);
+};
+
+#endif // SHARE_OOPS_COMPRESSEDOOPS_HPP
--- a/src/hotspot/share/oops/compressedOops.inline.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/oops/compressedOops.inline.hpp	Thu May 09 14:26:03 2019 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/oop.hpp"
 
 // Functions for encoding and decoding compressed oops.
@@ -39,46 +40,69 @@
 // offset from the heap base.  Saving the check for null can save instructions
 // in inner GC loops so these are separated.
 
-namespace CompressedOops {
-  inline bool is_null(oop obj)       { return obj == NULL; }
-  inline bool is_null(narrowOop obj) { return obj == 0; }
+inline oop CompressedOops::decode_raw(narrowOop v) {
+  return (oop)(void*)((uintptr_t)base() + ((uintptr_t)v << shift()));
+}
+
+inline oop CompressedOops::decode_not_null(narrowOop v) {
+  assert(!is_null(v), "narrow oop value can never be zero");
+  oop result = decode_raw(v);
+  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
+  return result;
+}
+
+inline oop CompressedOops::decode(narrowOop v) {
+  return is_null(v) ? (oop)NULL : decode_not_null(v);
+}
 
-  inline oop decode_not_null(narrowOop v) {
-    assert(!is_null(v), "narrow oop value can never be zero");
-    address base = Universe::narrow_oop_base();
-    int    shift = Universe::narrow_oop_shift();
-    oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
-    assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
-    return result;
-  }
+inline narrowOop CompressedOops::encode_not_null(oop v) {
+  assert(!is_null(v), "oop value can never be zero");
+  assert(check_obj_alignment(v), "Address not aligned");
+  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
+  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base(), 1));
+  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
+  uint64_t result = pd >> shift();
+  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
+  assert(oopDesc::equals_raw(decode(result), v), "reversibility");
+  return (narrowOop)result;
+}
 
-  inline oop decode(narrowOop v) {
-    return is_null(v) ? (oop)NULL : decode_not_null(v);
+inline narrowOop CompressedOops::encode(oop v) {
+  return is_null(v) ? (narrowOop)0 : encode_not_null(v);
+}
+
+static inline bool check_alignment(Klass* v) {
+  return (intptr_t)v % KlassAlignmentInBytes == 0;
+}
+
+inline Klass* CompressedKlassPointers::decode_raw(narrowKlass v) {
+  return (Klass*)(void*)((uintptr_t)base() + ((uintptr_t)v << shift()));
   }
 
-  inline narrowOop encode_not_null(oop v) {
-    assert(!is_null(v), "oop value can never be zero");
-    assert(check_obj_alignment(v), "Address not aligned");
-    assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
-    address base = Universe::narrow_oop_base();
-    int    shift = Universe::narrow_oop_shift();
-    uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
-    assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
-    uint64_t result = pd >> shift;
-    assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
-    assert(oopDesc::equals_raw(decode(result), v), "reversibility");
-    return (narrowOop)result;
-  }
+inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v) {
+  assert(!is_null(v), "narrow klass value can never be zero");
+  Klass* result = decode_raw(v);
+  assert(check_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
+  return result;
+}
+
+inline Klass* CompressedKlassPointers::decode(narrowKlass v) {
+  return is_null(v) ? (Klass*)NULL : decode_not_null(v);
+}
 
-  inline narrowOop encode(oop v) {
-    return is_null(v) ? (narrowOop)0 : encode_not_null(v);
-  }
+inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v) {
+  assert(!is_null(v), "klass value can never be zero");
+  assert(check_alignment(v), "Address not aligned");
+  uint64_t pd = (uint64_t)(pointer_delta((void*)v, base(), 1));
+  assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
+  uint64_t result = pd >> shift();
+  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
+  assert(decode(result) == v, "reversibility");
+  return (narrowKlass)result;
+}
 
-  // No conversions needed for these overloads
-  inline oop decode_not_null(oop v)             { return v; }
-  inline oop decode(oop v)                      { return v; }
-  inline narrowOop encode_not_null(narrowOop v) { return v; }
-  inline narrowOop encode(narrowOop v)          { return v; }
+inline narrowKlass CompressedKlassPointers::encode(Klass* v) {
+  return is_null(v) ? (narrowKlass)0 : encode_not_null(v);
 }
 
 #endif // SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
--- a/src/hotspot/share/oops/cpCache.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/oops/cpCache.cpp	Thu May 09 14:26:03 2019 +0200
@@ -37,6 +37,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/constantPool.inline.hpp"
 #include "oops/cpCache.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
--- a/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/oops/instanceRefKlass.inline.hpp	Thu May 09 14:26:03 2019 +0200
@@ -29,7 +29,6 @@
 #include "gc/shared/referenceProcessor.hpp"
 #include "logging/log.hpp"
 #include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/oops/klass.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/oops/klass.cpp	Thu May 09 14:26:03 2019 +0200
@@ -807,11 +807,6 @@
   guarantee(obj->klass()->is_klass(), "klass field is not a klass");
 }
 
-Klass* Klass::decode_klass_raw(narrowKlass narrow_klass) {
-  return (Klass*)(void*)( (uintptr_t)Universe::narrow_klass_base() +
-                         ((uintptr_t)narrow_klass << Universe::narrow_klass_shift()));
-}
-
 bool Klass::is_valid(Klass* k) {
   if (!is_aligned(k, sizeof(MetaWord))) return false;
   if ((size_t)k < os::min_page_size()) return false;
--- a/src/hotspot/share/oops/klass.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/oops/klass.hpp	Thu May 09 14:26:03 2019 +0200
@@ -692,18 +692,7 @@
   virtual void oop_verify_on(oop obj, outputStream* st);
 
   // for error reporting
-  static Klass* decode_klass_raw(narrowKlass narrow_klass);
   static bool is_valid(Klass* k);
-
-  static bool is_null(narrowKlass obj);
-  static bool is_null(Klass* obj);
-
-  // klass encoding for klass pointer in objects.
-  static narrowKlass encode_klass_not_null(Klass* v);
-  static narrowKlass encode_klass(Klass* v);
-
-  static Klass* decode_klass_not_null(narrowKlass v);
-  static Klass* decode_klass(narrowKlass v);
 };
 
 #endif // SHARE_OOPS_KLASS_HPP
--- a/src/hotspot/share/oops/klass.inline.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/oops/klass.inline.hpp	Thu May 09 14:26:03 2019 +0200
@@ -26,6 +26,7 @@
 #define SHARE_OOPS_KLASS_INLINE_HPP
 
 #include "memory/universe.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/klass.hpp"
 #include "oops/markOop.hpp"
 
@@ -34,41 +35,4 @@
   _prototype_header = header;
 }
 
-inline bool Klass::is_null(Klass* obj)  { return obj == NULL; }
-inline bool Klass::is_null(narrowKlass obj) { return obj == 0; }
-
-// Encoding and decoding for klass field.
-
-inline bool check_klass_alignment(Klass* obj) {
-  return (intptr_t)obj % KlassAlignmentInBytes == 0;
-}
-
-inline narrowKlass Klass::encode_klass_not_null(Klass* v) {
-  assert(!is_null(v), "klass value can never be zero");
-  assert(check_klass_alignment(v), "Address not aligned");
-  int    shift = Universe::narrow_klass_shift();
-  uint64_t pd = (uint64_t)(pointer_delta((void*)v, Universe::narrow_klass_base(), 1));
-  assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
-  uint64_t result = pd >> shift;
-  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
-  assert(decode_klass(result) == v, "reversibility");
-  return (narrowKlass)result;
-}
-
-inline narrowKlass Klass::encode_klass(Klass* v) {
-  return is_null(v) ? (narrowKlass)0 : encode_klass_not_null(v);
-}
-
-inline Klass* Klass::decode_klass_not_null(narrowKlass v) {
-  assert(!is_null(v), "narrow klass value can never be zero");
-  int    shift = Universe::narrow_klass_shift();
-  Klass* result = (Klass*)(void*)((uintptr_t)Universe::narrow_klass_base() + ((uintptr_t)v << shift));
-  assert(check_klass_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
-  return result;
-}
-
-inline Klass* Klass::decode_klass(narrowKlass v) {
-  return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
-}
-
 #endif // SHARE_OOPS_KLASS_INLINE_HPP
--- a/src/hotspot/share/oops/oop.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/oops/oop.cpp	Thu May 09 14:26:03 2019 +0200
@@ -28,6 +28,7 @@
 #include "memory/heapShared.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/verifyOopClosure.hpp"
 #include "runtime/handles.inline.hpp"
@@ -150,16 +151,11 @@
   return UseCompressedClassPointers;
 }
 
-oop oopDesc::decode_oop_raw(narrowOop narrow_oop) {
-  return (oop)(void*)( (uintptr_t)Universe::narrow_oop_base() +
-                      ((uintptr_t)narrow_oop << Universe::narrow_oop_shift()));
-}
-
 void* oopDesc::load_klass_raw(oop obj) {
   if (UseCompressedClassPointers) {
     narrowKlass narrow_klass = *(obj->compressed_klass_addr());
     if (narrow_klass == 0) return NULL;
-    return (void*)Klass::decode_klass_raw(narrow_klass);
+    return (void*)CompressedKlassPointers::decode_raw(narrow_klass);
   } else {
     return *(void**)(obj->klass_addr());
   }
@@ -170,7 +166,7 @@
   if (UseCompressedOops) {
     narrowOop narrow_oop = *(narrowOop*)addr;
     if (narrow_oop == 0) return NULL;
-    return (void*)decode_oop_raw(narrow_oop);
+    return (void*)CompressedOops::decode_raw(narrow_oop);
   } else {
     return *(void**)addr;
   }
--- a/src/hotspot/share/oops/oop.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/oops/oop.hpp	Thu May 09 14:26:03 2019 +0200
@@ -323,7 +323,6 @@
   }
 
   // for error reporting
-  static oop   decode_oop_raw(narrowOop narrow_oop);
   static void* load_klass_raw(oop obj);
   static void* load_oop_raw(oop obj, int offset);
   static bool  is_valid(oop obj);
--- a/src/hotspot/share/oops/oop.inline.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/oops/oop.inline.hpp	Thu May 09 14:26:03 2019 +0200
@@ -88,7 +88,7 @@
 
 Klass* oopDesc::klass() const {
   if (UseCompressedClassPointers) {
-    return Klass::decode_klass_not_null(_metadata._compressed_klass);
+    return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
   } else {
     return _metadata._klass;
   }
@@ -96,7 +96,7 @@
 
 Klass* oopDesc::klass_or_null() const volatile {
   if (UseCompressedClassPointers) {
-    return Klass::decode_klass(_metadata._compressed_klass);
+    return CompressedKlassPointers::decode(_metadata._compressed_klass);
   } else {
     return _metadata._klass;
   }
@@ -107,7 +107,7 @@
     // Workaround for non-const load_acquire parameter.
     const volatile narrowKlass* addr = &_metadata._compressed_klass;
     volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
-    return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
+    return CompressedKlassPointers::decode(OrderAccess::load_acquire(xaddr));
   } else {
     return OrderAccess::load_acquire(&_metadata._klass);
   }
@@ -144,7 +144,7 @@
 void oopDesc::set_klass(Klass* k) {
   CHECK_SET_KLASS(k);
   if (UseCompressedClassPointers) {
-    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
+    *compressed_klass_addr() = CompressedKlassPointers::encode_not_null(k);
   } else {
     *klass_addr() = k;
   }
@@ -154,7 +154,7 @@
   CHECK_SET_KLASS(klass);
   if (UseCompressedClassPointers) {
     OrderAccess::release_store(compressed_klass_addr(mem),
-                               Klass::encode_klass_not_null(klass));
+                               CompressedKlassPointers::encode_not_null(klass));
   } else {
     OrderAccess::release_store(klass_addr(mem), klass);
   }
--- a/src/hotspot/share/opto/lcm.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/opto/lcm.cpp	Thu May 09 14:26:03 2019 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/compressedOops.hpp"
 #include "opto/ad.hpp"
 #include "opto/block.hpp"
 #include "opto/c2compiler.hpp"
@@ -40,7 +41,7 @@
 // Check whether val is not-null-decoded compressed oop,
 // i.e. will grab into the base of the heap if it represents NULL.
 static bool accesses_heap_base_zone(Node *val) {
-  if (Universe::narrow_oop_base() != NULL) { // Implies UseCompressedOops.
+  if (CompressedOops::base() != NULL) { // Implies UseCompressedOops.
     if (val && val->is_Mach()) {
       if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) {
         // This assumes all Decodes with TypePtr::NotNull are matched to nodes that
@@ -66,8 +67,8 @@
     return false;  // Implicit null check will work.
   }
   // Also a read accessing the base of a heap-based compressed heap will trap.
-  if (accesses_heap_base_zone(val) &&                    // Hits the base zone page.
-      Universe::narrow_oop_use_implicit_null_checks()) { // Base zone page is protected.
+  if (accesses_heap_base_zone(val) &&         // Hits the base zone page.
+      CompressedOops::use_implicit_null_checks()) { // Base zone page is protected.
     return false;
   }
 
@@ -261,13 +262,13 @@
         // Give up if offset is beyond page size or if heap base is not protected.
         if (val->bottom_type()->isa_narrowoop() &&
             (MacroAssembler::needs_explicit_null_check(offset) ||
-             !Universe::narrow_oop_use_implicit_null_checks()))
+             !CompressedOops::use_implicit_null_checks()))
           continue;
         // cannot reason about it; is probably not implicit null exception
       } else {
         const TypePtr* tptr;
-        if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 ||
-                                  Universe::narrow_klass_shift() == 0)) {
+        if (UseCompressedOops && (CompressedOops::shift() == 0 ||
+                                  CompressedKlassPointers::shift() == 0)) {
           // 32-bits narrow oop can be the base of address expressions
           tptr = base->get_ptr_type();
         } else {
@@ -283,7 +284,7 @@
           continue;
         // Give up if base is a decode node and the heap base is not protected.
         if (base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_DecodeN &&
-            !Universe::narrow_oop_use_implicit_null_checks())
+            !CompressedOops::use_implicit_null_checks())
           continue;
       }
     }
--- a/src/hotspot/share/opto/machnode.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/opto/machnode.cpp	Thu May 09 14:26:03 2019 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "oops/compressedOops.hpp"
 #include "opto/machnode.hpp"
 #include "opto/regalloc.hpp"
 #include "utilities/vmError.hpp"
@@ -354,11 +355,11 @@
   if (base == NodeSentinel)  return TypePtr::BOTTOM;
 
   const Type* t = base->bottom_type();
-  if (t->isa_narrowoop() && Universe::narrow_oop_shift() == 0) {
+  if (t->isa_narrowoop() && CompressedOops::shift() == 0) {
     // 32-bit unscaled narrow oop can be the base of any address expression
     t = t->make_ptr();
   }
-  if (t->isa_narrowklass() && Universe::narrow_klass_shift() == 0) {
+  if (t->isa_narrowklass() && CompressedKlassPointers::shift() == 0) {
     // 32-bit unscaled narrow oop can be the base of any address expression
     t = t->make_ptr();
   }
--- a/src/hotspot/share/opto/matcher.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/opto/matcher.cpp	Thu May 09 14:26:03 2019 +0200
@@ -27,6 +27,7 @@
 #include "gc/shared/c2/barrierSetC2.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.hpp"
 #include "opto/ad.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
@@ -2482,6 +2483,19 @@
   }
 }
 
+bool Matcher::gen_narrow_oop_implicit_null_checks() {
+  // Advice matcher to perform null checks on the narrow oop side.
+  // Implicit checks are not possible on the uncompressed oop side anyway
+  // (at least not for read accesses).
+  // Performs significantly better (especially on Power 6).
+  if (!os::zero_page_read_protected()) {
+    return true;
+  }
+  return CompressedOops::use_implicit_null_checks() &&
+         (narrow_oop_use_complex_address() ||
+          CompressedOops::base() != NULL);
+}
+
 // Used by the DFA in dfa_xxx.cpp.  Check for a following barrier or
 // atomic instruction acting as a store_load barrier without any
 // intervening volatile load, and thus we don't need a barrier here.
--- a/src/hotspot/share/opto/matcher.hpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/opto/matcher.hpp	Thu May 09 14:26:03 2019 +0200
@@ -487,18 +487,7 @@
   // [oop_reg + offset]
   // NullCheck oop_reg
   //
-  inline static bool gen_narrow_oop_implicit_null_checks() {
-    // Advice matcher to perform null checks on the narrow oop side.
-    // Implicit checks are not possible on the uncompressed oop side anyway
-    // (at least not for read accesses).
-    // Performs significantly better (especially on Power 6).
-    if (!os::zero_page_read_protected()) {
-      return true;
-    }
-    return Universe::narrow_oop_use_implicit_null_checks() &&
-           (narrow_oop_use_complex_address() ||
-            Universe::narrow_oop_base() != NULL);
-  }
+  static bool gen_narrow_oop_implicit_null_checks();
 
   // Is it better to copy float constants, or load them directly from memory?
   // Intel can load a float constant from a direct address, requiring no
--- a/src/hotspot/share/prims/whitebox.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/prims/whitebox.cpp	Thu May 09 14:26:03 2019 +0200
@@ -45,6 +45,7 @@
 #include "memory/universe.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/array.hpp"
+#include "oops/compressedOops.hpp"
 #include "oops/constantPool.inline.hpp"
 #include "oops/method.inline.hpp"
 #include "oops/objArrayKlass.hpp"
@@ -259,17 +260,17 @@
 
   // Check if constraints are complied
   if (!( UseCompressedOops && rhs.base() != NULL &&
-         Universe::narrow_oop_base() != NULL &&
-         Universe::narrow_oop_use_implicit_null_checks() )) {
+         CompressedOops::base() != NULL &&
+         CompressedOops::use_implicit_null_checks() )) {
     tty->print_cr("WB_ReadFromNoaccessArea method is useless:\n "
                   "\tUseCompressedOops is %d\n"
                   "\trhs.base() is " PTR_FORMAT "\n"
-                  "\tUniverse::narrow_oop_base() is " PTR_FORMAT "\n"
-                  "\tUniverse::narrow_oop_use_implicit_null_checks() is %d",
+                  "\tCompressedOops::base() is " PTR_FORMAT "\n"
+                  "\tCompressedOops::use_implicit_null_checks() is %d",
                   UseCompressedOops,
                   p2i(rhs.base()),
-                  p2i(Universe::narrow_oop_base()),
-                  Universe::narrow_oop_use_implicit_null_checks());
+                  p2i(CompressedOops::base()),
+                  CompressedOops::use_implicit_null_checks());
     return;
   }
   tty->print_cr("Reading from no access area... ");
--- a/src/hotspot/share/runtime/os.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/runtime/os.cpp	Thu May 09 14:26:03 2019 +0200
@@ -42,6 +42,7 @@
 #include "memory/guardedMemory.hpp"
 #endif
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
@@ -1072,7 +1073,7 @@
 #ifdef _LP64
   if (UseCompressedOops && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) {
     narrowOop narrow_oop = (narrowOop)(uintptr_t)addr;
-    oop o = oopDesc::decode_oop_raw(narrow_oop);
+    oop o = CompressedOops::decode_raw(narrow_oop);
 
     if (oopDesc::is_valid(o)) {
       st->print(UINT32_FORMAT " is a compressed pointer to object: ", narrow_oop);
@@ -1143,7 +1144,7 @@
 #ifdef _LP64
   if (UseCompressedClassPointers && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) {
     narrowKlass narrow_klass = (narrowKlass)(uintptr_t)addr;
-    Klass* k = Klass::decode_klass_raw(narrow_klass);
+    Klass* k = CompressedKlassPointers::decode_raw(narrow_klass);
 
     if (Klass::is_valid(k)) {
       st->print_cr(UINT32_FORMAT " is a compressed pointer to class: " INTPTR_FORMAT, narrow_klass, p2i((HeapWord*)k));
--- a/src/hotspot/share/runtime/stackValue.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/runtime/stackValue.cpp	Thu May 09 14:26:03 2019 +0200
@@ -114,7 +114,7 @@
     case Location::oop: {
       oop val = *(oop *)value_addr;
 #ifdef _LP64
-      if (Universe::is_narrow_oop_base(val)) {
+      if (CompressedOops::is_base(val)) {
          // Compiled code may produce decoded oop = narrow_oop_base
          // when a narrow oop implicit null check is used.
          // The narrow_oop_base could be NULL or be the address
--- a/src/hotspot/share/runtime/vmStructs.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Thu May 09 14:26:03 2019 +0200
@@ -370,6 +370,7 @@
   nonstatic_field(JNIid,                       _holder,                                       Klass*)                                \
   nonstatic_field(JNIid,                       _next,                                         JNIid*)                                \
   nonstatic_field(JNIid,                       _offset,                                       int)                                   \
+                                                                                                                                     \
   /************/                                                                                                                     \
   /* Universe */                                                                                                                     \
   /************/                                                                                                                     \
@@ -389,11 +390,21 @@
      static_field(Universe,                    _verify_oop_mask,                              uintptr_t)                             \
      static_field(Universe,                    _verify_oop_bits,                              uintptr_t)                             \
      static_field(Universe,                    _non_oop_bits,                                 intptr_t)                              \
-     static_field(Universe,                    _narrow_oop._base,                             address)                               \
-     static_field(Universe,                    _narrow_oop._shift,                            int)                                   \
-     static_field(Universe,                    _narrow_oop._use_implicit_null_checks,         bool)                                  \
-     static_field(Universe,                    _narrow_klass._base,                           address)                               \
-     static_field(Universe,                    _narrow_klass._shift,                          int)                                   \
+                                                                                                                                     \
+  /******************/                                                                                                               \
+  /* CompressedOops */                                                                                                               \
+  /******************/                                                                                                               \
+                                                                                                                                     \
+     static_field(CompressedOops,              _narrow_oop._base,                             address)                               \
+     static_field(CompressedOops,              _narrow_oop._shift,                            int)                                   \
+     static_field(CompressedOops,              _narrow_oop._use_implicit_null_checks,         bool)                                  \
+                                                                                                                                     \
+  /***************************/                                                                                                      \
+  /* CompressedKlassPointers */                                                                                                      \
+  /***************************/                                                                                                      \
+                                                                                                                                     \
+     static_field(CompressedKlassPointers,     _narrow_klass._base,                           address)                               \
+     static_field(CompressedKlassPointers,     _narrow_klass._shift,                          int)                                   \
                                                                                                                                      \
   /******/                                                                                                                           \
   /* os */                                                                                                                           \
@@ -1981,6 +1992,8 @@
   declare_toplevel_type(StubQueue*)                                       \
   declare_toplevel_type(Thread*)                                          \
   declare_toplevel_type(Universe)                                         \
+  declare_toplevel_type(CompressedOops)                                   \
+  declare_toplevel_type(CompressedKlassPointers)                          \
   declare_toplevel_type(os)                                               \
   declare_toplevel_type(vframeArray)                                      \
   declare_toplevel_type(vframeArrayElement)                               \
--- a/src/hotspot/share/utilities/vmError.cpp	Thu May 09 07:21:32 2019 -0400
+++ b/src/hotspot/share/utilities/vmError.cpp	Thu May 09 14:26:03 2019 +0200
@@ -31,6 +31,7 @@
 #include "logging/logConfiguration.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.hpp"
 #include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/atomic.hpp"
@@ -291,14 +292,14 @@
   st->print_cr("#   Decrease Java thread stack sizes (-Xss)");
   st->print_cr("#   Set larger code cache with -XX:ReservedCodeCacheSize=");
   if (UseCompressedOops) {
-    switch (Universe::narrow_oop_mode()) {
-      case Universe::UnscaledNarrowOop:
+    switch (CompressedOops::mode()) {
+      case CompressedOops::UnscaledNarrowOop:
         st->print_cr("#   JVM is running with Unscaled Compressed Oops mode in which the Java heap is");
         st->print_cr("#     placed in the first 4GB address space. The Java Heap base address is the");
         st->print_cr("#     maximum limit for the native heap growth. Please use -XX:HeapBaseMinAddress");
         st->print_cr("#     to set the Java Heap base and to place the Java Heap above 4GB virtual address.");
         break;
-      case Universe::ZeroBasedNarrowOop:
+      case CompressedOops::ZeroBasedNarrowOop:
         st->print_cr("#   JVM is running with Zero Based Compressed Oops mode in which the Java heap is");
         st->print_cr("#     placed in the first 32GB address space. The Java Heap base address is the");
         st->print_cr("#     maximum limit for the native heap growth. Please use -XX:HeapBaseMinAddress");
@@ -882,7 +883,7 @@
   STEP("printing compressed oops mode")
 
      if (_verbose && UseCompressedOops) {
-       Universe::print_compressed_oops_mode(st);
+       CompressedOops::print_mode(st);
        if (UseCompressedClassPointers) {
          Metaspace::print_compressed_class_space(st);
        }
@@ -1083,7 +1084,7 @@
   // STEP("printing compressed oops mode")
 
   if (UseCompressedOops) {
-    Universe::print_compressed_oops_mode(st);
+    CompressedOops::print_mode(st);
     if (UseCompressedClassPointers) {
       Metaspace::print_compressed_class_space(st);
     }
@@ -1804,4 +1805,3 @@
   ShouldNotReachHere();
 }
 #endif // !PRODUCT
-
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Thu May 09 07:21:32 2019 -0400
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Thu May 09 14:26:03 2019 +0200
@@ -54,17 +54,6 @@
   private static sun.jvm.hotspot.types.OopField mainThreadGroupField;
   private static sun.jvm.hotspot.types.OopField systemThreadGroupField;
 
-  private static AddressField narrowOopBaseField;
-  private static CIntegerField narrowOopShiftField;
-  private static AddressField narrowKlassBaseField;
-  private static CIntegerField narrowKlassShiftField;
-
-  public enum NARROW_OOP_MODE {
-    UnscaledNarrowOop,
-    ZeroBasedNarrowOop,
-    HeapBasedNarrowOop
-  }
-
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -106,55 +95,15 @@
     mainThreadGroupField   = type.getOopField("_main_thread_group");
     systemThreadGroupField = type.getOopField("_system_thread_group");
 
-    narrowOopBaseField = type.getAddressField("_narrow_oop._base");
-    narrowOopShiftField = type.getCIntegerField("_narrow_oop._shift");
-    narrowKlassBaseField = type.getAddressField("_narrow_klass._base");
-    narrowKlassShiftField = type.getCIntegerField("_narrow_klass._shift");
-
     UniverseExt.initialize(heapConstructor);
   }
 
   public Universe() {
   }
-  public static String narrowOopModeToString(NARROW_OOP_MODE mode) {
-    switch (mode) {
-    case UnscaledNarrowOop:
-      return "32-bits Oops";
-    case ZeroBasedNarrowOop:
-      return "zero based Compressed Oops";
-    case HeapBasedNarrowOop:
-      return "Compressed Oops with base";
-    }
-    return "";
-  }
   public CollectedHeap heap() {
     return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
   }
 
-  public static long getNarrowOopBase() {
-    if (narrowOopBaseField.getValue() == null) {
-      return 0;
-    } else {
-      return narrowOopBaseField.getValue().minus(null);
-    }
-  }
-
-  public static int getNarrowOopShift() {
-    return (int)narrowOopShiftField.getValue();
-  }
-
-  public static long getNarrowKlassBase() {
-    if (narrowKlassBaseField.getValue() == null) {
-      return 0;
-    } else {
-      return narrowKlassBaseField.getValue().minus(null);
-    }
-  }
-
-  public static int getNarrowKlassShift() {
-    return (int)narrowKlassShiftField.getValue();
-  }
-
 
   /** Returns "TRUE" iff "p" points into the allocated area of the heap. */
   public boolean isIn(Address p) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/CompressedKlassPointers.java	Thu May 09 14:26:03 2019 +0200
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class CompressedKlassPointers {
+  private static AddressField baseField;
+  private static CIntegerField shiftField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static boolean typeExists(TypeDataBase db, String type) {
+      try {
+          db.lookupType(type);
+      } catch (RuntimeException e) {
+          return false;
+      }
+      return true;
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("CompressedKlassPointers");
+
+    baseField = type.getAddressField("_narrow_klass._base");
+    shiftField = type.getCIntegerField("_narrow_klass._shift");
+  }
+
+  public CompressedKlassPointers() {
+  }
+
+  public static long getBase() {
+    if (baseField.getValue() == null) {
+      return 0;
+    } else {
+      return baseField.getValue().minus(null);
+    }
+  }
+
+  public static int getShift() {
+    return (int)shiftField.getValue();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/CompressedOops.java	Thu May 09 14:26:03 2019 +0200
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class CompressedOops {
+  private static AddressField baseField;
+  private static CIntegerField shiftField;
+
+  public enum Mode {
+    UnscaledNarrowOop,
+    ZeroBasedNarrowOop,
+    HeapBasedNarrowOop
+  }
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static boolean typeExists(TypeDataBase db, String type) {
+      try {
+          db.lookupType(type);
+      } catch (RuntimeException e) {
+          return false;
+      }
+      return true;
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("CompressedOops");
+
+    baseField = type.getAddressField("_narrow_oop._base");
+    shiftField = type.getCIntegerField("_narrow_oop._shift");
+  }
+
+  public CompressedOops() {
+  }
+  public static String modeToString(Mode mode) {
+    switch (mode) {
+    case UnscaledNarrowOop:
+      return "32-bits Oops";
+    case ZeroBasedNarrowOop:
+      return "zero based Compressed Oops";
+    case HeapBasedNarrowOop:
+      return "Compressed Oops with base";
+    }
+    return "";
+  }
+
+  public static long getBase() {
+    if (baseField.getValue() == null) {
+      return 0;
+    } else {
+      return baseField.getValue().minus(null);
+    }
+  }
+
+  public static int getShift() {
+    return (int)shiftField.getValue();
+  }
+}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu May 09 07:21:32 2019 -0400
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu May 09 14:26:03 2019 +0200
@@ -544,8 +544,8 @@
     }
 
     debugger.putHeapConst(soleInstance.getHeapOopSize(), soleInstance.getKlassPtrSize(),
-                          Universe.getNarrowOopBase(), Universe.getNarrowOopShift(),
-                          Universe.getNarrowKlassBase(), Universe.getNarrowKlassShift());
+                          CompressedOops.getBase(), CompressedOops.getShift(),
+                          CompressedKlassPointers.getBase(), CompressedKlassPointers.getShift());
   }
 
   /** This is used by the debugging system */