8210754: print_location is not reliable enough (printing register info)
author mdoerr
Thu, 04 Oct 2018 16:39:07 +0200
changeset 52014 1aa9beac610e
parent 52013 92383597fa21
child 52015 821bfc24d750
8210754: print_location is not reliable enough (printing register info) Reviewed-by: stuefe, coleenp
src/hotspot/share/classfile/classLoaderDataGraph.cpp
src/hotspot/share/classfile/classLoaderDataGraph.hpp
src/hotspot/share/code/codeBlob.cpp
src/hotspot/share/code/codeBlob.hpp
src/hotspot/share/memory/metaspace.cpp
src/hotspot/share/memory/metaspace.hpp
src/hotspot/share/memory/metaspace/virtualSpaceList.cpp
src/hotspot/share/memory/metaspace/virtualSpaceList.hpp
src/hotspot/share/oops/klass.cpp
src/hotspot/share/oops/klass.hpp
src/hotspot/share/oops/oop.cpp
src/hotspot/share/oops/oop.hpp
src/hotspot/share/oops/symbol.cpp
src/hotspot/share/oops/symbol.hpp
src/hotspot/share/runtime/os.cpp
src/hotspot/share/runtime/os.hpp
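The recurring pattern in this change is to validate a raw pointer through a chain of cheap, lock-free checks, ordered from cheapest to most expensive, before it is ever dereferenced, so that print_location can be fed arbitrary register contents from the error handler. Below is a minimal standalone sketch of that idea; Region, g_committed and looks_like_meta are illustrative stand-ins only, not HotSpot code (in the patch the corresponding checks are os::is_readable_range() and MetaspaceUtils::is_range_in_committed()):

    #include <cstdint>
    #include <cstddef>

    struct Region { uintptr_t base; size_t size; };     // one committed memory range

    // Hypothetical committed-memory map; HotSpot consults the metaspace
    // virtual space lists and the CDS regions instead.
    static const Region g_committed[] = { { 0x10000, 64 * 1024 } };

    static bool range_in_committed(const void* p, size_t len) {
      uintptr_t a = (uintptr_t)p;
      for (const Region& r : g_committed) {
        if (a >= r.base && a + len <= r.base + r.size) return true;
      }
      return false;
    }

    struct Meta { uint32_t magic; };                     // stand-in for Klass/Symbol

    // Chain cheap checks, cheapest first; each check guards the next access.
    static bool looks_like_meta(const void* p) {
      if ((uintptr_t)p % alignof(Meta) != 0) return false;     // alignment
      if ((uintptr_t)p < 4096) return false;                   // below minimum page
      if (!range_in_committed(p, sizeof(Meta))) return false;  // whole object readable
      return ((const Meta*)p)->magic == 0xC0DECAFEu;           // now safe to dereference
    }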
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Thu Oct 04 16:39:07 2018 +0200
@@ -487,6 +487,21 @@
 }
 #endif // PRODUCT
 
+bool ClassLoaderDataGraph::is_valid(ClassLoaderData* loader_data) {
+  DEBUG_ONLY( if (!VMError::is_error_reported()) { assert_locked_or_safepoint(ClassLoaderDataGraph_lock); } )
+  if (loader_data != NULL) {
+    if (loader_data == ClassLoaderData::the_null_class_loader_data()) {
+      return true;
+    }
+    for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
+      if (loader_data == data) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 // Move class loader data from main list to the unloaded list for unloading
 // and deallocation later.
 bool ClassLoaderDataGraph::do_unloading(bool do_cleaning) {
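is_valid() walks the class loader data list without the ClassLoaderDataGraph_lock while an error is being reported; this is tolerable because entries are only prepended under the lock and only unlinked at a safepoint, so a lock-free reader sees a stable snapshot. A small standalone sketch of that reader pattern under those assumptions (Node and g_head are illustrative, not the CLDG types):

    #include <atomic>

    struct Node { int value; Node* next; };

    // The writer publishes fully-constructed nodes with a release store;
    // a reader that cannot take the lock (e.g. an error handler) may still
    // traverse safely as long as nodes are not freed concurrently.
    static std::atomic<Node*> g_head{nullptr};

    static bool list_contains(const Node* candidate) {
      for (Node* n = g_head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
        if (n == candidate) return true;
      }
      return false;
    }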
--- a/src/hotspot/share/classfile/classLoaderDataGraph.hpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.hpp	Thu Oct 04 16:39:07 2018 +0200
@@ -146,6 +146,10 @@
 #ifndef PRODUCT
   static bool contains_loader_data(ClassLoaderData* loader_data);
 #endif
+
+  // Check if ClassLoaderData is part of the ClassLoaderDataGraph (not unloaded)
+  // Usage without lock only allowed during error reporting.
+  static bool is_valid(ClassLoaderData* loader_data);
 };
 
 class LockedClassesDo : public KlassClosure {
--- a/src/hotspot/share/code/codeBlob.cpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/code/codeBlob.cpp	Thu Oct 04 16:39:07 2018 +0200
@@ -26,7 +26,9 @@
 #include "jvm.h"
 #include "code/codeBlob.hpp"
 #include "code/codeCache.hpp"
+#include "code/icBuffer.hpp"
 #include "code/relocInfo.hpp"
+#include "code/vtableStubs.hpp"
 #include "compiler/disassembler.hpp"
 #include "interpreter/bytecode.hpp"
 #include "memory/allocation.inline.hpp"
@@ -559,6 +561,67 @@
   st->print_cr("[CodeBlob]");
 }
 
+void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
+  if (is_buffer_blob()) {
+    // the interpreter is generated into a buffer blob
+    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
+    if (i != NULL) {
+      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
+      i->print_on(st);
+      return;
+    }
+    if (Interpreter::contains(addr)) {
+      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
+                   " (not bytecode specific)", p2i(addr));
+      return;
+    }
+    //
+    if (AdapterHandlerLibrary::contains(this)) {
+      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
+      AdapterHandlerLibrary::print_handler_on(st, this);
+    }
+    // the stubroutines are generated into a buffer blob
+    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
+    if (d != NULL) {
+      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
+      d->print_on(st);
+      st->cr();
+      return;
+    }
+    if (StubRoutines::contains(addr)) {
+      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
+      return;
+    }
+    // the InlineCacheBuffer is using stubs generated into a buffer blob
+    if (InlineCacheBuffer::contains(addr)) {
+      st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
+      return;
+    }
+    VtableStub* v = VtableStubs::stub_containing(addr);
+    if (v != NULL) {
+      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
+      v->print_on(st);
+      st->cr();
+      return;
+    }
+  }
+  if (is_nmethod()) {
+    nmethod* nm = (nmethod*)this;
+    ResourceMark rm;
+    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
+              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
+    if (verbose) {
+      st->print(" for ");
+      nm->method()->print_value_on(st);
+    }
+    st->cr();
+    nm->print_nmethod(verbose);
+    return;
+  }
+  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
+  print_on(st);
+}
+
 void RuntimeBlob::verify() {
   ShouldNotReachHere();
 }
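dump_for_addr() is the blob-specific half of the old os::print_location() logic, moved into CodeBlob so that the caller only has to find the blob and delegate. A trivial sketch of that shape of refactoring (Blob and the messages are illustrative only, not the CodeBlob interface):

    #include <cstdio>

    struct Blob {
      const char* name;
      // Before: callers inspected the blob's kind and printed details themselves.
      // After: the blob knows how to describe an address inside itself.
      void dump_for_addr(const void* addr) const {
        std::printf("%p is inside blob '%s'\n", addr, name);
      }
    };

    static void print_location(const Blob* b, const void* addr) {
      if (b != nullptr) {
        b->dump_for_addr(addr);   // one call replaces a long if/else chain
        return;
      }
      std::printf("%p is not in the code cache\n", addr);
    }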
--- a/src/hotspot/share/code/codeBlob.hpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/code/codeBlob.hpp	Thu Oct 04 16:39:07 2018 +0200
@@ -221,6 +221,7 @@
   virtual void print() const                     { print_on(tty); };
   virtual void print_on(outputStream* st) const;
   virtual void print_value_on(outputStream* st) const;
+  void dump_for_addr(address addr, outputStream* st, bool verbose) const;
   void print_code();
 
   // Print the comment associated with offset on stream, if there is one
--- a/src/hotspot/share/memory/metaspace.cpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/memory/metaspace.cpp	Thu Oct 04 16:39:07 2018 +0200
@@ -863,6 +863,42 @@
 #endif
 }
 
+// Utils to check if a pointer or range is part of a committed metaspace region.
+metaspace::VirtualSpaceNode* MetaspaceUtils::find_enclosing_virtual_space(const void* p) {
+  VirtualSpaceNode* vsn = Metaspace::space_list()->find_enclosing_space(p);
+  if (Metaspace::using_class_space() && vsn == NULL) {
+    vsn = Metaspace::class_space_list()->find_enclosing_space(p);
+  }
+  return vsn;
+}
+
+bool MetaspaceUtils::is_in_committed(const void* p) {
+#if INCLUDE_CDS
+  if (UseSharedSpaces) {
+    for (int idx = MetaspaceShared::ro; idx <= MetaspaceShared::mc; idx++) {
+      if (FileMapInfo::current_info()->is_in_shared_region(p, idx)) {
+        return true;
+      }
+    }
+  }
+#endif
+  return find_enclosing_virtual_space(p) != NULL;
+}
+
+bool MetaspaceUtils::is_range_in_committed(const void* from, const void* to) {
+#if INCLUDE_CDS
+  if (UseSharedSpaces) {
+    for (int idx = MetaspaceShared::ro; idx <= MetaspaceShared::mc; idx++) {
+      if (FileMapInfo::current_info()->is_in_shared_region(from, idx)) {
+        return FileMapInfo::current_info()->is_in_shared_region(to, idx);
+      }
+    }
+  }
+#endif
+  VirtualSpaceNode* vsn = find_enclosing_virtual_space(from);
+  return (vsn != NULL) && vsn->contains(to);
+}
+
 
 // Metaspace methods
 
--- a/src/hotspot/share/memory/metaspace.hpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/memory/metaspace.hpp	Thu Oct 04 16:39:07 2018 +0200
@@ -71,6 +71,7 @@
   class PrintCLDMetaspaceInfoClosure;
   class SpaceManager;
   class VirtualSpaceList;
+  class VirtualSpaceNode;
 }
 
 // Metaspaces each have a  SpaceManager and allocations
@@ -297,6 +298,10 @@
   // Spacemanager updates running counters.
   friend class metaspace::SpaceManager;
 
+  // Special access for error reporting (checks without locks).
+  friend class oopDesc;
+  friend class Klass;
+
   // Running counters for statistics concerning in-use chunks.
   // Note: capacity = used + free + waste + overhead. Note that we do not
   // count free and waste. Their sum can be deduced from the three other values.
@@ -324,6 +329,12 @@
   // Helper for print_xx_report.
   static void print_vs(outputStream* out, size_t scale);
 
+  // Utils to check if a pointer or range is part of a committed metaspace region
+  // without acquiring any locks.
+  static metaspace::VirtualSpaceNode* find_enclosing_virtual_space(const void* p);
+  static bool is_in_committed(const void* p);
+  static bool is_range_in_committed(const void* from, const void* to);
+
 public:
 
   // Collect used metaspace statistics. This involves walking the CLDG. The resulting
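Note that is_range_in_committed() only accepts a range when a single region covers both endpoints: it resolves the enclosing virtual space (or CDS region) for `from` and then asks that same region about `to`. A compact standalone sketch of that rule, with an illustrative Region/find_enclosing pair rather than the Metaspace types:

    #include <cstdint>
    #include <cstddef>

    struct Region {
      uintptr_t base; size_t size;
      bool contains(uintptr_t a) const { return a >= base && a < base + size; }
    };

    static const Region g_regions[] = { { 0x100000, 4096 }, { 0x200000, 8192 } };

    static const Region* find_enclosing(uintptr_t a) {
      for (const Region& r : g_regions) {
        if (r.contains(a)) return &r;
      }
      return nullptr;
    }

    // A [from, to] range is only accepted if one region contains both ends;
    // two adjacent regions do not count, mirroring is_range_in_committed().
    static bool range_in_committed(uintptr_t from, uintptr_t to) {
      const Region* r = find_enclosing(from);
      return r != nullptr && r->contains(to);
    }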
--- a/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	Thu Oct 04 16:39:07 2018 +0200
@@ -141,17 +141,17 @@
 // This function looks at the mmap regions in the metaspace without locking.
 // The chunks are added with store ordering and not deleted except for at
 // unloading time during a safepoint.
-bool VirtualSpaceList::contains(const void* ptr) {
+VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
   // List should be stable enough to use an iterator here because removing virtual
   // space nodes is only allowed at a safepoint.
   VirtualSpaceListIterator iter(virtual_space_list());
   while (iter.repeat()) {
     VirtualSpaceNode* vsn = iter.get_next();
     if (vsn->contains(ptr)) {
-      return true;
+      return vsn;
     }
   }
-  return false;
+  return NULL;
 }
 
 void VirtualSpaceList::retire_current_virtual_space() {
--- a/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp	Thu Oct 04 16:39:07 2018 +0200
@@ -116,7 +116,8 @@
   void inc_virtual_space_count();
   void dec_virtual_space_count();
 
-  bool contains(const void* ptr);
+  VirtualSpaceNode* find_enclosing_space(const void* ptr);
+  bool contains(const void* ptr) { return find_enclosing_space(ptr) != NULL; }
 
   // Unlink empty VirtualSpaceNodes and free it.
   void purge(ChunkManager* chunk_manager);
--- a/src/hotspot/share/oops/klass.cpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/oops/klass.cpp	Thu Oct 04 16:39:07 2018 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.inline.hpp"
+#include "classfile/classLoaderDataGraph.inline.hpp"
 #include "classfile/dictionary.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/systemDictionary.hpp"
@@ -740,6 +741,22 @@
   guarantee(obj->klass()->is_klass(), "klass field is not a klass");
 }
 
+Klass* Klass::decode_klass_raw(narrowKlass narrow_klass) {
+  return (Klass*)(void*)( (uintptr_t)Universe::narrow_klass_base() +
+                         ((uintptr_t)narrow_klass << Universe::narrow_klass_shift()));
+}
+
+bool Klass::is_valid(Klass* k) {
+  if (!is_aligned(k, sizeof(MetaWord))) return false;
+  if ((size_t)k < os::min_page_size()) return false;
+
+  if (!os::is_readable_range(k, k + 1)) return false;
+  if (!MetaspaceUtils::is_range_in_committed(k, k + 1)) return false;
+
+  if (!Symbol::is_valid(k->name())) return false;
+  return ClassLoaderDataGraph::is_valid(k->class_loader_data());
+}
+
 klassVtable Klass::vtable() const {
   return klassVtable(const_cast<Klass*>(this), start_of_vtable(), vtable_length() / vtableEntry::size());
 }
--- a/src/hotspot/share/oops/klass.hpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/oops/klass.hpp	Thu Oct 04 16:39:07 2018 +0200
@@ -715,6 +715,10 @@
 
   virtual void oop_verify_on(oop obj, outputStream* st);
 
+  // for error reporting
+  static Klass* decode_klass_raw(narrowKlass narrow_klass);
+  static bool is_valid(Klass* k);
+
   static bool is_null(narrowKlass obj);
   static bool is_null(Klass* obj);
 
--- a/src/hotspot/share/oops/oop.cpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/oops/oop.cpp	Thu Oct 04 16:39:07 2018 +0200
@@ -171,6 +171,63 @@
   return UseCompressedClassPointers;
 }
 
+oop oopDesc::decode_oop_raw(narrowOop narrow_oop) {
+  return (oop)(void*)( (uintptr_t)Universe::narrow_oop_base() +
+                      ((uintptr_t)narrow_oop << Universe::narrow_oop_shift()));
+}
+
+void* oopDesc::load_klass_raw(oop obj) {
+  if (UseCompressedClassPointers) {
+    narrowKlass narrow_klass = *(obj->compressed_klass_addr());
+    if (narrow_klass == 0) return NULL;
+    return (void*)Klass::decode_klass_raw(narrow_klass);
+  } else {
+    return *(void**)(obj->klass_addr());
+  }
+}
+
+void* oopDesc::load_oop_raw(oop obj, int offset) {
+  uintptr_t addr = (uintptr_t)(void*)obj + (uint)offset;
+  if (UseCompressedOops) {
+    narrowOop narrow_oop = *(narrowOop*)addr;
+    if (narrow_oop == 0) return NULL;
+    return (void*)decode_oop_raw(narrow_oop);
+  } else {
+    return *(void**)addr;
+  }
+}
+
+bool oopDesc::is_valid(oop obj) {
+  if (!is_object_aligned(obj)) return false;
+  if ((size_t)(oopDesc*)obj < os::min_page_size()) return false;
+
+  // We need at least the mark and the klass word in the committed region.
+  if (!os::is_readable_range(obj, (oopDesc*)obj + 1)) return false;
+  if (!Universe::heap()->is_in(obj)) return false;
+
+  Klass* k = (Klass*)load_klass_raw(obj);
+
+  if (!os::is_readable_range(k, k + 1)) return false;
+  return MetaspaceUtils::is_range_in_committed(k, k + 1);
+}
+
+oop oopDesc::oop_or_null(address addr) {
+  if (is_valid(oop(addr))) {
+    // We were just given an oop directly.
+    return oop(addr);
+  }
+
+  // Try to find addr using block_start.
+  HeapWord* p = Universe::heap()->block_start(addr);
+  if (p != NULL && Universe::heap()->block_is_obj(p)) {
+    if (!is_valid(oop(p))) return NULL;
+    return oop(p);
+  }
+
+  // If we can't find it, it just may mean that the heap wasn't parsable.
+  return NULL;
+}
+
 oop oopDesc::obj_field_acquire(int offset) const                      { return HeapAccess<MO_ACQUIRE>::oop_load_at(as_oop(), offset); }
 
 void oopDesc::obj_field_put_raw(int offset, oop value)                { RawAccess<>::oop_store_at(as_oop(), offset, value); }
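decode_oop_raw() (like Klass::decode_klass_raw()) applies the plain base-plus-shift decoding without any is-in-heap assertions, so the error handler can speculatively decode a 32-bit value that merely might be a compressed pointer. The arithmetic, as a standalone sketch with illustrative constants in place of Universe::narrow_oop_base()/narrow_oop_shift(), assuming a 64-bit platform:

    #include <cstdint>

    // Illustrative values only; the real base and shift are chosen by the
    // runtime when the heap is reserved.
    static const uintptr_t kHeapBase = 0x0000000080000000ull;
    static const int       kShift    = 3;   // 8-byte object alignment

    // "Raw" decode: no validity assertions, so it is safe to call on a value
    // that may not be a compressed pointer at all (e.g. a register value).
    static void* decode_narrow_raw(uint32_t narrow) {
      return (void*)(kHeapBase + ((uintptr_t)narrow << kShift));
    }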
--- a/src/hotspot/share/oops/oop.hpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/oops/oop.hpp	Thu Oct 04 16:39:07 2018 +0200
@@ -334,6 +334,13 @@
     assert(has_klass_gap(), "only applicable to compressed klass pointers");
     return klass_offset_in_bytes() + sizeof(narrowKlass);
   }
+
+  // for error reporting
+  static oop   decode_oop_raw(narrowOop narrow_oop);
+  static void* load_klass_raw(oop obj);
+  static void* load_oop_raw(oop obj, int offset);
+  static bool  is_valid(oop obj);
+  static oop   oop_or_null(address addr);
 };
 
 #endif // SHARE_VM_OOPS_OOP_HPP
--- a/src/hotspot/share/oops/symbol.cpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/oops/symbol.cpp	Thu Oct 04 16:39:07 2018 +0200
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "classfile/altHashing.hpp"
 #include "classfile/classLoaderData.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
@@ -317,5 +318,21 @@
   }
 }
 
+bool Symbol::is_valid(Symbol* s) {
+  if (!is_aligned(s, sizeof(MetaWord))) return false;
+  if ((size_t)s < os::min_page_size()) return false;
+
+  if (!os::is_readable_range(s, s + 1)) return false;
+
+  // Symbols are not allocated in Java heap.
+  if (Universe::heap()->is_in_reserved(s)) return false;
+
+  int len = s->utf8_length();
+  if (len < 0) return false;
+
+  jbyte* bytes = (jbyte*) s->bytes();
+  return os::is_readable_range(bytes, bytes + len);
+}
+
 // SymbolTable prints this in its statistics
 NOT_PRODUCT(size_t Symbol::_total_count = 0;)
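Symbol::is_valid() validates a variable-length object in two steps: it first checks that the fixed-size header is readable, and only then trusts utf8_length() and range-checks the byte payload. A standalone sketch of that ordering (VarLen and readable_range are hypothetical stand-ins for Symbol and os::is_readable_range()):

    #include <cstdint>
    #include <cstddef>

    // Hypothetical readability probe, stubbed out so the sketch is
    // self-contained; HotSpot uses os::is_readable_range().
    static bool readable_range(const void* from, const void* to) {
      (void)from; (void)to;
      return true;
    }

    struct VarLen {              // stand-in for Symbol: fixed header + payload
      int32_t length;
      uint8_t body[1];
    };

    static bool var_len_is_valid(const VarLen* s) {
      // 1. The fixed-size header must be readable before 'length' is trusted.
      if (!readable_range(s, s + 1)) return false;
      int32_t len = s->length;
      if (len < 0) return false;
      // 2. Only now is it safe to range-check the variable-length payload.
      return readable_range(s->body, s->body + len);
    }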
--- a/src/hotspot/share/oops/symbol.hpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/oops/symbol.hpp	Thu Oct 04 16:39:07 2018 +0200
@@ -255,6 +255,8 @@
   void print()         { print_on(tty);       }
   void print_value()   { print_value_on(tty); }
 
+  static bool is_valid(Symbol* s);
+
 #ifndef PRODUCT
   // Empty constructor to create a dummy symbol object on stack
   // only for getting its vtable pointer.
--- a/src/hotspot/share/runtime/os.cpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/runtime/os.cpp	Thu Oct 04 16:39:07 2018 +0200
@@ -1010,6 +1010,15 @@
   return (SafeFetch32(aligned, cafebabe) != cafebabe) || (SafeFetch32(aligned, deadbeef) != deadbeef);
 }
 
+bool os::is_readable_range(const void* from, const void* to) {
+  for (address p = align_down((address)from, min_page_size()); p < to; p += min_page_size()) {
+    if (!is_readable_pointer(p)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 
 // moved from debug.cpp (used to be find()) but still called from there
 // The verbose parameter is only set by the debug code in one case
@@ -1020,99 +1029,48 @@
     st->print_cr("0x0 is NULL");
     return;
   }
+
+  // Check if addr points into a code blob.
   CodeBlob* b = CodeCache::find_blob_unsafe(addr);
   if (b != NULL) {
-    if (b->is_buffer_blob()) {
-      // the interpreter is generated into a buffer blob
-      InterpreterCodelet* i = Interpreter::codelet_containing(addr);
-      if (i != NULL) {
-        st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
-        i->print_on(st);
-        return;
-      }
-      if (Interpreter::contains(addr)) {
-        st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
-                     " (not bytecode specific)", p2i(addr));
-        return;
-      }
-      //
-      if (AdapterHandlerLibrary::contains(b)) {
-        st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - b->code_begin()));
-        AdapterHandlerLibrary::print_handler_on(st, b);
-      }
-      // the stubroutines are generated into a buffer blob
-      StubCodeDesc* d = StubCodeDesc::desc_for(addr);
-      if (d != NULL) {
-        st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
-        d->print_on(st);
-        st->cr();
-        return;
-      }
-      if (StubRoutines::contains(addr)) {
-        st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
-        return;
-      }
-      // the InlineCacheBuffer is using stubs generated into a buffer blob
-      if (InlineCacheBuffer::contains(addr)) {
-        st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
-        return;
-      }
-      VtableStub* v = VtableStubs::stub_containing(addr);
-      if (v != NULL) {
-        st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
-        v->print_on(st);
-        st->cr();
-        return;
-      }
-    }
-    nmethod* nm = b->as_nmethod_or_null();
-    if (nm != NULL) {
-      ResourceMark rm;
-      st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
-                p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
-      if (verbose) {
-        st->print(" for ");
-        nm->method()->print_value_on(st);
-      }
-      st->cr();
-      nm->print_nmethod(verbose);
-      return;
-    }
-    st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - b->code_begin()));
-    b->print_on(st);
+    b->dump_for_addr(addr, st, verbose);
     return;
   }
 
+  // Check if addr points into Java heap.
   if (Universe::heap()->is_in(addr)) {
-    HeapWord* p = Universe::heap()->block_start(addr);
-    bool print = false;
-    // If we couldn't find it it just may mean that heap wasn't parsable
-    // See if we were just given an oop directly
-    if (p != NULL && Universe::heap()->block_is_obj(p)) {
-      print = true;
-    } else if (p == NULL && oopDesc::is_oop(oop(addr))) {
-      p = (HeapWord*) addr;
-      print = true;
-    }
-    if (print) {
-      if (p == (HeapWord*) addr) {
-        st->print_cr(INTPTR_FORMAT " is an oop", p2i(addr));
+    oop o = oopDesc::oop_or_null(addr);
+    if (o != NULL) {
+      if ((HeapWord*)o == (HeapWord*)addr) {
+        st->print(INTPTR_FORMAT " is an oop: ", p2i(addr));
       } else {
-        st->print_cr(INTPTR_FORMAT " is pointing into object: " INTPTR_FORMAT, p2i(addr), p2i(p));
+        st->print(INTPTR_FORMAT " is pointing into object: " , p2i(addr));
       }
-      oop(p)->print_on(st);
+      o->print_on(st);
       return;
     }
-  } else {
-    if (Universe::heap()->is_in_reserved(addr)) {
-      st->print_cr(INTPTR_FORMAT " is an unallocated location "
-                   "in the heap", p2i(addr));
+  } else if (Universe::heap()->is_in_reserved(addr)) {
+    st->print_cr(INTPTR_FORMAT " is an unallocated location in the heap", p2i(addr));
+    return;
+  }
+
+  // Compressed oop needs to be decoded first.
+#ifdef _LP64
+  if (UseCompressedOops && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) {
+    narrowOop narrow_oop = (narrowOop)(uintptr_t)addr;
+    oop o = oopDesc::decode_oop_raw(narrow_oop);
+
+    if (oopDesc::is_valid(o)) {
+      st->print(UINT32_FORMAT " is a compressed pointer to object: ", narrow_oop);
+      o->print_on(st);
       return;
     }
   }
+#endif
 
   bool accessible = is_readable_pointer(addr);
 
+  // Check if addr is a JNI handle.
   if (align_down((intptr_t)addr, sizeof(intptr_t)) != 0 && accessible) {
     if (JNIHandles::is_global_handle((jobject) addr)) {
       st->print_cr(INTPTR_FORMAT " is a global jni handle", p2i(addr));
@@ -1131,6 +1089,7 @@
 #endif
   }
 
+  // Check if addr belongs to a Java thread.
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
     // Check for privilege stack
     if (thread->privileged_stack_top() != NULL &&
@@ -1159,9 +1118,12 @@
     }
   }
 
-  // Check if in metaspace and print types that have vptrs (only method now)
+  // Check if in metaspace and print types that have vptrs
   if (Metaspace::contains(addr)) {
-    if (Method::has_method_vptr((const void*)addr)) {
+    if (Klass::is_valid((Klass*)addr)) {
+      st->print_cr(INTPTR_FORMAT " is a pointer to class: ", p2i(addr));
+      ((Klass*)addr)->print_on(st);
+    } else if (Method::is_valid_method((const Method*)addr)) {
       ((Method*)addr)->print_value_on(st);
       st->cr();
     } else {
@@ -1171,6 +1133,20 @@
     return;
   }
 
+  // Compressed klass needs to be decoded first.
+#ifdef _LP64
+  if (UseCompressedClassPointers && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) {
+    narrowKlass narrow_klass = (narrowKlass)(uintptr_t)addr;
+    Klass* k = Klass::decode_klass_raw(narrow_klass);
+
+    if (Klass::is_valid(k)) {
+      st->print_cr(UINT32_FORMAT " is a compressed pointer to class: " INTPTR_FORMAT, narrow_klass, p2i((HeapWord*)k));
+      k->print_on(st);
+      return;
+    }
+  }
+#endif
+
   // Try an OS specific find
   if (os::find(addr, st)) {
     return;
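os::is_readable_range() steps through the range one page at a time, starting from the address aligned down to min_page_size(), and reuses the existing per-pointer probe. For illustration, here is a standalone POSIX analogue built on the assumption that a one-byte write(2) to a pipe fails with EFAULT when the source buffer is unreadable; the pipe trick and helper names are this sketch's own, while HotSpot itself probes with SafeFetch32 via its signal handler:

    #include <unistd.h>
    #include <cerrno>
    #include <cstdint>

    enum { PROBE_PAGE = 4096 };   // lower bound, like os::min_page_size()

    // write(2) returns -1/EFAULT if the source buffer is unreadable, which
    // makes a pipe a convenient fault-free probe for a few pages (the pipe
    // buffer easily absorbs one byte per probed page).
    static bool page_is_readable(int pipe_wr, const void* p) {
      ssize_t n;
      do {
        n = write(pipe_wr, p, 1);
      } while (n < 0 && errno == EINTR);
      return n == 1;
    }

    static bool range_is_readable(int pipe_wr, const void* from, const void* to) {
      uintptr_t p = (uintptr_t)from & ~(uintptr_t)(PROBE_PAGE - 1);  // align down
      for (; p < (uintptr_t)to; p += PROBE_PAGE) {
        if (!page_is_readable(pipe_wr, (const void*)p)) return false;
      }
      return true;
    }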
--- a/src/hotspot/share/runtime/os.hpp	Thu Oct 04 14:03:13 2018 +0200
+++ b/src/hotspot/share/runtime/os.hpp	Thu Oct 04 16:39:07 2018 +0200
@@ -303,6 +303,9 @@
     return _page_sizes[0];
   }
 
+  // Return a lower bound for page sizes. Also works before os::init completed.
+  static size_t min_page_size() { return 4 * K; }
+
   // Methods for tracing page sizes returned by the above method.
   // The region_{min,max}_size parameters should be the values
   // passed to page_size_for_region() and page_size should be the result of that
@@ -415,6 +418,7 @@
 
   // Check if pointer points to readable memory (by 4-byte read access)
   static bool    is_readable_pointer(const void* p);
+  static bool    is_readable_range(const void* from, const void* to);
 
   // Routines used to serialize the thread state without using membars
   static void    serialize_thread_states();