8217921: Runtime dead code removal
author redestad
date Tue, 29 Jan 2019 14:43:05 +0100
changeset 53547:9d1a788dea3d
parent 53546:63eb7e38ce84
child 53548:0a35de73f7a8
child 57118:cf2b4754174d
8217921: Runtime dead code removal
Reviewed-by: coleenp, sgehwolf, dholmes
src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp
src/hotspot/cpu/arm/frame_arm.inline.hpp
src/hotspot/cpu/s390/frame_s390.inline.hpp
src/hotspot/cpu/sparc/frame_sparc.inline.hpp
src/hotspot/cpu/x86/frame_x86.inline.hpp
src/hotspot/share/interpreter/bytecodeStream.hpp
src/hotspot/share/interpreter/bytecodeTracer.cpp
src/hotspot/share/interpreter/bytecodes.hpp
src/hotspot/share/interpreter/interpreterRuntime.cpp
src/hotspot/share/interpreter/interpreterRuntime.hpp
src/hotspot/share/memory/binaryTreeDictionary.hpp
src/hotspot/share/memory/binaryTreeDictionary.inline.hpp
src/hotspot/share/memory/heapShared.hpp
src/hotspot/share/memory/metaspace.cpp
src/hotspot/share/memory/metaspace.hpp
src/hotspot/share/memory/metaspaceShared.cpp
src/hotspot/share/memory/metaspaceShared.hpp
src/hotspot/share/memory/universe.cpp
src/hotspot/share/memory/universe.hpp
src/hotspot/share/memory/virtualspace.cpp
src/hotspot/share/memory/virtualspace.hpp
src/hotspot/share/prims/jvmtiImpl.hpp
src/hotspot/share/prims/methodHandles.cpp
src/hotspot/share/prims/methodHandles.hpp
src/hotspot/share/runtime/arguments.cpp
src/hotspot/share/runtime/arguments.hpp
src/hotspot/share/runtime/frame.cpp
src/hotspot/share/runtime/frame.hpp
src/hotspot/share/runtime/statSampler.cpp
src/hotspot/share/runtime/statSampler.hpp
src/hotspot/share/runtime/vmOperations.hpp
src/hotspot/share/utilities/resourceHash.hpp
--- a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -137,11 +137,6 @@
 // frame.
 inline intptr_t* frame::id(void) const { return unextended_sp(); }
 
-// Relationals on frames based
-// Return true if the frame is younger (more recent activation) than the frame represented by id
-inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
-                                                    return this->id() < id ; }
-
 // Return true if the frame is older (less recent activation) than the frame represented by id
 inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                     return this->id() > id ; }
--- a/src/hotspot/cpu/arm/frame_arm.inline.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/cpu/arm/frame_arm.inline.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -120,17 +120,11 @@
 // frame.
 inline intptr_t* frame::id(void) const { return unextended_sp(); }
 
-// Relationals on frames based
-// Return true if the frame is younger (more recent activation) than the frame represented by id
-inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
-                                                    return this->id() < id ; }
-
 // Return true if the frame is older (less recent activation) than the frame represented by id
 inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                     return this->id() > id ; }
 
 
-
 inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
 
 inline intptr_t* frame::unextended_sp() const     { return _unextended_sp; }
--- a/src/hotspot/cpu/s390/frame_s390.inline.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/cpu/s390/frame_s390.inline.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -111,14 +111,6 @@
   return _fp;
 }
 
-// Return true if this frame is younger (more recent activation) than
-// the frame represented by id.
-inline bool frame::is_younger(intptr_t* id) const {
-  assert(this->id() != NULL && id != NULL, "NULL frame id");
-  // Stack grows towards smaller addresses on z/Architecture.
-  return this->id() < id;
-}
-
 // Return true if this frame is older (less recent activation) than
 // the frame represented by id.
 inline bool frame::is_older(intptr_t* id) const {
--- a/src/hotspot/cpu/sparc/frame_sparc.inline.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/cpu/sparc/frame_sparc.inline.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -57,11 +57,6 @@
 // frame.
 inline intptr_t* frame::id(void) const { return unextended_sp(); }
 
-// Relationals on frames based
-// Return true if the frame is younger (more recent activation) than the frame represented by id
-inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
-                                                    return this->id() < id ; }
-
 // Return true if the frame is older (less recent activation) than the frame represented by id
 inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                     return this->id() > id ; }
--- a/src/hotspot/cpu/x86/frame_x86.inline.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/cpu/x86/frame_x86.inline.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -133,11 +133,6 @@
 // frame.
 inline intptr_t* frame::id(void) const { return unextended_sp(); }
 
-// Relationals on frames based
-// Return true if the frame is younger (more recent activation) than the frame represented by id
-inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
-                                                    return this->id() < id ; }
-
 // Return true if the frame is older (less recent activation) than the frame represented by id
 inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                     return this->id() > id ; }
--- a/src/hotspot/share/interpreter/bytecodeStream.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/interpreter/bytecodeStream.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -211,7 +211,6 @@
     return _code;
   }
 
-  bool            is_active_breakpoint() const   { return Bytecodes::is_active_breakpoint_at(bcp()); }
   Bytecodes::Code code() const                   { return _code; }
 
   // Unsigned indices, widening
--- a/src/hotspot/share/interpreter/bytecodeTracer.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/interpreter/bytecodeTracer.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -225,7 +225,6 @@
   int ilimit = constants->length();
   Bytecodes::Code code = raw_code();
 
-  ConstantPoolCache* cache = NULL;
   if (Bytecodes::uses_cp_cache(code)) {
     bool okay = true;
     switch (code) {
@@ -256,8 +255,7 @@
 
 bool BytecodePrinter::check_cp_cache_index(int i, int& cp_index, outputStream* st) {
   ConstantPool* constants = method()->constants();
-  int ilimit = constants->length(), climit = 0;
-  Bytecodes::Code code = raw_code();
+  int climit = 0;
 
   ConstantPoolCache* cache = constants->cache();
   // If rewriter hasn't run, the index is the cp_index
@@ -307,7 +305,6 @@
 
 
 bool BytecodePrinter::check_invokedynamic_index(int i, int& cp_index, outputStream* st) {
-  ConstantPool* constants = method()->constants();
   assert(ConstantPool::is_invokedynamic_index(i), "not secondary index?");
   i = ConstantPool::decode_invokedynamic_index(i) + ConstantPool::CPCACHE_INDEX_TAG;
 
--- a/src/hotspot/share/interpreter/bytecodes.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/interpreter/bytecodes.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -380,7 +380,6 @@
   static Code       code_or_bp_at(address bcp)    { return (Code)cast(*bcp); }
 
   static Code       code_at(Method* method, int bci);
-  static bool       is_active_breakpoint_at(address bcp) { return (Code)*bcp == _breakpoint; }
 
   // find a bytecode, behind a breakpoint if necessary:
   static Code       non_breakpoint_code_at(const Method* method, address bcp);
@@ -405,18 +404,12 @@
   // if 'end' is provided, it indicates the end of the code buffer which
   // should not be read past when parsing.
   static int         special_length_at(Bytecodes::Code code, address bcp, address end = NULL);
-  static int         special_length_at(Method* method, address bcp, address end = NULL) { return special_length_at(code_at(method, bcp), bcp, end); }
   static int         raw_special_length_at(address bcp, address end = NULL);
   static int         length_for_code_at(Bytecodes::Code code, address bcp)  { int l = length_for(code); return l > 0 ? l : special_length_at(code, bcp); }
   static int         length_at      (Method* method, address bcp)  { return length_for_code_at(code_at(method, bcp), bcp); }
   static int         java_length_at (Method* method, address bcp)  { return length_for_code_at(java_code_at(method, bcp), bcp); }
   static bool        is_java_code   (Code code)    { return 0 <= code && code < number_of_java_codes; }
 
-  static bool        is_aload       (Code code)    { return (code == _aload  || code == _aload_0  || code == _aload_1
-                                                                             || code == _aload_2  || code == _aload_3); }
-  static bool        is_astore      (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1
-                                                                             || code == _astore_2 || code == _astore_3); }
-
   static bool        is_store_into_local(Code code){ return (_istore <= code && code <= _astore_3); }
   static bool        is_const       (Code code)    { return (_aconst_null <= code && code <= _ldc2_w); }
   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
@@ -433,7 +426,6 @@
     assert(code == (u_char)code, "must be a byte");
     return _flags[code + (is_wide ? (1<<BitsPerByte) : 0)];
   }
-  static int         format_bits    (Code code, bool is_wide) { return flags(code, is_wide) & _all_fmt_bits; }
   static bool        has_all_flags  (Code code, int test_flags, bool is_wide) {
     return (flags(code, is_wide) & test_flags) == test_flags;
   }
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -135,11 +135,6 @@
   frame& get_frame()                             { return _last_frame; }
 };
 
-
-bool InterpreterRuntime::is_breakpoint(JavaThread *thread) {
-  return Bytecodes::code_or_bp_at(LastFrameAccessor(thread).bcp()) == Bytecodes::_breakpoint;
-}
-
 //------------------------------------------------------------------------------------------------------------------------
 // State accessors
 
--- a/src/hotspot/share/interpreter/interpreterRuntime.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/interpreter/interpreterRuntime.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -126,7 +126,6 @@
   static void _breakpoint(JavaThread* thread, Method* method, address bcp);
   static Bytecodes::Code get_original_bytecode_at(JavaThread* thread, Method* method, address bcp);
   static void            set_original_bytecode_at(JavaThread* thread, Method* method, address bcp, Bytecodes::Code new_code);
-  static bool is_breakpoint(JavaThread *thread);
 
   // Safepoints
   static void    at_safepoint(JavaThread* thread);
--- a/src/hotspot/share/memory/binaryTreeDictionary.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/binaryTreeDictionary.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -120,7 +120,6 @@
   // node to point to the new node.
   TreeList<Chunk_t, FreeList_t>* remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc);
   // See FreeList.
-  void return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* tc);
   void return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* tc);
 };
 
@@ -236,7 +235,6 @@
   size_t     num_free_blocks()  const;
   size_t     tree_height() const;
   size_t     tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
-  size_t     total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
   size_t     total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
 
  public:
--- a/src/hotspot/share/memory/binaryTreeDictionary.inline.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/binaryTreeDictionary.inline.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -245,35 +245,6 @@
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
 }
 
-// Add this chunk at the head of the list.  "At the head of the list"
-// is defined to be after the chunk pointer to by head().  This is
-// because the TreeList<Chunk_t, FreeList_t> is embedded in the first TreeChunk<Chunk_t, FreeList_t> in the
-// list.  See the definition of TreeChunk<Chunk_t, FreeList_t>.
-template <class Chunk_t, class FreeList_t>
-void TreeList<Chunk_t, FreeList_t>::return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* chunk) {
-  assert(chunk->list() == this, "list should be set for chunk");
-  assert(head() != NULL, "The tree list is embedded in the first chunk");
-  assert(chunk != NULL, "returning NULL chunk");
-  // This is expensive for metaspace
-  assert(!FLSVerifyDictionary || !this->verify_chunk_in_free_list(chunk), "Double entry");
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-
-  Chunk_t* fc = head()->next();
-  if (fc != NULL) {
-    chunk->link_after(fc);
-  } else {
-    assert(tail() == NULL, "List is inconsistent");
-    this->link_tail(chunk);
-  }
-  head()->link_after(chunk);
-  assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
-  FreeList_t::increment_count();
-  debug_only(this->increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
-  assert(head() == NULL || head()->prev() == NULL, "list invariant");
-  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-}
-
 template <class Chunk_t, class FreeList_t>
 void TreeChunk<Chunk_t, FreeList_t>::assert_is_mangled() const {
   assert((ZapUnusedHeapArea &&
@@ -799,11 +770,6 @@
     total_nodes_helper(tl->right());
 }
 
-template <class Chunk_t, class FreeList_t>
-size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
-  return total_nodes_helper(root());
-}
-
 // Searches the tree for a chunk that ends at the
 // specified address.
 template <class Chunk_t, class FreeList_t>
--- a/src/hotspot/share/memory/heapShared.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/heapShared.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -191,9 +191,6 @@
   static void verify_subgraph_from(oop orig_obj) PRODUCT_RETURN;
 
   static KlassSubGraphInfo* get_subgraph_info(Klass *k);
-  static int num_of_subgraph_infos();
-
-  static void build_archived_subgraph_info_records(int num_records);
 
   static void init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                          int num, Thread* THREAD);
@@ -321,10 +318,6 @@
 
   inline static bool is_archived_object(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false);
 
-  static void archive_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
-
-  static char* read_archived_subgraph_infos(char* buffer) NOT_CDS_JAVA_HEAP_RETURN_(buffer);
-  static void write_archived_subgraph_infos() NOT_CDS_JAVA_HEAP_RETURN;
   static void initialize_from_archived_subgraph(Klass* k) NOT_CDS_JAVA_HEAP_RETURN;
 
   // NarrowOops stored in the CDS archive may use a different encoding scheme
--- a/src/hotspot/share/memory/metaspace.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/metaspace.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -873,19 +873,6 @@
   return vsn;
 }
 
-bool MetaspaceUtils::is_in_committed(const void* p) {
-#if INCLUDE_CDS
-  if (UseSharedSpaces) {
-    for (int idx = MetaspaceShared::ro; idx <= MetaspaceShared::mc; idx++) {
-      if (FileMapInfo::current_info()->is_in_shared_region(p, idx)) {
-        return true;
-      }
-    }
-  }
-#endif
-  return find_enclosing_virtual_space(p) != NULL;
-}
-
 bool MetaspaceUtils::is_range_in_committed(const void* from, const void* to) {
 #if INCLUDE_CDS
   if (UseSharedSpaces) {
--- a/src/hotspot/share/memory/metaspace.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/metaspace.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -333,7 +333,6 @@
   // Utils to check if a pointer or range is part of a committed metaspace region
   // without acquiring any locks.
   static metaspace::VirtualSpaceNode* find_enclosing_virtual_space(const void* p);
-  static bool is_in_committed(const void* p);
   static bool is_range_in_committed(const void* from, const void* to);
 
 public:
@@ -387,9 +386,6 @@
   }
 
   static size_t min_chunk_size_words();
-  static size_t min_chunk_size_bytes() {
-    return min_chunk_size_words() * BytesPerWord;
-  }
 
   // Flags for print_report().
   enum ReportFlag {
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -218,10 +218,6 @@
   return _ro_region.allocate(num_bytes);
 }
 
-char* MetaspaceShared::read_only_space_top() {
-  return _ro_region.top();
-}
-
 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
   assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
 
--- a/src/hotspot/share/memory/metaspaceShared.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/metaspaceShared.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -168,14 +168,11 @@
 
   static bool try_link_class(InstanceKlass* ik, TRAPS);
   static void link_and_cleanup_shared_classes(TRAPS);
-  static void check_shared_class_loader_type(InstanceKlass* ik);
 
   // Allocate a block of memory from the "mc", "ro", or "rw" regions.
   static char* misc_code_space_alloc(size_t num_bytes);
   static char* read_only_space_alloc(size_t num_bytes);
 
-  static char* read_only_space_top();
-
   template <typename T>
   static Array<T>* new_ro_array(int length) {
 #if INCLUDE_CDS
--- a/src/hotspot/share/memory/universe.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/universe.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1286,14 +1286,6 @@
 #endif // PRODUCT
 
 
-void Universe::compute_verify_oop_data() {
-  verify_oop_mask();
-  verify_oop_bits();
-  verify_mark_mask();
-  verify_mark_bits();
-}
-
-
 void LatestMethodCache::init(Klass* k, Method* m) {
   if (!UseSharedSpaces) {
     _klass = k;
--- a/src/hotspot/share/memory/universe.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/universe.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -258,7 +258,6 @@
   static uintptr_t _verify_oop_bits;
 
   static void calculate_verify_data(HeapWord* low_boundary, HeapWord* high_boundary) PRODUCT_RETURN;
-  static void compute_verify_oop_data();
 
  public:
   // Known classes in the VM
@@ -392,8 +391,6 @@
   };
   static NARROW_OOP_MODE narrow_oop_mode();
   static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
-  static char*    preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode);
-  static char*    preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
   static address  narrow_oop_base()                  { return  _narrow_oop._base; }
   // Test whether bits of addr and possible offsets into the heap overlap.
   static bool     is_disjoint_heap_base_address(address addr) {
@@ -416,10 +413,8 @@
 
   // For UseCompressedClassPointers
   static address  narrow_klass_base()                     { return  _narrow_klass._base; }
-  static bool  is_narrow_klass_base(void* addr)           { return (narrow_klass_base() == (address)addr); }
   static uint64_t narrow_klass_range()                    { return  _narrow_klass_range; }
   static int      narrow_klass_shift()                    { return  _narrow_klass._shift; }
-  static bool     narrow_klass_use_implicit_null_checks() { return  _narrow_klass._use_implicit_null_checks; }
 
   static address* narrow_ptrs_base_addr()                 { return &_narrow_ptrs_base; }
   static void     set_narrow_ptrs_base(address a)         { _narrow_ptrs_base = a; }
@@ -441,7 +436,6 @@
   static ReservedSpace reserve_heap(size_t heap_size, size_t alignment);
 
   // Historic gc information
-  static size_t get_heap_capacity_at_last_gc()         { return _heap_capacity_at_last_gc; }
   static size_t get_heap_free_at_last_gc()             { return _heap_capacity_at_last_gc - _heap_used_at_last_gc; }
   static size_t get_heap_used_at_last_gc()             { return _heap_used_at_last_gc; }
   static void update_heap_info_at_gc();
@@ -517,25 +511,4 @@
   static int base_vtable_size()               { return _base_vtable_size; }
 };
 
-class DeferredObjAllocEvent : public CHeapObj<mtInternal> {
-  private:
-    oop    _oop;
-    size_t _bytesize;
-    jint   _arena_id;
-
-  public:
-    DeferredObjAllocEvent(const oop o, const size_t s, const jint id) {
-      _oop      = o;
-      _bytesize = s;
-      _arena_id = id;
-    }
-
-    ~DeferredObjAllocEvent() {
-    }
-
-    jint   arena_id() { return _arena_id; }
-    size_t bytesize() { return _bytesize; }
-    oop    get_oop()  { return _oop; }
-};
-
 #endif // SHARE_MEMORY_UNIVERSE_HPP
--- a/src/hotspot/share/memory/virtualspace.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/virtualspace.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -266,11 +266,6 @@
 }
 
 
-size_t ReservedSpace::allocation_align_size_down(size_t size) {
-  return align_down(size, os::vm_allocation_granularity());
-}
-
-
 void ReservedSpace::release() {
   if (is_reserved()) {
     char *real_base = _base - _noaccess_prefix;
--- a/src/hotspot/share/memory/virtualspace.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/memory/virtualspace.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -88,7 +88,6 @@
   static size_t page_align_size_up(size_t size);
   static size_t page_align_size_down(size_t size);
   static size_t allocation_align_size_up(size_t size);
-  static size_t allocation_align_size_down(size_t size);
   bool contains(const void* p) const {
     return (base() <= ((char*)p)) && (((char*)p) < (base() + size()));
   }
--- a/src/hotspot/share/prims/jvmtiImpl.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/prims/jvmtiImpl.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -297,25 +297,11 @@
   // lazily create _jvmti_breakpoints and _breakpoint_list
   static JvmtiBreakpoints& get_jvmti_breakpoints();
 
-  // quickly test whether the bcp matches a cached breakpoint in the list
-  static inline bool is_breakpoint(address bcp);
-
   static void oops_do(OopClosure* f);
   static void metadata_do(void f(Metadata*)) NOT_JVMTI_RETURN;
   static void gc_epilogue();
 };
 
-// quickly test whether the bcp matches a cached breakpoint in the list
-bool JvmtiCurrentBreakpoints::is_breakpoint(address bcp) {
-    address *bps = get_breakpoint_list();
-    if (bps == NULL) return false;
-    for ( ; (*bps) != NULL; bps++) {
-      if ((*bps) == bcp) return true;
-    }
-    return false;
-}
-
-
 ///////////////////////////////////////////////////////////////
 //
 // class VM_ChangeBreakpoints
--- a/src/hotspot/share/prims/methodHandles.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/prims/methodHandles.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -968,7 +968,6 @@
   bool search_superc = ((match_flags & SEARCH_SUPERCLASSES) != 0);
   bool search_intfc  = ((match_flags & SEARCH_INTERFACES)   != 0);
   bool local_only = !(search_superc | search_intfc);
-  bool classes_only = false;
 
   if (name != NULL) {
     if (name->utf8_length() == 0)  return 0; // a match is not possible
--- a/src/hotspot/share/prims/methodHandles.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/prims/methodHandles.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -67,7 +67,6 @@
   static oop init_MemberName(Handle mname_h, Handle target_h, TRAPS); // compute vmtarget/vmindex from target
   static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false);
   static oop init_method_MemberName(Handle mname_h, CallInfo& info);
-  static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true);
   static int find_MemberNames(Klass* k, Symbol* name, Symbol* sig,
                               int mflags, Klass* caller,
                               int skip, objArrayHandle results, TRAPS);
@@ -148,8 +147,6 @@
 
   static Bytecodes::Code signature_polymorphic_intrinsic_bytecode(vmIntrinsics::ID id);
 
-  static int get_named_constant(int which, Handle name_box, TRAPS);
-
 public:
   static Symbol* lookup_signature(oop type_str, bool polymorphic, TRAPS);  // use TempNewSymbol
   static Symbol* lookup_basic_type_signature(Symbol* sig, bool keep_last_arg, TRAPS);  // use TempNewSymbol
@@ -158,11 +155,6 @@
   }
   static bool is_basic_type_signature(Symbol* sig);
 
-  static Symbol* lookup_method_type(Symbol* msig, Handle mtype, TRAPS);
-
-  static void print_as_method_type_on(outputStream* st, Symbol* sig) {
-    print_as_basic_type_signature_on(st, sig, true, true);
-  }
   static void print_as_basic_type_signature_on(outputStream* st, Symbol* sig, bool keep_arrays = false, bool keep_basic_names = false);
 
   // decoding CONSTANT_MethodHandle constants
@@ -188,13 +180,6 @@
     assert(ref_kind_is_valid(ref_kind), "");
     return (ref_kind & 1) != 0;
   }
-  static bool ref_kind_is_static(int ref_kind) {
-    return !ref_kind_has_receiver(ref_kind) && (ref_kind != JVM_REF_newInvokeSpecial);
-  }
-  static bool ref_kind_does_dispatch(int ref_kind) {
-    return (ref_kind == JVM_REF_invokeVirtual ||
-            ref_kind == JVM_REF_invokeInterface);
-  }
 
   static int ref_kind_to_flags(int ref_kind);
 
--- a/src/hotspot/share/runtime/arguments.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/runtime/arguments.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -328,10 +328,6 @@
   _agentList.add(agentLib);
 }
 
-void Arguments::add_loaded_agent(const char* name, char* options, bool absolute_path, void* os_lib) {
-  _agentList.add(new AgentLibrary(name, options, absolute_path, os_lib));
-}
-
 // Return TRUE if option matches 'property', or 'property=', or 'property.'.
 static bool matches_property_suffix(const char* option, const char* property, size_t len) {
   return ((strncmp(option, property, len) == 0) &&
--- a/src/hotspot/share/runtime/arguments.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/runtime/arguments.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -346,7 +346,6 @@
 
   // Late-binding agents not started via arguments
   static void add_loaded_agent(AgentLibrary *agentLib);
-  static void add_loaded_agent(const char* name, char* options, bool absolute_path, void* os_lib);
 
   // Operation modi
   static Mode _mode;
@@ -368,7 +367,6 @@
   static bool _UseOnStackReplacement;
   static bool _BackgroundCompilation;
   static bool _ClipInlining;
-  static bool _CIDynamicCompilePriority;
   static intx _Tier3InvokeNotifyFreqLog;
   static intx _Tier4InvocationThreshold;
 
--- a/src/hotspot/share/runtime/frame.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/runtime/frame.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -368,29 +368,6 @@
   return result;
 }
 
-// Note: called by profiler - NOT for current thread
-frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
-// If we don't recognize this frame, walk back up the stack until we do
-  RegisterMap map(thread, false);
-  frame first_java_frame = frame();
-
-  // Find the first Java frame on the stack starting with input frame
-  if (is_java_frame()) {
-    // top frame is compiled frame or deoptimized frame
-    first_java_frame = *this;
-  } else if (safe_for_sender(thread)) {
-    for (frame sender_frame = sender(&map);
-      sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame();
-      sender_frame = sender_frame.sender(&map)) {
-      if (sender_frame.is_java_frame()) {
-        first_java_frame = sender_frame;
-        break;
-      }
-    }
-  }
-  return first_java_frame;
-}
-
 // Interpreter frames
 
 
--- a/src/hotspot/share/runtime/frame.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/runtime/frame.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -148,9 +148,6 @@
   // returns the sending frame
   frame sender(RegisterMap* map) const;
 
-  // for Profiling - acting on another frame. walks sender frames
-  // if valid.
-  frame profile_find_Java_sender_frame(JavaThread *thread);
   bool safe_for_sender(JavaThread *thread);
 
   // returns the sender, but skips conversion frames
--- a/src/hotspot/share/runtime/statSampler.cpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/runtime/statSampler.cpp	Tue Jan 29 14:43:05 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -356,14 +356,3 @@
   PerfDataManager::create_counter(SUN_OS, "hrt.ticks",
                                   PerfData::U_Ticks, psh, CHECK);
 }
-
-/*
- * the statSampler_exit() function is called from os_init.cpp on
- * exit of the vm.
- */
-void statSampler_exit() {
-
-  if (!UsePerfData) return;
-
-  StatSampler::destroy();
-}
--- a/src/hotspot/share/runtime/statSampler.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/runtime/statSampler.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -65,6 +65,4 @@
     static void destroy();
 };
 
-void statSampler_exit();
-
 #endif // SHARE_RUNTIME_STATSAMPLER_HPP
--- a/src/hotspot/share/runtime/vmOperations.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/runtime/vmOperations.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -121,7 +121,6 @@
   template(PrintCompileQueue)                     \
   template(PrintClassHierarchy)                   \
   template(ThreadSuspend)                         \
-  template(CTWThreshold)                          \
   template(ThreadsSuspendJVMTI)                   \
   template(ICBufferFull)                          \
   template(ScavengeMonitors)                      \
@@ -272,12 +271,6 @@
   VMOp_Type type() const { return VMOp_ThreadSuspend; }
 };
 
-// empty vm op, when forcing a safepoint due to ctw threshold is reached for the sweeper
-class VM_CTWThreshold: public VM_ForceSafepoint {
- public:
-  VMOp_Type type() const { return VMOp_CTWThreshold; }
-};
-
 // empty vm op, when forcing a safepoint to suspend threads from jvmti
 class VM_ThreadsSuspendJVMTI: public VM_ForceSafepoint {
  public:
--- a/src/hotspot/share/utilities/resourceHash.hpp	Tue Jan 29 14:34:26 2019 +0100
+++ b/src/hotspot/share/utilities/resourceHash.hpp	Tue Jan 29 14:43:05 2019 +0100
@@ -27,11 +27,6 @@
 
 #include "memory/allocation.hpp"
 
-template<typename K> struct ResourceHashtableFns {
-    typedef unsigned (*hash_fn)(K const&);
-    typedef bool (*equals_fn)(K const&, K const&);
-};
-
 template<
     typename K, typename V,
     // xlC does not compile this:
@@ -160,10 +155,6 @@
       ++bucket;
     }
   }
-
-  static size_t node_size() {
-    return sizeof(Node);
-  }
 };