8145221: Use trampolines for i2i and i2c entries in Methods that are stored in CDS archive
author ccheung
Thu, 07 Apr 2016 22:03:04 -0700
changeset 37439 e8970711113b
parent 37438 873c4aea8d1b
child 37441 016084ef6634
child 37443 1ac0e91a0f53
child 37445 5565e6e80352
8145221: Use trampolines for i2i and i2c entries in Methods that are stored in CDS archive
Summary: This optimization reduces the size of the RW region of the CDS archive. It also reduces the number of pages in the RW region that are actually written to at runtime.
Reviewed-by: dlong, iklam, jiangli
Contributed-by: ioi.lam@oracle.com, calvin.cheung@oracle.com, goetz.lindenmaier@sap.com
hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
hotspot/src/cpu/sparc/vm/metaspaceShared_sparc.cpp
hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp
hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp
hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp
hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp
hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp
hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp
hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp
hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp
hotspot/src/share/vm/interpreter/abstractInterpreter.cpp
hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
hotspot/src/share/vm/interpreter/templateInterpreterGenerator.cpp
hotspot/src/share/vm/memory/filemap.cpp
hotspot/src/share/vm/memory/filemap.hpp
hotspot/src/share/vm/memory/metaspaceShared.cpp
hotspot/src/share/vm/memory/metaspaceShared.hpp
hotspot/src/share/vm/oops/constMethod.hpp
hotspot/src/share/vm/oops/method.cpp
hotspot/src/share/vm/oops/method.hpp
hotspot/src/share/vm/runtime/sharedRuntime.cpp
hotspot/src/share/vm/runtime/sharedRuntime.hpp
hotspot/src/share/vm/runtime/vmStructs.cpp
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -198,6 +198,16 @@
 bool SharedRuntime::is_wide_vector(int size) {
   return size > 8;
 }
+
+size_t SharedRuntime::trampoline_size() {
+  return 16;
+}
+
+void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
+  __ mov(rscratch1, destination);
+  __ br(rscratch1);
+}
+
 // The java_calling_convention describes stack locations as ideal slots on
 // a frame with no abi restrictions. Since we must observe abi restrictions
 // (like the placement of the register window) the slots must be biased by
--- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -483,6 +483,18 @@
   assert(size <= 8, "%d bytes vectors are not supported", size);
   return size > 8;
 }
+
+size_t SharedRuntime::trampoline_size() {
+  return Assembler::load_const_size + 8;
+}
+
+void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
+  Register Rtemp = R12;
+  __ load_const(Rtemp, destination);
+  __ mtctr(Rtemp);
+  __ bctr();
+}
+
 #ifdef COMPILER2
 static int reg2slot(VMReg r) {
   return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
--- a/hotspot/src/cpu/sparc/vm/metaspaceShared_sparc.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/cpu/sparc/vm/metaspaceShared_sparc.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -65,8 +65,6 @@
   *vtable = dummy_vtable;
   *md_top += vtable_bytes;
 
-  guarantee(*md_top <= md_end, "Insufficient space for vtables.");
-
   // Get ready to generate dummy methods.
 
   CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -324,6 +324,16 @@
   return size > 8;
 }
 
+size_t SharedRuntime::trampoline_size() {
+  return 40;
+}
+
+void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
+  __ set((intptr_t)destination, G3_scratch);
+  __ JMP(G3_scratch, 0);
+  __ delayed()->nop();
+}
+
 // The java_calling_convention describes stack locations as ideal slots on
 // a frame with no abi restrictions. Since we must observe abi restrictions
 // (like the placement of the register window) the slots must be biased by
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -355,6 +355,14 @@
   return size > 16;
 }
 
+size_t SharedRuntime::trampoline_size() {
+  return 16;
+}
+
+void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
+  __ jump(RuntimeAddress(destination));
+}
+
 // The java_calling_convention describes stack locations as ideal slots on
 // a frame with no abi restrictions. Since we must observe abi restrictions
 // (like the placement of the register window) the slots must be biased by
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -391,6 +391,14 @@
   return size > 16;
 }
 
+size_t SharedRuntime::trampoline_size() {
+  return 16;
+}
+
+void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
+  __ jump(RuntimeAddress(destination));
+}
+
 // The java_calling_convention describes stack locations as ideal slots on
 // a frame with no abi restrictions. Since we must observe abi restrictions
 // (like the placement of the register window) the slots must be biased by
--- a/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -132,6 +132,15 @@
   return generate_empty_runtime_stub("resolve_blob");
 }
 
+size_t SharedRuntime::trampoline_size() {
+  ShouldNotCallThis();
+  return 0;
+}
+
+void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
+  ShouldNotCallThis();
+  return;
+}
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
--- a/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
@@ -64,6 +65,14 @@
       return false;
     }
 
+#if INCLUDE_CDS
+    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+      // In the middle of a trampoline call. Bail out for safety.
+      // This happens rarely so shouldn't affect profiling.
+      return false;
+    }
+#endif
+
     frame ret_frame(ret_sp, ret_fp, addr.pc());
     if (!ret_frame.safe_for_sender(jt)) {
 #if defined(COMPILER2) || INCLUDE_JVMCI
--- a/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
@@ -66,6 +67,14 @@
       return false;
     }
 
+#if INCLUDE_CDS
+    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+      // In the middle of a trampoline call. Bail out for safety.
+      // This happens rarely so shouldn't affect profiling.
+      return false;
+    }
+#endif
+
     frame ret_frame(ret_sp, ret_fp, addr.pc());
     if (!ret_frame.safe_for_sender(jt)) {
 #ifdef COMPILER2
--- a/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
@@ -64,6 +65,14 @@
     return false;
   }
 
+#if INCLUDE_CDS
+  if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+    // In the middle of a trampoline call. Bail out for safety.
+    // This happens rarely so shouldn't affect profiling.
+    return false;
+  }
+#endif
+
   // we were running Java code when SIGPROF came in
   if (isInJava) {
     // If we have a last_Java_sp, then the SIGPROF signal caught us
--- a/hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
@@ -65,6 +66,14 @@
       return false;
     }
 
+#if INCLUDE_CDS
+    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+      // In the middle of a trampoline call. Bail out for safety.
+      // This happens rarely so shouldn't affect profiling.
+      return false;
+    }
+#endif
+
     frame ret_frame(ret_sp, ret_fp, addr.pc());
     if (!ret_frame.safe_for_sender(jt)) {
 #if defined(COMPILER2) || INCLUDE_JVMCI
--- a/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
@@ -77,6 +78,14 @@
     return false;
   }
 
+#if INCLUDE_CDS
+  if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+    // In the middle of a trampoline call. Bail out for safety.
+    // This happens rarely so shouldn't affect profiling.
+    return false;
+  }
+#endif
+
   frame ret_frame(ret_sp, frame::unpatchable, addr.pc());
 
   // we were running Java code when SIGPROF came in
--- a/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
@@ -70,6 +71,14 @@
     return false;
   }
 
+#if INCLUDE_CDS
+  if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+    // In the middle of a trampoline call. Bail out for safety.
+    // This happens rarely so shouldn't affect profiling.
+    return false;
+  }
+#endif
+
   // If sp and fp are nonsense just leave them out
 
   if (!jt->on_local_stack((address)ret_sp)) {
--- a/hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
@@ -72,6 +73,14 @@
       return false;
     }
 
+#if INCLUDE_CDS
+    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
+      // In the middle of a trampoline call. Bail out for safety.
+      // This happens rarely so shouldn't affect profiling.
+      return false;
+    }
+#endif
+
     frame ret_frame(ret_sp, ret_fp, addr.pc());
     if (!ret_frame.safe_for_sender(jt)) {
 #if defined(COMPILER2) || INCLUDE_JVMCI
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "compiler/disassembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/interpreter.hpp"
@@ -32,6 +33,7 @@
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/templateTable.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/methodData.hpp"
@@ -93,6 +95,7 @@
 address    AbstractInterpreter::_native_entry_end                           = NULL;
 address    AbstractInterpreter::_slow_signature_handler;
 address    AbstractInterpreter::_entry_table            [AbstractInterpreter::number_of_method_entries];
+address    AbstractInterpreter::_cds_entry_table        [AbstractInterpreter::number_of_method_entries];
 address    AbstractInterpreter::_native_abi_to_tosca    [AbstractInterpreter::number_of_result_handlers];
 
 //------------------------------------------------------------------------------------------------------------------------
@@ -204,15 +207,42 @@
   return zerolocals;
 }
 
+#if INCLUDE_CDS
+
+address AbstractInterpreter::get_trampoline_code_buffer(AbstractInterpreter::MethodKind kind) {
+  const size_t trampoline_size = SharedRuntime::trampoline_size();
+  address addr = MetaspaceShared::cds_i2i_entry_code_buffers((size_t)(AbstractInterpreter::number_of_method_entries) * trampoline_size);
+  addr += (size_t)(kind) * trampoline_size;
+
+  return addr;
+}
+
+void AbstractInterpreter::update_cds_entry_table(AbstractInterpreter::MethodKind kind) {
+  if (DumpSharedSpaces || UseSharedSpaces) {
+    address trampoline = get_trampoline_code_buffer(kind);
+    _cds_entry_table[kind] = trampoline;
+
+    CodeBuffer buffer(trampoline, (int)(SharedRuntime::trampoline_size()));
+    MacroAssembler _masm(&buffer);
+    SharedRuntime::generate_trampoline(&_masm, _entry_table[kind]);
+
+    if (PrintInterpreter) {
+      Disassembler::decode(buffer.insts_begin(), buffer.insts_end());
+    }
+  }
+}
+
+#endif
 
 void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) {
   assert(kind >= method_handle_invoke_FIRST &&
          kind <= method_handle_invoke_LAST, "late initialization only for MH entry points");
   assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry");
   _entry_table[kind] = entry;
+
+  update_cds_entry_table(kind);
 }
 
-
 // Return true if the interpreter can prove that the given bytecode has
 // not yet been executed (in Java semantics, not in actual operation).
 bool AbstractInterpreter::is_not_reached(const methodHandle& method, int bci) {
@@ -416,5 +446,6 @@
   for (int i = method_handle_invoke_FIRST; i <= method_handle_invoke_LAST; i++) {
     MethodKind kind = (MethodKind) i;
     _entry_table[kind] = _entry_table[Interpreter::abstract];
+    Interpreter::update_cds_entry_table(kind);
   }
 }
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Thu Apr 07 22:03:04 2016 -0700
@@ -113,6 +113,7 @@
 
   // method entry points
   static address    _entry_table[number_of_method_entries];     // entry points for a given method
+  static address    _cds_entry_table[number_of_method_entries]; // entry points for methods in the CDS archive
   static address    _native_abi_to_tosca[number_of_result_handlers];  // for native method result handlers
   static address    _slow_signature_handler;                              // the native method generic (slow) signature handler
 
@@ -132,6 +133,17 @@
   static address    entry_for_kind(MethodKind k)                { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; }
   static address    entry_for_method(methodHandle m)            { return entry_for_kind(method_kind(m)); }
 
+  static address entry_for_cds_method(methodHandle m) {
+    MethodKind k = method_kind(m);
+    assert(0 <= k && k < number_of_method_entries, "illegal kind");
+    return _cds_entry_table[k];
+  }
+
+  // used by class data sharing
+  static void       update_cds_entry_table(MethodKind kind) NOT_CDS_RETURN;
+
+  static address    get_trampoline_code_buffer(AbstractInterpreter::MethodKind kind) NOT_CDS_RETURN_(0);
+
   // used for bootstrapping method handles:
   static void       set_entry_for_kind(MethodKind k, address e);
 
--- a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -212,6 +212,7 @@
 #define method_entry(kind)                                              \
       { CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
         Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
+        Interpreter::update_cds_entry_table(Interpreter::kind); \
       }
 
       // all non-native method kinds
--- a/hotspot/src/share/vm/memory/filemap.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -959,6 +959,16 @@
   return false;
 }
 
+// Check if a given address is within one of the shared regions (ro, rw, md, mc)
+bool FileMapInfo::is_in_shared_region(const void* p, int idx) {
+  assert((idx >= MetaspaceShared::ro) && (idx <= MetaspaceShared::mc), "invalid region index");
+  char* base = _header->region_addr(idx);
+  if (p >= base && p < base + _header->_space[idx]._used) {
+    return true;
+  }
+  return false;
+}
+
 void FileMapInfo::print_shared_spaces() {
   tty->print_cr("Shared Spaces:");
   for (int i = 0; i < MetaspaceShared::n_regions; i++) {
--- a/hotspot/src/share/vm/memory/filemap.hpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/memory/filemap.hpp	Thu Apr 07 22:03:04 2016 -0700
@@ -107,6 +107,8 @@
     int     _narrow_klass_shift;      // save narrow klass base and shift
     address _narrow_klass_base;
     char*   _misc_data_patching_start;
+    address _cds_i2i_entry_code_buffers;
+    size_t  _cds_i2i_entry_code_buffers_size;
 
     struct space_info {
       int    _crc;           // crc checksum of the current space
@@ -195,6 +197,19 @@
   char* misc_data_patching_start()            { return _header->_misc_data_patching_start; }
   void set_misc_data_patching_start(char* p)  { _header->_misc_data_patching_start = p; }
 
+  address cds_i2i_entry_code_buffers() {
+    return _header->_cds_i2i_entry_code_buffers;
+  }
+  void set_cds_i2i_entry_code_buffers(address addr) {
+    _header->_cds_i2i_entry_code_buffers = addr;
+  }
+  size_t cds_i2i_entry_code_buffers_size() {
+    return _header->_cds_i2i_entry_code_buffers_size;
+  }
+  void set_cds_i2i_entry_code_buffers_size(size_t s) {
+    _header->_cds_i2i_entry_code_buffers_size = s;
+  }
+
   static FileMapInfo* current_info() {
     CDS_ONLY(return _current_info;)
     NOT_CDS(return NULL;)
@@ -234,6 +249,7 @@
 
   // Return true if given address is in the mapped shared space.
   bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
+  bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
   void print_shared_spaces() NOT_CDS_RETURN;
 
   static size_t shared_spaces_size() {
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -59,6 +59,8 @@
 bool MetaspaceShared::_check_classes_made_progress;
 bool MetaspaceShared::_has_error_classes;
 bool MetaspaceShared::_archive_loading_failed = false;
+address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
+size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
 SharedMiscRegion MetaspaceShared::_mc;
 SharedMiscRegion MetaspaceShared::_md;
 
@@ -129,6 +131,21 @@
   soc->do_tag(666);
 }
 
+address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
+  if (DumpSharedSpaces) {
+    if (_cds_i2i_entry_code_buffers == NULL) {
+      _cds_i2i_entry_code_buffers = (address)misc_data_space_alloc(total_size);
+      _cds_i2i_entry_code_buffers_size = total_size;
+    }
+  } else if (UseSharedSpaces) {
+    assert(_cds_i2i_entry_code_buffers != NULL, "must already been initialized");
+  } else {
+    return NULL;
+  }
+
+  assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
+  return _cds_i2i_entry_code_buffers;
+}
 
 // CDS code for dumping shared archive.
 
@@ -576,6 +593,8 @@
                                      &md_top, md_end,
                                      &mc_top, mc_end);
 
+  guarantee(md_top <= md_end, "Insufficient space for vtables.");
+
   // Reorder the system dictionary.  (Moving the symbols affects
   // how the hash table indices are calculated.)
   // Not doing this either.
@@ -668,6 +687,8 @@
   FileMapInfo* mapinfo = new FileMapInfo();
   mapinfo->populate_header(MetaspaceShared::max_alignment());
   mapinfo->set_misc_data_patching_start((char*)vtbl_list);
+  mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
+  mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
 
   for (int pass=1; pass<=2; pass++) {
     if (pass == 1) {
@@ -686,7 +707,7 @@
     mapinfo->write_region(MetaspaceShared::md, _md_vs.low(),
                           pointer_delta(md_top, _md_vs.low(), sizeof(char)),
                           SharedMiscDataSize,
-                          false, false);
+                          false, true);
     mapinfo->write_region(MetaspaceShared::mc, _mc_vs.low(),
                           pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
                           SharedMiscCodeSize,
@@ -980,6 +1001,11 @@
   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
 }
 
+// Return true if given address is in the shared region corresponding to idx
+bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
+  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
+}
+
 bool MetaspaceShared::is_string_region(int idx) {
   return (idx >= MetaspaceShared::first_string &&
           idx < MetaspaceShared::first_string + MetaspaceShared::max_strings);
@@ -1053,6 +1079,8 @@
 
 void MetaspaceShared::initialize_shared_spaces() {
   FileMapInfo *mapinfo = FileMapInfo::current_info();
+  _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers();
+  _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size();
   char* buffer = mapinfo->misc_data_patching_start();
 
   // Skip over (reserve space for) a list of addresses of C++ vtables
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp	Thu Apr 07 22:03:04 2016 -0700
@@ -50,17 +50,14 @@
 #define MIN_SHARED_READ_ONLY_SIZE       (NOT_LP64(8*M) LP64_ONLY(9*M))
 
 // the MIN_SHARED_MISC_DATA_SIZE and MIN_SHARED_MISC_CODE_SIZE estimates are based on
-// MetaspaceShared::generate_vtable_methods().
-// The minimum size only accounts for the vtable methods. Any size less than the
-// minimum required size would cause vm crash when allocating the vtable methods.
-#define SHARED_MISC_SIZE_FOR(size)      (DEFAULT_VTBL_VIRTUALS_COUNT*DEFAULT_VTBL_LIST_SIZE*size)
+// the sizes required for dumping the archive using the default classlist. The sizes
+// are multiplied by 1.5 for a safety margin.
 
 #define DEFAULT_SHARED_MISC_DATA_SIZE   (NOT_LP64(2*M) LP64_ONLY(4*M))
-#define MIN_SHARED_MISC_DATA_SIZE       (SHARED_MISC_SIZE_FOR(sizeof(void*)))
+#define MIN_SHARED_MISC_DATA_SIZE       (NOT_LP64(1*M) LP64_ONLY(1200*K))
 
 #define DEFAULT_SHARED_MISC_CODE_SIZE   (120*K)
-#define MIN_SHARED_MISC_CODE_SIZE       (SHARED_MISC_SIZE_FOR(sizeof(void*))+SHARED_MISC_SIZE_FOR(DEFAULT_VTBL_METHOD_SIZE)+DEFAULT_VTBL_COMMON_CODE_SIZE)
-
+#define MIN_SHARED_MISC_CODE_SIZE       (NOT_LP64(63*K) LP64_ONLY(69*K))
 #define DEFAULT_COMBINED_SIZE           (DEFAULT_SHARED_READ_WRITE_SIZE+DEFAULT_SHARED_READ_ONLY_SIZE+DEFAULT_SHARED_MISC_DATA_SIZE+DEFAULT_SHARED_MISC_CODE_SIZE)
 
 // the max size is the MAX size (ie. 0x7FFFFFFF) - the total size of
@@ -128,6 +125,8 @@
   static bool _check_classes_made_progress;
   static bool _has_error_classes;
   static bool _archive_loading_failed;
+  static address _cds_i2i_entry_code_buffers;
+  static size_t  _cds_i2i_entry_code_buffers_size;
 
   // Used only during dumping.
   static SharedMiscRegion _md;
@@ -185,6 +184,9 @@
   // Return true if given address is in the mapped shared space.
   static bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
 
+  // Return true if given address is in the shared region corresponding to the idx
+  static bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
+
   static bool is_string_region(int idx) NOT_CDS_RETURN_(false);
 
   static void generate_vtable_methods(void** vtbl_list,
@@ -218,6 +220,15 @@
   static char* misc_code_space_alloc(size_t num_bytes) {  return _mc.alloc(num_bytes); }
   static char* misc_data_space_alloc(size_t num_bytes) {  return _md.alloc(num_bytes); }
 
+  static address cds_i2i_entry_code_buffers(size_t total_size);
+
+  static address cds_i2i_entry_code_buffers() {
+    return _cds_i2i_entry_code_buffers;
+  }
+  static size_t cds_i2i_entry_code_buffers_size() {
+    return _cds_i2i_entry_code_buffers_size;
+  }
+
   static SharedMiscRegion* misc_code_region() {
     assert(DumpSharedSpaces, "used during dumping only");
     return &_mc;
--- a/hotspot/src/share/vm/oops/constMethod.hpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/oops/constMethod.hpp	Thu Apr 07 22:03:04 2016 -0700
@@ -121,6 +121,7 @@
 };
 
 class KlassSizeStats;
+class AdapterHandlerEntry;
 
 // Class to collect the sizes of ConstMethod inline tables
 #define INLINE_TABLES_DO(do_element)            \
@@ -201,6 +202,12 @@
   // Raw stackmap data for the method
   Array<u1>*        _stackmap_data;
 
+  // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
+  union {
+    AdapterHandlerEntry* _adapter;
+    AdapterHandlerEntry** _adapter_trampoline;
+  };
+
   int               _constMethod_size;
   u2                _flags;
 
@@ -276,6 +283,29 @@
   void copy_stackmap_data(ClassLoaderData* loader_data, u1* sd, int length, TRAPS);
   bool has_stackmap_table() const { return _stackmap_data != NULL; }
 
+  // adapter
+  void set_adapter_entry(AdapterHandlerEntry* adapter) {
+    assert(!is_shared(), "shared methods have fixed adapter_trampoline");
+    _adapter = adapter;
+  }
+  void set_adapter_trampoline(AdapterHandlerEntry** trampoline) {
+    assert(DumpSharedSpaces, "must be");
+    assert(*trampoline == NULL, "must be NULL during dump time, to be initialized at run time");
+    _adapter_trampoline = trampoline;
+  }
+  void update_adapter_trampoline(AdapterHandlerEntry* adapter) {
+    assert(is_shared(), "must be");
+    *_adapter_trampoline = adapter;
+    assert(this->adapter() == adapter, "must be");
+  }
+  AdapterHandlerEntry* adapter() {
+    if (is_shared()) {
+      return *_adapter_trampoline;
+    } else {
+      return _adapter;
+    }
+  }
+
   void init_fingerprint() {
     const uint64_t initval = UCONST64(0x8000000000000000);
     _fingerprint = initval;
--- a/hotspot/src/share/vm/oops/method.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -38,6 +38,7 @@
 #include "interpreter/oopMapCache.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/constMethod.hpp"
@@ -123,18 +124,18 @@
 }
 
 address Method::get_i2c_entry() {
-  assert(_adapter != NULL, "must have");
-  return _adapter->get_i2c_entry();
+  assert(adapter() != NULL, "must have");
+  return adapter()->get_i2c_entry();
 }
 
 address Method::get_c2i_entry() {
-  assert(_adapter != NULL, "must have");
-  return _adapter->get_c2i_entry();
+  assert(adapter() != NULL, "must have");
+  return adapter()->get_c2i_entry();
 }
 
 address Method::get_c2i_unverified_entry() {
-  assert(_adapter != NULL, "must have");
-  return _adapter->get_c2i_unverified_entry();
+  assert(adapter() != NULL, "must have");
+  return adapter()->get_c2i_unverified_entry();
 }
 
 char* Method::name_and_sig_as_C_string() const {
@@ -892,10 +893,10 @@
 
   // this may be NULL if c2i adapters have not been made yet
   // Only should happen at allocate time.
-  if (_adapter == NULL) {
+  if (adapter() == NULL) {
     _from_compiled_entry    = NULL;
   } else {
-    _from_compiled_entry    = _adapter->get_c2i_entry();
+    _from_compiled_entry    = adapter()->get_c2i_entry();
   }
   OrderAccess::storestore();
   _from_interpreted_entry = _i2i_entry;
@@ -903,47 +904,68 @@
   _code = NULL;
 }
 
+#if INCLUDE_CDS
 // Called by class data sharing to remove any entry points (which are not shared)
 void Method::unlink_method() {
   _code = NULL;
-  _i2i_entry = NULL;
-  _from_interpreted_entry = NULL;
+
+  assert(DumpSharedSpaces, "dump time only");
+  // Set the values to what they should be at run time. Note that
+  // this Method can no longer be executed during dump time.
+  _i2i_entry = Interpreter::entry_for_cds_method(this);
+  _from_interpreted_entry = _i2i_entry;
+
   if (is_native()) {
     *native_function_addr() = NULL;
     set_signature_handler(NULL);
   }
   NOT_PRODUCT(set_compiled_invocation_count(0);)
-  _adapter = NULL;
-  _from_compiled_entry = NULL;
+
+  CDSAdapterHandlerEntry* cds_adapter = (CDSAdapterHandlerEntry*)adapter();
+  constMethod()->set_adapter_trampoline(cds_adapter->get_adapter_trampoline());
+  _from_compiled_entry = cds_adapter->get_c2i_entry_trampoline();
+  assert(*((int*)_from_compiled_entry) == 0, "must be NULL during dump time, to be initialized at run time");
+
 
   // In case of DumpSharedSpaces, _method_data should always be NULL.
-  //
-  // During runtime (!DumpSharedSpaces), when we are cleaning a
-  // shared class that failed to load, this->link_method() may
-  // have already been called (before an exception happened), so
-  // this->_method_data may not be NULL.
-  assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?");
+  assert(_method_data == NULL, "unexpected method data?");
 
   set_method_data(NULL);
   clear_method_counters();
 }
+#endif
 
 // Called when the method_holder is getting linked. Setup entrypoints so the method
 // is ready to be called from interpreter, compiler, and vtables.
 void Method::link_method(const methodHandle& h_method, TRAPS) {
   // If the code cache is full, we may reenter this function for the
   // leftover methods that weren't linked.
-  if (_i2i_entry != NULL) return;
+  if (is_shared()) {
+    if (adapter() != NULL) return;
+  } else {
+    if (_i2i_entry != NULL) return;
 
-  assert(_adapter == NULL, "init'd to NULL" );
+    assert(adapter() == NULL, "init'd to NULL" );
+  }
   assert( _code == NULL, "nothing compiled yet" );
 
   // Setup interpreter entrypoint
   assert(this == h_method(), "wrong h_method()" );
-  address entry = Interpreter::entry_for_method(h_method);
+  address entry;
+
+  if (this->is_shared()) {
+    entry = Interpreter::entry_for_cds_method(h_method);
+  } else {
+    entry = Interpreter::entry_for_method(h_method);
+  }
   assert(entry != NULL, "interpreter entry must be non-null");
-  // Sets both _i2i_entry and _from_interpreted_entry
-  set_interpreter_entry(entry);
+  if (is_shared()) {
+    assert(entry == _i2i_entry && entry == _from_interpreted_entry,
+           "should be correctly set during dump time");
+  } else {
+    // Sets both _i2i_entry and _from_interpreted_entry
+    set_interpreter_entry(entry);
+  }
 
   // Don't overwrite already registered native entries.
   if (is_native() && !has_native_function()) {
@@ -975,8 +997,13 @@
     THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(), "Out of space in CodeCache for adapters");
   }
 
-  mh->set_adapter_entry(adapter);
-  mh->_from_compiled_entry = adapter->get_c2i_entry();
+  if (mh->is_shared()) {
+    assert(mh->adapter() == adapter, "must be");
+    assert(mh->_from_compiled_entry != NULL, "must be"); // FIXME, the instructions also not NULL
+  } else {
+    mh->set_adapter_entry(adapter);
+    mh->_from_compiled_entry = adapter->get_c2i_entry();
+  }
   return adapter->get_c2i_entry();
 }
 
@@ -992,6 +1019,14 @@
   }
 }
 
+volatile address Method::from_compiled_entry_no_trampoline() const {
+  nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
+  if (code) {
+    return code->verified_entry_point();
+  } else {
+    return adapter()->get_c2i_entry();
+  }
+}
 
 // The verified_code_entry() must be called when a invoke is resolved
 // on this method.
--- a/hotspot/src/share/vm/oops/method.hpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Thu Apr 07 22:03:04 2016 -0700
@@ -93,8 +93,6 @@
 #endif
   // Entry point for calling both from and to the interpreter.
   address _i2i_entry;           // All-args-on-stack calling convention
-  // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
-  AdapterHandlerEntry* _adapter;
   // Entry point for calling from compiled code, to compiled code if it exists
   // or else the interpreter.
   volatile address _from_compiled_entry;        // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
@@ -137,6 +135,7 @@
 
   static address make_adapters(methodHandle mh, TRAPS);
   volatile address from_compiled_entry() const   { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
+  volatile address from_compiled_entry_no_trampoline() const;
   volatile address from_interpreted_entry() const{ return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }
 
   // access flag
@@ -431,15 +430,23 @@
   nmethod* volatile code() const                 { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
   void clear_code();            // Clear out any compiled code
   static void set_code(methodHandle mh, nmethod* code);
-  void set_adapter_entry(AdapterHandlerEntry* adapter) {  _adapter = adapter; }
+  void set_adapter_entry(AdapterHandlerEntry* adapter) {
+    constMethod()->set_adapter_entry(adapter);
+  }
+  void update_adapter_trampoline(AdapterHandlerEntry* adapter) {
+    constMethod()->update_adapter_trampoline(adapter);
+  }
+
   address get_i2c_entry();
   address get_c2i_entry();
   address get_c2i_unverified_entry();
-  AdapterHandlerEntry* adapter() {  return _adapter; }
+  AdapterHandlerEntry* adapter() const {
+    return constMethod()->adapter();
+  }
   // setup entry points
   void link_method(const methodHandle& method, TRAPS);
-  // clear entry points. Used by sharing code
-  void unlink_method();
+  // clear entry points. Used by sharing code during dump time
+  void unlink_method() NOT_CDS_RETURN;
 
   // vtable index
   enum VtableIndexFlag {
@@ -465,7 +472,15 @@
   // interpreter entry
   address interpreter_entry() const              { return _i2i_entry; }
   // Only used when first initialize so we can set _i2i_entry and _from_interpreted_entry
-  void set_interpreter_entry(address entry)      { _i2i_entry = entry;  _from_interpreted_entry = entry; }
+  void set_interpreter_entry(address entry) {
+    assert(!is_shared(), "shared method's interpreter entry should not be changed at run time");
+    if (_i2i_entry != entry) {
+      _i2i_entry = entry;
+    }
+    if (_from_interpreted_entry != entry) {
+      _from_interpreted_entry = entry;
+    }
+  }
 
   // native function (used for native methods only)
   enum {
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -38,6 +38,7 @@
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "logging/log.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/klass.hpp"
@@ -1788,7 +1789,7 @@
 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
   Method* moop(method);
 
-  address entry_point = moop->from_compiled_entry();
+  address entry_point = moop->from_compiled_entry_no_trampoline();
 
   // It's possible that deoptimization can occur at a call site which hasn't
   // been resolved yet, in which case this function will be called from
@@ -2351,12 +2352,15 @@
 
  public:
   AdapterHandlerTable()
-    : BasicHashtable<mtCode>(293, sizeof(AdapterHandlerEntry)) { }
+    : BasicHashtable<mtCode>(293, (DumpSharedSpaces ? sizeof(CDSAdapterHandlerEntry) : sizeof(AdapterHandlerEntry))) { }
 
   // Create a new entry suitable for insertion in the table
   AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
     AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable<mtCode>::new_entry(fingerprint->compute_hash());
     entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
+    if (DumpSharedSpaces) {
+      ((CDSAdapterHandlerEntry*)entry)->init();
+    }
     return entry;
   }
 
@@ -2519,6 +2523,28 @@
 }
 
 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
+  AdapterHandlerEntry* entry = get_adapter0(method);
+  if (method->is_shared()) {
+    MutexLocker mu(AdapterHandlerLibrary_lock);
+    if (method->adapter() == NULL) {
+      method->update_adapter_trampoline(entry);
+    }
+    address trampoline = method->from_compiled_entry();
+    if (*(int*)trampoline == 0) {
+      CodeBuffer buffer(trampoline, (int)SharedRuntime::trampoline_size());
+      MacroAssembler _masm(&buffer);
+      SharedRuntime::generate_trampoline(&_masm, entry->get_c2i_entry());
+
+      if (PrintInterpreter) {
+        Disassembler::decode(buffer.insts_begin(), buffer.insts_end());
+      }
+    }
+  }
+
+  return entry;
+}
+
+AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter0(const methodHandle& method) {
   // Use customized signature handler.  Need to lock around updates to
   // the AdapterHandlerTable (it is not safe for concurrent readers
   // and a single writer: this could be fixed if it becomes a
@@ -2535,7 +2561,9 @@
     // make sure data structure is initialized
     initialize();
 
-    if (CodeCacheExtensions::skip_compiler_support()) {
+    // during dump time, always generate adapters, even if the
+    // compiler has been turned off.
+    if (!DumpSharedSpaces && CodeCacheExtensions::skip_compiler_support()) {
       // adapters are useless and should not be used, including the
       // abstract_method_handler. However, some callers check that
       // an adapter was installed.
@@ -3017,6 +3045,17 @@
 
 }
 
+#if INCLUDE_CDS
+
+void CDSAdapterHandlerEntry::init() {
+  assert(DumpSharedSpaces, "used during dump time only");
+  _c2i_entry_trampoline = (address)MetaspaceShared::misc_data_space_alloc(SharedRuntime::trampoline_size());
+  _adapter_trampoline = (AdapterHandlerEntry**)MetaspaceShared::misc_data_space_alloc(sizeof(AdapterHandlerEntry*));
+};
+
+#endif // INCLUDE_CDS
+
+
 #ifndef PRODUCT
 
 void AdapterHandlerLibrary::print_statistics() {
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Apr 07 22:03:04 2016 -0700
@@ -398,6 +398,10 @@
   static void convert_ints_to_longints(int i2l_argcnt, int& in_args_count,
                                        BasicType*& in_sig_bt, VMRegPair*& in_regs);
 
+  static size_t trampoline_size();
+
+  static void generate_trampoline(MacroAssembler *masm, address destination);
+
   // Generate I2C and C2I adapters. These adapters are simple argument marshalling
   // blobs. Unlike adapters in the tiger and earlier releases the code in these
   // blobs does not create a new frame and are therefore virtually invisible
@@ -680,6 +684,17 @@
   void print_adapter_on(outputStream* st) const;
 };
 
+class CDSAdapterHandlerEntry: public AdapterHandlerEntry {
+  address               _c2i_entry_trampoline;   // allocated from shared spaces "MC" region
+  AdapterHandlerEntry** _adapter_trampoline;     // allocated from shared spaces "MD" region
+
+public:
+  address get_c2i_entry_trampoline()             const { return _c2i_entry_trampoline; }
+  AdapterHandlerEntry** get_adapter_trampoline() const { return _adapter_trampoline; }
+  void init() NOT_CDS_RETURN;
+};
+
+
 class AdapterHandlerLibrary: public AllStatic {
  private:
   static BufferBlob* _buffer; // the temporary code buffer in CodeCache
@@ -687,6 +702,7 @@
   static AdapterHandlerEntry* _abstract_method_handler;
   static BufferBlob* buffer_blob();
   static void initialize();
+  static AdapterHandlerEntry* get_adapter0(const methodHandle& method);
 
  public:
 
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Apr 08 12:26:29 2016 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Apr 07 22:03:04 2016 -0700
@@ -400,7 +400,6 @@
   nonproduct_nonstatic_field(Method,           _compiled_invocation_count,                    int)                                   \
   volatile_nonstatic_field(Method,             _code,                                         nmethod*)                              \
   nonstatic_field(Method,                      _i2i_entry,                                    address)                               \
-  nonstatic_field(Method,                      _adapter,                                      AdapterHandlerEntry*)                  \
   volatile_nonstatic_field(Method,             _from_compiled_entry,                          address)                               \
   volatile_nonstatic_field(Method,             _from_interpreted_entry,                       address)                               \
   volatile_nonstatic_field(ConstMethod,        _fingerprint,                                  uint64_t)                              \