--- a/src/hotspot/share/code/vtableStubs.hpp Sat Sep 01 12:02:07 2018 -0700
+++ b/src/hotspot/share/code/vtableStubs.hpp Mon Sep 03 09:43:08 2018 +0200
@@ -25,12 +25,94 @@
#ifndef SHARE_VM_CODE_VTABLESTUBS_HPP
#define SHARE_VM_CODE_VTABLESTUBS_HPP
+#include "asm/macroAssembler.hpp"
#include "code/vmreg.hpp"
#include "memory/allocation.hpp"
// A VtableStub holds an individual code stub for a pair (vtable index, #args) for either itables or vtables
// There's a one-to-one relationship between a VtableStub and such a pair.
+// A word on VtableStub sizing:
+// Such a vtable/itable stub consists of the instance data
+// and an immediately following CodeBuffer.
+// Unfortunately, the required space for the code buffer varies, depending on
+// the setting of compile-time macros (PRODUCT, ASSERT, ...) and on command-line
+// parameters. Actual data may influence the size as well.
+//
+// A simple approximation of the VtableStub size would be to just pick a value
+// "large enough" for all circumstances - a worst-case estimate.
+// As many stubs can exist - and they never go away - we certainly don't
+// want to waste more code cache space than absolutely necessary.
+//
+// We need a different approach which, as far as possible, should be independent
+// of or adaptive to code size variations. These variations may be caused by
+// changed compile-time or runtime switches as well as by changed emitter code.
+//
+// Here is the idea:
+// For the first stub we generate, we allocate a "large enough" code buffer.
+// Once all instructions are emitted, we know the actual size of the stub.
+// Remembering that size allows us to allocate a tightly matching code buffer
+// for all subsequent stubs. That covers all "static variance", i.e. all variance
+// that is due to compile time macros, command line parameters, machine capabilities,
+// and other influences which are immutable for the life span of the VM.
+//
+// Life isn't always that easy. Code size may depend on actual data, "load constant"
+// being one example. All code segments with such "dynamic variance" require
+// additional care. We need to know or estimate the worst case code size for each
+// such segment. With that knowledge, we can maintain a "slop counter" in the
+// platform-specific stub emitters. It accumulates the difference between worst-case
+// and actual code size. When the stub is fully generated, the actual stub size is
+// adjusted (increased) by the slop counter value.
+//
+// As a result, all code buffers except the first are allocated with the same, tightly matching size.
+//
+
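+// As an illustration, a platform-specific emitter might follow the pattern
+// sketched below (a sketch only: "load_const", "temp_reg", "some_constant",
+// and "load_const_maxLen" are placeholder names, "__" is the usual shorthand
+// for "masm->", and the real emitters live in the cpu-specific
+// vtableStubs_<cpu>.cpp files):
+//
+//   int slop_bytes = 0;                      // accumulated worst-case minus actual size
+//   ...
+//   int start_pc = __ offset();
+//   __ load_const(temp_reg, some_constant);  // emitted size depends on the constant
+//   slop_bytes += load_const_maxLen - (__ offset() - start_pc);
+//   ...
+//   masm->flush();
+//   // Report the measured size plus accumulated slop back to VtableStubs:
+//   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
+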
+// VtableStubs creates the code stubs for compiled calls through vtables.
+// There is one stub per (vtable index, args_size) pair, and the stubs are
+// never deallocated. They don't need to be GCed because they contain no oops.
+class VtableStub;
+
+class VtableStubs : AllStatic {
+ public: // N must be public (some compilers need this for _table)
+ enum {
+ N = 256, // size of stub table; must be power of two
+ mask = N - 1
+ };
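+  // N being a power of two allows the table index to be computed cheaply
+  // as (hash & mask) rather than with a modulo operation.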
+
+ private:
+ friend class VtableStub;
+ static VtableStub* _table[N]; // table of existing stubs
+ static int _number_of_vtable_stubs; // number of stubs created so far (for statistics)
+ static int _vtab_stub_size; // current size estimate for vtable stub (quasi-constant)
+ static int _itab_stub_size; // current size estimate for itable stub (quasi-constant)
+
+ static VtableStub* create_vtable_stub(int vtable_index);
+ static VtableStub* create_itable_stub(int vtable_index);
+ static VtableStub* lookup (bool is_vtable_stub, int vtable_index);
+ static void enter (bool is_vtable_stub, int vtable_index, VtableStub* s);
+ static inline uint hash (bool is_vtable_stub, int vtable_index);
+ static address find_stub (bool is_vtable_stub, int vtable_index);
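+  // Finalizes a freshly generated stub (a helper for the platform-specific
+  // emitters; presumably traces/verifies the stub and feeds the measured
+  // code size plus accumulated slop back into the size estimates above).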
+ static void bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
+ address npe_addr, address ame_addr, bool is_vtable_stub,
+ int index, int slop_bytes, int index_dependent_slop);
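+  // code_size_limit() returns the code buffer size to use for the next stub;
+  // check_and_set_size_limit() updates that quasi-constant estimate once the
+  // actual size of a generated stub is known (see the sizing note above).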
+ static int code_size_limit(bool is_vtable_stub);
+ static void check_and_set_size_limit(bool is_vtable_stub,
+ int code_size,
+ int padding);
+
+ public:
+ static address find_vtable_stub(int vtable_index) { return find_stub(true, vtable_index); }
+ static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
+
+ static VtableStub* entry_point(address pc); // vtable stub entry point for a pc
+ static bool contains(address pc); // is pc within any stub?
+ static VtableStub* stub_containing(address pc); // stub containing pc or NULL
+ static int number_of_vtable_stubs() { return _number_of_vtable_stubs; }
+ static void initialize();
+ static void vtable_stub_do(void f(VtableStub*)); // iterates over all vtable stubs
+};
+
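+// For illustration, the find_*_stub entry points above boil down to a
+// lookup-or-create flow along these lines (a sketch, not the actual
+// implementation, which lives in vtableStubs.cpp and presumably also
+// handles synchronization):
+//
+//   address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
+//     VtableStub* s = lookup(is_vtable_stub, vtable_index);
+//     if (s == NULL) {
+//       s = is_vtable_stub ? create_vtable_stub(vtable_index)
+//                          : create_itable_stub(vtable_index);
+//       enter(is_vtable_stub, vtable_index, s);
+//     }
+//     return s->entry_point();
+//   }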
+
class VtableStub {
private:
friend class VtableStubs;
@@ -58,7 +140,7 @@
public:
address code_begin() const { return (address)(this + 1); }
- address code_end() const { return code_begin() + pd_code_size_limit(_is_vtable_stub); }
+ address code_end() const { return code_begin() + VtableStubs::code_size_limit(_is_vtable_stub); }
address entry_point() const { return code_begin(); }
static int entry_offset() { return sizeof(class VtableStub); }
@@ -78,7 +160,6 @@
}
// platform-dependent routines
- static int pd_code_size_limit(bool is_vtable_stub);
static int pd_code_alignment();
// CNC: Removed because vtable stubs are now made with an ideal graph
// static bool pd_disregard_arg_size();
@@ -100,38 +181,4 @@
};
-
-// VtableStubs creates the code stubs for compiled calls through vtables.
-// There is one stub per (vtable index, args_size) pair, and the stubs are
-// never deallocated. They don't need to be GCed because they contain no oops.
-
-class VtableStubs : AllStatic {
- public: // N must be public (some compilers need this for _table)
- enum {
- N = 256, // size of stub table; must be power of two
- mask = N - 1
- };
-
- private:
- static VtableStub* _table[N]; // table of existing stubs
- static int _number_of_vtable_stubs; // number of stubs created so far (for statistics)
-
- static VtableStub* create_vtable_stub(int vtable_index);
- static VtableStub* create_itable_stub(int vtable_index);
- static VtableStub* lookup (bool is_vtable_stub, int vtable_index);
- static void enter (bool is_vtable_stub, int vtable_index, VtableStub* s);
- static inline uint hash (bool is_vtable_stub, int vtable_index);
- static address find_stub (bool is_vtable_stub, int vtable_index);
-
- public:
- static address find_vtable_stub(int vtable_index) { return find_stub(true, vtable_index); }
- static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
- static VtableStub* entry_point(address pc); // vtable stub entry point for a pc
- static bool contains(address pc); // is pc within any stub?
- static VtableStub* stub_containing(address pc); // stub containing pc or NULL
- static int number_of_vtable_stubs() { return _number_of_vtable_stubs; }
- static void initialize();
- static void vtable_stub_do(void f(VtableStub*)); // iterates over all vtable stubs
-};
-
#endif // SHARE_VM_CODE_VTABLESTUBS_HPP