 */

#ifndef SHARE_VM_CODE_VTABLESTUBS_HPP
#define SHARE_VM_CODE_VTABLESTUBS_HPP

#include "asm/macroAssembler.hpp"
#include "code/vmreg.hpp"
#include "memory/allocation.hpp"

// A VtableStub holds an individual code stub for a pair (vtable index, #args) for either itables or vtables.
// There's a one-to-one relationship between a VtableStub and such a pair.
|

// A word on VtableStub sizing:
// Such a vtable/itable stub consists of the instance data
// and an immediately following CodeBuffer.
// Unfortunately, the required space for the code buffer varies, depending on
// the setting of compile time macros (PRODUCT, ASSERT, ...) and of command line
// parameters. Actual data may have an influence on the size as well.
//
// A simple approximation for the VtableStub size would be to just take a value
// "large enough" for all circumstances - a worst case estimate.
// As there can exist many stubs - and they never go away - we certainly don't
// want to waste more code cache space than absolutely necessary.
//
// We need a different approach which, as far as possible, should be independent
// from or adaptive to code size variations. These variations may be caused by
// changed compile time or run time switches as well as by changed emitter code.
//
// Here is the idea:
// For the first stub we generate, we allocate a "large enough" code buffer.
// Once all instructions are emitted, we know the actual size of the stub.
// Remembering that size allows us to allocate a tightly matching code buffer
// for all subsequent stubs. That covers all "static variance", i.e. all variance
// that is due to compile time macros, command line parameters, machine capabilities,
// and other influences which are immutable for the life span of the vm.
//
// Life isn't always that easy. Code size may depend on actual data, "load constant"
// being an example for that. All code segments with such "dynamic variance" require
// additional care. We need to know or estimate the worst case code size for each
// such segment. With that knowledge, we can maintain a "slop counter" in the
// platform-specific stub emitters. It accumulates the difference between worst-case
// and actual code size. When the stub is fully generated, the actual stub size is
// adjusted (increased) by the slop counter value.
//
// As a result, we allocate all but the first code buffers with the same, tightly matching size.
//
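// The following is an illustrative sketch only; it is not part of this file's code.
// It assumes the code runs inside a platform-specific VtableStubs member such as
// create_vtable_stub() (so the private bookkeeping() helper declared below is
// accessible), with masm, s, npe_addr, ame_addr and vtable_index as local state.
// emit_load_constant() and the 16-byte worst case are made-up placeholders that
// stand for a code segment with "dynamic variance":
//
//   int       slop_bytes = 0;
//   const int load_const_worst_case = 16;               // assumed worst case for "load constant"
//   address   seg_start = masm->pc();
//   emit_load_constant(masm, vtable_index);             // actual size varies with the index value
//   slop_bytes += load_const_worst_case - (masm->pc() - seg_start);
//   ...
//   // When the stub is fully emitted, report the accumulated slop so that the
//   // remembered stub size is increased to cover the worst case for future stubs:
//   bookkeeping(masm, tty, s, npe_addr, ame_addr, true /*is_vtable_stub*/,
//               vtable_index, slop_bytes, 0 /*index_dependent_slop*/);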
|
// VtableStubs creates the code stubs for compiled calls through vtables.
// There is one stub per (vtable index, args_size) pair, and the stubs are
// never deallocated. They don't need to be GCed because they contain no oops.
class VtableStub;

class VtableStubs : AllStatic {
 public:                                         // N must be public (some compilers need this for _table)
  enum {
    N    = 256,                                  // size of stub table; must be power of two
    mask = N - 1
  };

 private:
  friend class VtableStub;
  static VtableStub* _table[N];                  // table of existing stubs
  static int         _number_of_vtable_stubs;    // number of stubs created so far (for statistics)
  static int         _vtab_stub_size;            // current size estimate for vtable stub (quasi-constant)
  static int         _itab_stub_size;            // current size estimate for itable stub (quasi-constant)

  static VtableStub* create_vtable_stub(int vtable_index);
  static VtableStub* create_itable_stub(int vtable_index);
  static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
  static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
  static inline uint hash              (bool is_vtable_stub, int vtable_index);
  static address     find_stub         (bool is_vtable_stub, int vtable_index);
  static void        bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
                                 address npe_addr, address ame_addr, bool is_vtable_stub,
                                 int index, int slop_bytes, int index_dependent_slop);
  static int         code_size_limit(bool is_vtable_stub);
  static void        check_and_set_size_limit(bool is_vtable_stub,
                                              int  code_size,
                                              int  padding);

 public:
  static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
  static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }

  static VtableStub* entry_point(address pc);                        // vtable stub entry point for a pc
  static bool        contains(address pc);                           // is pc within any stub?
  static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
  static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
  static void        initialize();
  static void        vtable_stub_do(void f(VtableStub*));            // iterates over all vtable stubs
};
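
// Illustrative usage sketch only, not code from the VM: how a caller resolving a
// compiled virtual call might obtain a stub entry point through the public interface
// above. find_vtable_stub() is real; the surrounding variable names are hypothetical,
// and the create-on-first-use behavior is an assumption about find_stub() rather than
// something stated in this header.
//
//   address stub_entry = VtableStubs::find_vtable_stub(vtable_index);
//   if (stub_entry == NULL) {
//     // no stub available (e.g. stub generation failed); caller must handle this
//   } else {
//     // stub_entry is the code address the compiled call site dispatches through
//   }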
|

class VtableStub {
 private:
  friend class VtableStubs;

  void print_on(outputStream* st) const;
  void print()                    const { print_on(tty); }

};

#endif // SHARE_VM_CODE_VTABLESTUBS_HPP