/*
 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_assembler.cpp.incl"


// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code generation faster,
// the assembler keeps a copy of the code buffer's boundaries & modifies them when
// emitting bytes rather than using the code buffer's accessor functions all the time.
// The code buffer is updated via set_code_end(...) after emitting a whole instruction.
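//
// Illustrative sketch only (assumed shape; the real emit_byte()/emit_long()
// bodies live in the platform assembler headers and may differ):
//   *(unsigned char*)_code_pos = (unsigned char)x;  // write through the cached pointer
//   _code_pos += sizeof(unsigned char);             // advance the cached position
//   set_code_end(_code_pos);                        // publish the new end to the CodeSection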

AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL)  return;
  CodeSection* cs = code->insts();
  cs->clear_mark();   // new assembler kills old mark
  _code_section = cs;
  _code_begin   = cs->start();
  _code_limit   = cs->limit();
  _code_pos     = cs->end();
  _oop_recorder = code->oop_recorder();
  if (_code_begin == NULL) {
    vm_exit_out_of_memory1(0, "CodeCache: no room for %s", code->name());
  }
}

void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark();  // new assembly into this section kills old mark
  _code_section = cs;
  _code_begin   = cs->start();
  _code_limit   = cs->limit();
  _code_pos     = cs->end();
}
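
// A CodeBuffer is split into several CodeSections (instructions, stubs,
// constants). The assembler emits into exactly one section at a time; the
// start_a_*/end_a_* pairs below temporarily switch the active section and
// then return to the instruction section.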

// Inform CodeBuffer that incoming code and relocation will be for stubs
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  if (cs->maybe_expand_to_ensure_remaining(required_space)
      && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  sync();
  set_code_section(code()->insts());
}
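
// Typical usage sketch (illustrative only; actual callers and stub sizes are
// platform- and caller-specific):
//   address stub = __ start_a_stub(stub_size);
//   if (stub == NULL) { /* CodeBuffer could not expand; bail out */ }
//   ... emit the stub's instructions ...
//   __ end_a_stub();  // resume emitting into the instruction section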

// Inform CodeBuffer that incoming code and relocation will be for constants
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  address end = cs->end();
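  // pad rounds end up to the next multiple of required_align, which is assumed
  // to be a power of two; e.g. end == 0x...d with required_align == 8 gives pad == 3.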
  int pad = -(intptr_t)end & (required_align-1);
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL)  return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_const() returned NULL
void AbstractAssembler::end_a_const() {
  assert(_code_section == code()->consts(), "not in consts?");
  sync();
  set_code_section(code()->insts());
}


void AbstractAssembler::flush() {
  sync();
  ICache::invalidate_range(addr_at(0), offset());
}


void AbstractAssembler::a_byte(int x) {
  emit_byte(x);
}


void AbstractAssembler::a_long(jint x) {
  emit_long(x);
}

// Labels refer to positions in the (to be) generated code. There are two
// kinds of labels: bound and unbound.
//
// Bound labels refer to known positions in the already generated code.
// offset() is the position the label refers to.
//
// Unbound labels refer to unknown positions in the code to be generated; an
// unbound label may contain a list of unresolved displacements that refer to it
#ifndef PRODUCT
void AbstractAssembler::print(Label& L) {
  if (L.is_bound()) {
    tty->print_cr("bound label to %d|%d", L.loc_pos(), L.loc_sect());
  } else if (L.is_unbound()) {
    L.print_instructions((MacroAssembler*)this);
  } else {
    tty->print_cr("label in inconsistent state (loc = %d)", L.loc());
  }
}
#endif // PRODUCT


void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}
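
// Usage sketch (illustrative; the branch mnemonic depends on the platform
// assembler): a forward branch against an unbound label records its location
// via add_patch_at(), and bind() later patches every recorded branch.
//   Label done;
//   __ jcc(Assembler::zero, done);  // forward branch, target not yet known
//   ... more code ...
//   __ bind(done);                  // back-patches the recorded branch(es)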

void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  if (UseStackBanging) {
    // Each code entry causes one stack bang n pages down the stack where n
    // is configurable by StackShadowPages. The setting depends on the maximum
    // depth of the VM call stack or native code before going back into Java
    // code, since only Java code can raise a stack overflow exception using
    // the stack banging mechanism. The VM and native code do not detect stack
    // overflow.
    // The code in JavaCalls::call() checks that there are at least n pages
    // available, so all entry code needs to do is bang once for the end of
    // this shadow zone.
    // The entry code may need to bang additional pages if the framesize
    // is greater than a page.

    const int page_size = os::vm_page_size();
    int bang_end = StackShadowPages*page_size;

    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;

    if (frame_size_in_bytes > page_size) {
      bang_end += frame_size_in_bytes;
    }

    int bang_offset = bang_end_safe;
    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.
      bang_stack_with_offset(bang_offset);
      bang_offset += page_size;
    }
  } // end (UseStackBanging)
}
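
// Worked example with illustrative numbers: for a 4K page size, StackShadowPages == 3
// and frame_size_in_bytes == 10000, bang_end_safe == 12288 and bang_end == 22288, so
// the loop above probes at offsets 12288, 16384 and 20480 (one bang per page).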

void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
  assert(_loc == -1, "Label is unbound");
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
  } else {
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}
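
// The first PatchCacheSize branch locators are kept in the label's inline
// _patches array; any additional ones spill into an overflow stack obtained
// from the CodeBuffer via create_patch_overflow().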

void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target);
  }
}


void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}


#ifndef PRODUCT
void Label::print_instructions(MacroAssembler* masm) const {
  CodeBuffer* cb = masm->code();
  for (int i = 0; i < _patch_index; ++i) {
    int branch_loc;
    if (i >= PatchCacheSize) {
      branch_loc = _patch_overflow->at(i - PatchCacheSize);
    } else {
      branch_loc = _patches[i];
    }
    int branch_pos  = CodeBuffer::locator_pos(branch_loc);
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    tty->print_cr("unbound label");
    tty->print("@ %d|%d ", branch_pos, branch_sect);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      tty->print_cr(PTR_FORMAT, *(address*)branch);
      continue;
    }
    masm->pd_print_patched_instruction(branch);
    tty->cr();
  }
}
#endif // ndef PRODUCT