1
|
1 |
/*
|
|
2 |
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
|
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 |
*
|
|
5 |
* This code is free software; you can redistribute it and/or modify it
|
|
6 |
* under the terms of the GNU General Public License version 2 only, as
|
|
7 |
* published by the Free Software Foundation.
|
|
8 |
*
|
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
13 |
* accompanied this code).
|
|
14 |
*
|
|
15 |
* You should have received a copy of the GNU General Public License version
|
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 |
*
|
|
19 |
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
|
20 |
* CA 95054 USA or visit www.sun.com if you need additional information or
|
|
21 |
* have any questions.
|
|
22 |
*
|
|
23 |
*/
|
|
24 |
|
|
25 |
# include "incls/_precompiled.incl"
|
|
26 |
# include "incls/_codeBuffer.cpp.incl"
|
|
27 |
|
|
28 |
// The structure of a CodeSection:
|
|
29 |
//
|
|
30 |
// _start -> +----------------+
|
|
31 |
// | machine code...|
|
|
32 |
// _end -> |----------------|
|
|
33 |
// | |
|
|
34 |
// | (empty) |
|
|
35 |
// | |
|
|
36 |
// | |
|
|
37 |
// +----------------+
|
|
38 |
// _limit -> | |
|
|
39 |
//
|
|
40 |
// _locs_start -> +----------------+
|
|
41 |
// |reloc records...|
|
|
42 |
// |----------------|
|
|
43 |
// _locs_end -> | |
|
|
44 |
// | |
|
|
45 |
// | (empty) |
|
|
46 |
// | |
|
|
47 |
// | |
|
|
48 |
// +----------------+
|
|
49 |
// _locs_limit -> | |
|
|
50 |
// The _end (resp. _limit) pointer refers to the first
|
|
51 |
// unused (resp. unallocated) byte.
|
|
52 |
|
|
53 |
// The structure of the CodeBuffer while code is being accumulated:
|
|
54 |
//
|
|
55 |
// _total_start -> \
|
|
56 |
// _insts._start -> +----------------+
|
|
57 |
// | |
|
|
58 |
// | Code |
|
|
59 |
// | |
|
|
60 |
// _stubs._start -> |----------------|
|
|
61 |
// | |
|
|
62 |
// | Stubs | (also handlers for deopt/exception)
|
|
63 |
// | |
|
|
64 |
// _consts._start -> |----------------|
|
|
65 |
// | |
|
|
66 |
// | Constants |
|
|
67 |
// | |
|
|
68 |
// +----------------+
|
|
69 |
// + _total_size -> | |
|
|
70 |
//
|
|
71 |
// When the code and relocations are copied to the code cache,
|
|
72 |
// the empty parts of each section are removed, and everything
|
|
73 |
// is copied into contiguous locations.
|
|
74 |
|
|
75 |
typedef CodeBuffer::csize_t csize_t; // file-local definition
|
|
76 |
|
|
77 |
// external buffer, in a predefined CodeBlob or other buffer area
|
|
78 |
// Important: The code_start must be taken exactly, and not realigned.
|
|
79 |
// Construct a CodeBuffer over an externally supplied memory range
// (e.g. inside a predefined CodeBlob). The given code_start is used
// exactly as passed in -- no realignment is performed -- so callers
// are responsible for any alignment requirements.
CodeBuffer::CodeBuffer(address code_start, csize_t code_size) {
  assert(code_start != NULL, "sanity");
  // Order matters: misc fields (name, recorders) must be set up before
  // the sections are initialized over the external storage.
  initialize_misc("static buffer");
  initialize(code_start, code_size);
  assert(verify_section_allocation(), "initial use of buffer OK");
}
|
|
85 |
|
|
86 |
// Allocate a fresh BufferBlob big enough for code_size bytes of code
// (plus per-section alignment and slop overhead) and carve it into
// sections. On allocation failure, blob() stays NULL and the caller
// must detect that before using the buffer.
void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Compute maximal alignment.
  int align = _insts.alignment();
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(blob() == NULL, "only once");
  // Over-allocate: each of the SECT_LIMIT sections (plus one extra
  // margin) may need up to (align + slop) bytes of padding.
  set_blob(BufferBlob::create(_name, code_size + (align+slop) * (SECT_LIMIT+1)));
  if (blob() == NULL) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  // (set_blob above computed _total_start/_total_size from the blob.)
  initialize(_total_start, _total_size);

  assert((uintptr_t)code_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  // Platform-specific initialization hook.
  pd_initialize();

  if (locs_size != 0) {
    // Pre-size the relocation buffer of the main (insts) section.
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  assert(verify_section_allocation(), "initial use of blob is OK");
}
|
|
112 |
|
|
113 |
|
|
114 |
// Release every BufferBlob held by this buffer and by all of its
// prior (pre-expansion) incarnations.
CodeBuffer::~CodeBuffer() {
  // If we allocate our code buffer from the CodeCache
  // via a BufferBlob, and it's not permanent, then
  // free the BufferBlob.
  // The rest of the memory will be freed when the ResourceObj
  // is released.
  assert(verify_section_allocation(), "final storage configuration still OK");
  // Walk the chain of previous incarnations created by expand().
  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }
#ifdef ASSERT
  // Poison this object so stale uses after destruction are caught.
  Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
#endif
}
|
|
130 |
|
|
131 |
// Replace the built-in default oop recorder with an external one.
// May only be done once, and only before the default recorder has
// recorded anything.
void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.oop_size());  // force unused OR to be frozen
  _oop_recorder = r;
}
|
|
136 |
|
|
137 |
// Carve 'size' bytes off the tail of the insts section's storage and
// give them to section 'cs'. Sections must be allocated in reverse
// index order so each one takes space from the (shrinking) insts tail.
void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
#ifdef ASSERT
  for (int n = (int)SECT_INSTS+1; n < (int)SECT_LIMIT; n++) {
    CodeSection* prevCS = code_section(n);
    if (prevCS == cs) break;
    assert(!prevCS->is_allocated(), "section allocation must be in reverse order");
  }
#endif
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}
|
|
161 |
|
|
162 |
// Freeze section 'cs': trim its limit down to its current (aligned)
// size so it can no longer grow, and donate the leftover space and
// relocation storage to the following section, if there is one and it
// has not yet been allocated or frozen.
void CodeBuffer::freeze_section(CodeSection* cs) {
  // The consts section is last; it has no successor.
  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
  csize_t frozen_size = cs->size();
  if (next_cs != NULL) {
    // Round up so the successor starts properly aligned.
    frozen_size = next_cs->align_at_start(frozen_size);
  }
  address old_limit = cs->limit();
  address new_limit = cs->start() + frozen_size;
  relocInfo* old_locs_limit = cs->locs_limit();
  relocInfo* new_locs_limit = cs->locs_end();
  // Patch the limits.
  cs->_limit = new_limit;
  cs->_locs_limit = new_locs_limit;
  cs->_frozen = true;
  // BUG FIX: next_cs is NULL when cs is the last section (consts);
  // the original dereferenced it unconditionally here.
  if (next_cs != NULL && !next_cs->is_allocated() && !next_cs->is_frozen()) {
    // Give remaining buffer space to the following section.
    next_cs->initialize(new_limit, old_limit - new_limit);
    next_cs->initialize_shared_locs(new_locs_limit,
                                    old_locs_limit - new_locs_limit);
  }
}
|
|
183 |
|
|
184 |
// Attach (or detach, with NULL) the backing BufferBlob and derive
// _total_start/_total_size from it, rounding the start up to the
// insts section's alignment.
void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != NULL) {
    address start = blob->instructions_begin();
    address end   = blob->instructions_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
    _consts._start  = _consts._end  = badAddress;
#endif //ASSERT
  }
}
|
|
204 |
|
|
205 |
void CodeBuffer::free_blob() {
|
|
206 |
if (_blob != NULL) {
|
|
207 |
BufferBlob::free(_blob);
|
|
208 |
set_blob(NULL);
|
|
209 |
}
|
|
210 |
}
|
|
211 |
|
|
212 |
const char* CodeBuffer::code_section_name(int n) {
|
|
213 |
#ifdef PRODUCT
|
|
214 |
return NULL;
|
|
215 |
#else //PRODUCT
|
|
216 |
switch (n) {
|
|
217 |
case SECT_INSTS: return "insts";
|
|
218 |
case SECT_STUBS: return "stubs";
|
|
219 |
case SECT_CONSTS: return "consts";
|
|
220 |
default: return NULL;
|
|
221 |
}
|
|
222 |
#endif //PRODUCT
|
|
223 |
}
|
|
224 |
|
|
225 |
// Return the index of the first section whose allocated range covers
// addr, or SECT_NONE if no section allocates it.
int CodeBuffer::section_index_of(address addr) const {
  int found = SECT_NONE;
  for (int idx = 0; idx < (int)SECT_LIMIT; idx++) {
    if (code_section(idx)->allocates(addr)) {
      found = idx;
      break;
    }
  }
  return found;
}
|
|
232 |
|
|
233 |
// Encode addr as a (section, offset) locator. Returns -1 when the
// address does not fall inside any section.
int CodeBuffer::locator(address addr) const {
  for (int idx = 0; idx < (int)SECT_LIMIT; idx++) {
    const CodeSection* sect = code_section(idx);
    if (sect->allocates(addr)) {
      // Delegate to the static locator(offset, section) encoder.
      return locator(addr - sect->start(), idx);
    }
  }
  return -1;
}
|
|
242 |
|
|
243 |
// Decode a locator back into an address: section start plus encoded
// position. A negative locator (the "no address" encoding) yields NULL.
address CodeBuffer::locator_address(int locator) const {
  if (locator < 0)  return NULL;
  const CodeSection* sect = code_section(locator_sect(locator));
  return sect->start() + locator_pos(locator);
}
|
|
248 |
|
|
249 |
// Address at which disassembly should start: the explicit _decode_begin
// marker when it lies past the start of the insts section, otherwise
// the section start itself.
address CodeBuffer::decode_begin() {
  address lo = _insts.start();
  return (_decode_begin != NULL && _decode_begin > lo) ? _decode_begin : lo;
}
|
|
255 |
|
|
256 |
|
|
257 |
// Allocate a fresh overflow list for label patch locations, lazily
// creating the shared arena that backs all such lists.
GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  Arena* arena = _overflow_arena;
  if (arena == NULL) {
    arena = new Arena();
    _overflow_arena = arena;
  }
  return new (arena) GrowableArray<int>(arena, 8, 0, 0);
}
|
|
263 |
|
|
264 |
|
|
265 |
// Helper function for managing labels and their target addresses.
|
|
266 |
// Returns a sensible address, and if it is not the label's final
|
|
267 |
// address, notes the dependency (at 'branch_pc') on the label.
|
|
268 |
// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      // Bound in this same section: decode the locator locally.
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      // Bound in another section: let the buffer decode it.
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    // Record that the instruction at branch_pc must be patched once
    // the label is finally bound.
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // (Don't return NULL or badAddress, since branches shouldn't overflow.)
    return base;
  }
}
|
|
288 |
|
|
289 |
// Record relocation info 'spec' for the instruction at 'at', encoding
// it as one or more relocInfo words in this section's locs buffer.
// Offsets are stored relative to the previous locs_point; oversized
// gaps are bridged with filler relocs.
void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup. Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated. Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    // Reserve extra room for any filler relocs the gap will require.
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = filler_relocInfo();
    offset -= filler_relocInfo().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
|
|
348 |
|
|
349 |
// Allocate an owned relocation buffer of at least locs_capacity
// entries (but no smaller than a size-based a-priori minimum).
void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == NULL, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start    = locs_start;
  _locs_end      = locs_start;
  _locs_limit    = locs_start + locs_capacity;
  _locs_own      = true;  // this section owns (and may realloc) the buffer
}
|
|
360 |
|
|
361 |
// Adopt a relocation buffer owned by someone else (e.g. donated by a
// frozen predecessor section). The buffer is first advanced to a
// HeapWord boundary, which may shrink (or consume) it.
void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == NULL, "do this before locs are allocated");
  // Internal invariant: locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end = buf;
    _locs_limit = buf + length;
    _locs_own = false;  // shared storage: never REALLOC'd by this section
  }
}
|
|
375 |
|
|
376 |
// Make this (already-copied) section view the relocations of
// source_cs directly, without copying them, and position the locs
// point at the equivalent offset in the new code.
void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    // The shared buffer is exactly full: end == limit.
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}
|
|
386 |
|
|
387 |
// Grow the relocation buffer to at least new_capacity entries
// (doubling at minimum). A shared (non-owned) buffer is copied into a
// fresh owned one rather than reallocated in place.
void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == NULL) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      // Shared storage cannot be realloc'd; switch to an owned copy.
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_bytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start    = locs_start;
    _locs_end      = locs_start + old_count;
    _locs_limit    = locs_start + new_capacity;
  }
}
|
|
409 |
|
|
410 |
|
|
411 |
/// Support for emitting the code to its final location.
|
|
412 |
/// The pattern is the same for all functions.
|
|
413 |
/// We iterate over all the sections, padding each to alignment.
|
|
414 |
|
|
415 |
// Total bytes of code across all non-empty sections, including the
// alignment padding inserted between sections in the final layout.
csize_t CodeBuffer::total_code_size() const {
  csize_t total = 0;
  for (int idx = 0; idx < (int)SECT_LIMIT; idx++) {
    const CodeSection* sect = code_section(idx);
    if (sect->is_empty())  continue;  // empty sections take no space
    // Pad up to this section's start alignment, then add its contents.
    total = sect->align_at_start(total) + sect->size();
  }
  return total;
}
|
|
425 |
|
|
426 |
// Lay out this buffer's sections contiguously (with alignment padding
// only) inside dest's storage, initializing dest's sections to the
// chosen positions. No code bytes are moved here; see relocate_code_to.
void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_code_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    address cstart = cs->start();
    address cend   = cs->end();
    csize_t csize  = cend - cstart;

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (padding != 0) {
        buf_offset += padding;
        assert(prev_dest_cs != NULL, "sanity");
        // Padding bytes belong to the previous section's limit.
        prev_dest_cs->_limit += padding;
      }
#ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < SECT_CONSTS) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record. See Label::patch_instructions.
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
#endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_code_size(), "sanity");
  assert(dest->verify_section_allocation(), "final configuration works");
}
|
|
485 |
|
|
486 |
// Map an address inside one of the sections to its offset in the
// final (compacted, aligned) layout. Fatal if addr belongs to none of
// the sections.
csize_t CodeBuffer::total_offset_of(address addr) const {
  csize_t code_size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (!cs->is_empty()) {
      // Account for the alignment padding before this section.
      code_size_so_far = cs->align_at_start(code_size_so_far);
    }
    if (cs->contains2(addr)) {
      return code_size_so_far + (addr - cs->start());
    }
    code_size_so_far += cs->size();
  }
#ifndef PRODUCT
  tty->print_cr("Dangling address " PTR_FORMAT " in:", addr);
  ((CodeBuffer*)this)->print();
#endif
  ShouldNotReachHere();
  return -1;
}
|
|
505 |
|
|
506 |
// Size in bytes that the combined relocation records (plus the
// RelocIterator index) will occupy in a target blob, rounded up to a
// whole number of HeapWords.
csize_t CodeBuffer::total_relocation_size() const {
  csize_t lsize = copy_relocations_to(NULL);  // dry run only
  csize_t csize = total_code_size();
  csize_t total = RelocIterator::locs_and_index_size(csize, lsize);
  return (csize_t) align_size_up(total, HeapWordSize);
}
|
|
512 |
|
|
513 |
// Merge the per-section relocation streams into dest's single
// relocation area, inserting filler relocs to bridge inter-section
// gaps, then build the iterator index. With dest == NULL this is a
// sizing-only dry run; in both cases the byte count is returned.
csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;
  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
    assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
    assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");
  }
  // if dest == NULL, this is just the sizing pass

  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point; // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_bytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(code_end_so_far == total_code_size(), "sanity");

  // Account for index:
  if (buf != NULL) {
    RelocIterator::create_index(dest->relocation_begin(),
                                buf_offset / sizeof(relocInfo),
                                dest->relocation_end());
  }

  return buf_offset;
}
|
|
605 |
|
|
606 |
// Emit this buffer's accumulated code into its final resting place in
// dest_blob: compute the compact layout, move code and relocations,
// transfer comments, then flush the instruction cache for the range.
void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  // Temporary CodeBuffer viewing the blob's instruction area.
  CodeBuffer dest(dest_blob->instructions_begin(),
                  dest_blob->instructions_size());
  assert(dest_blob->instructions_size() >= total_code_size(), "good sizing");
  this->compute_final_layout(&dest);
  relocate_code_to(&dest);

  // transfer comments from buffer to blob
  dest_blob->set_comments(_comments);

  // Done moving code bytes; were they the right size?
  assert(round_to(dest.total_code_size(), oopSize) == dest_blob->instructions_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->instructions_begin(),
                           dest_blob->instructions_size());
}
|
|
630 |
|
|
631 |
// Move all my code into another code buffer.
|
|
632 |
// Consult applicable relocs to repair embedded addresses.
|
|
633 |
// Move all my code into another code buffer.
// Consult applicable relocs to repair embedded addresses.
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  DEBUG_ONLY(address dest_end = dest->_total_start + dest->_total_size);
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_size_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }

    // badAddress here is the debug-mode marker left by CodeSection::relocate
    // when a reloc was attempted on a section with no locs storage.
    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);

    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }
}
|
|
671 |
|
|
672 |
// Compute the per-section capacities for an expansion that must grow
// section 'which_cs' by at least 'amount'. Results are written into
// new_capacity[SECT_LIMIT]; the return value is the total capacity
// including inter-section padding and slop.
csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  int prev_n = -1;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. compute_final_layout.
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(prev_n >= 0, "sanity");
        new_capacity[prev_n] += padding;
      }
      prev_n = n;
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}
|
|
719 |
|
|
720 |
// Grow section 'which_cs' by at least 'amount' bytes: allocate a
// bigger temporary buffer, migrate code/relocations into it, take it
// over, and chain the old undersized incarnation onto _before_expand
// so previously handed-out internal addresses remain decodable.
// On allocation failure, blob() becomes NULL; callers must check.
void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != NULL) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == NULL)  return;  // caller must check for blob == NULL
    for (int n = 0; n < (int)SECT_LIMIT; n++) {
      guarantee(!code_section(n)->is_frozen(), "resizing not allowed when frozen");
    }
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == NULL) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  // Reverse order: secondary sections carve space off the insts tail.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_INSTS; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n > SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == NULL) {
      cb_sect->clear_mark();
    } else {
      // Translate the mark to the equivalent offset in the new section.
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(NULL);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal));

  _decode_begin = NULL;  // sanity

  // Make certain that the new sections are all snugly inside the new blob.
  assert(verify_section_allocation(), "expanded allocation is ship-shape");

#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}
|
|
811 |
|
|
812 |
// Steal the blob and all per-section pointers from cb, leaving cb in a
// state where it will neither reuse nor free the transferred code.
// The receiver must have already disposed of any blob it previously owned.
void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == NULL, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  _overflow_arena = cb->_overflow_arena;
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}
|
|
830 |
|
|
831 |
#ifdef ASSERT
// Debug-only sanity check: every allocated section must lie inside
// [_total_start, _total_start + _total_size), sections must not overlap,
// and each non-empty section must start at its required alignment.
// Always returns true so it can be used inside an assert().
bool CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return true;  // smashed by set_blob(NULL)
  address tend   = tstart + _total_size;
  if (_blob != NULL) {
    assert(tstart >= _blob->instructions_begin(), "sanity");
    assert(tend   <= _blob->instructions_end(),   "sanity");
  }
  address tcheck = tstart;  // advancing pointer to verify disjointness
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated())  continue;
    assert(sect->start() >= tcheck, "sanity");
    tcheck = sect->start();
    assert((intptr_t)tcheck % sect->alignment() == 0
           || sect->is_empty() || _blob == NULL,
           "start is aligned");
    assert(sect->end() >= tcheck, "sanity");
    // Advance past this section so the next iteration proves the
    // following section does not overlap it.  Without this step,
    // tcheck only tracked section starts and overlap went undetected.
    tcheck = sect->end();
    assert(sect->end() <= tend, "sanity");
  }
  return true;
}
#endif //ASSERT
|
|
855 |
|
|
856 |
#ifndef PRODUCT
|
|
857 |
|
|
858 |
void CodeSection::dump() {
|
|
859 |
address ptr = start();
|
|
860 |
for (csize_t step; ptr < end(); ptr += step) {
|
|
861 |
step = end() - ptr;
|
|
862 |
if (step > jintSize * 4) step = jintSize * 4;
|
|
863 |
tty->print(PTR_FORMAT ": ", ptr);
|
|
864 |
while (step > 0) {
|
|
865 |
tty->print(" " PTR32_FORMAT, *(jint*)ptr);
|
|
866 |
ptr += jintSize;
|
|
867 |
}
|
|
868 |
tty->cr();
|
|
869 |
}
|
|
870 |
}
|
|
871 |
|
|
872 |
|
|
873 |
// Disassemble this section's machine code (from start() to end()) to tty.
void CodeSection::decode() {
  Disassembler::decode(start(), end());
}
|
|
876 |
|
|
877 |
|
|
878 |
// Record a textual comment to be shown at the given code offset
// when the buffer is later printed/disassembled.
void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
  _comments.add_comment(offset, comment);
}
|
|
881 |
|
|
882 |
|
|
883 |
class CodeComment: public CHeapObj {
|
|
884 |
private:
|
|
885 |
friend class CodeComments;
|
|
886 |
intptr_t _offset;
|
|
887 |
const char * _comment;
|
|
888 |
CodeComment* _next;
|
|
889 |
|
|
890 |
~CodeComment() {
|
|
891 |
assert(_next == NULL, "wrong interface for freeing list");
|
|
892 |
os::free((void*)_comment);
|
|
893 |
}
|
|
894 |
|
|
895 |
public:
|
|
896 |
CodeComment(intptr_t offset, const char * comment) {
|
|
897 |
_offset = offset;
|
|
898 |
_comment = os::strdup(comment);
|
|
899 |
_next = NULL;
|
|
900 |
}
|
|
901 |
|
|
902 |
intptr_t offset() const { return _offset; }
|
|
903 |
const char * comment() const { return _comment; }
|
|
904 |
CodeComment* next() { return _next; }
|
|
905 |
|
|
906 |
void set_next(CodeComment* next) { _next = next; }
|
|
907 |
|
|
908 |
CodeComment* find(intptr_t offset) {
|
|
909 |
CodeComment* a = this;
|
|
910 |
while (a != NULL && a->_offset != offset) {
|
|
911 |
a = a->_next;
|
|
912 |
}
|
|
913 |
return a;
|
|
914 |
}
|
|
915 |
};
|
|
916 |
|
|
917 |
|
|
918 |
// Add a comment at the given offset.  Comments sharing an offset keep
// insertion order: the new node is linked after the last existing node
// with the same offset, or becomes the list head when there is none.
void CodeComments::add_comment(intptr_t offset, const char * comment) {
  CodeComment* c = new CodeComment(offset, comment);
  CodeComment* insert = NULL;
  if (_comments != NULL) {
    // Locate the last existing comment with the same offset, if any.
    // (The cursor was previously also named 'c', shadowing the new
    // node above; renamed for clarity.)
    CodeComment* cursor = _comments->find(offset);
    insert = cursor;
    while (cursor && cursor->offset() == offset) {
      insert = cursor;
      cursor = cursor->next();
    }
  }
  if (insert) {
    // insert after comments with same offset
    c->set_next(insert->next());
    insert->set_next(c);
  } else {
    // no match: push onto the front of the list
    c->set_next(_comments);
    _comments = c;
  }
}
|
|
938 |
|
|
939 |
|
|
940 |
// Take over another comment list (shallow: the nodes are shared, not
// copied).  The receiver must not already hold comments.
void CodeComments::assign(CodeComments& other) {
  assert(_comments == NULL, "don't overwrite old value");
  _comments = other._comments;
}
|
|
944 |
|
|
945 |
|
|
946 |
// Print every comment recorded at the given offset to the stream,
// one per line, each prefixed with " ;; ".
void CodeComments::print_block_comment(outputStream* stream, intptr_t offset) {
  if (_comments != NULL) {
    CodeComment* c = _comments->find(offset);
    while (c && c->offset() == offset) {
      stream->print(" ;; ");
      // Print through "%s" so a '%' inside user-supplied comment text
      // cannot be misinterpreted as a format directive.
      stream->print_cr("%s", c->comment());
      c = c->next();
    }
  }
}
|
|
956 |
|
|
957 |
|
|
958 |
void CodeComments::free() {
|
|
959 |
CodeComment* n = _comments;
|
|
960 |
while (n) {
|
|
961 |
// unlink the node from the list saving a pointer to the next
|
|
962 |
CodeComment* p = n->_next;
|
|
963 |
n->_next = NULL;
|
|
964 |
delete n;
|
|
965 |
n = p;
|
|
966 |
}
|
|
967 |
_comments = NULL;
|
|
968 |
}
|
|
969 |
|
|
970 |
|
|
971 |
|
|
972 |
// Disassemble any code emitted since the previous decode() (or since
// creation), then remember the current end so the next call resumes there.
void CodeBuffer::decode() {
  Disassembler::decode(decode_begin(), code_end());
  _decode_begin = code_end();
}
|
|
976 |
|
|
977 |
|
|
978 |
// Advance the incremental-decode cursor past all current code without
// printing anything.
void CodeBuffer::skip_decode() {
  _decode_begin = code_end();
}
|
|
981 |
|
|
982 |
|
|
983 |
void CodeBuffer::decode_all() {
|
|
984 |
for (int n = 0; n < (int)SECT_LIMIT; n++) {
|
|
985 |
// dump contents of each section
|
|
986 |
CodeSection* cs = code_section(n);
|
|
987 |
tty->print_cr("! %s:", code_section_name(n));
|
|
988 |
if (cs != consts())
|
|
989 |
cs->decode();
|
|
990 |
else
|
|
991 |
cs->dump();
|
|
992 |
}
|
|
993 |
}
|
|
994 |
|
|
995 |
|
|
996 |
void CodeSection::print(const char* name) {
|
|
997 |
csize_t locs_size = locs_end() - locs_start();
|
|
998 |
tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)%s",
|
|
999 |
name, start(), end(), limit(), size(), capacity(),
|
|
1000 |
is_frozen()? " [frozen]": "");
|
|
1001 |
tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
|
|
1002 |
name, locs_start(), locs_end(), locs_limit(), locs_size, locs_capacity(), locs_point_off());
|
|
1003 |
if (PrintRelocations) {
|
|
1004 |
RelocIterator iter(this);
|
|
1005 |
iter.print();
|
|
1006 |
}
|
|
1007 |
}
|
|
1008 |
|
|
1009 |
// Print a summary of every section of this buffer to tty.
// NOTE(review): comparing 'this' to NULL is technically undefined
// behavior in C++ (a method should never be invoked through a null
// pointer); kept as-is because existing debug callers may rely on
// this defensive check — confirm before removing.
void CodeBuffer::print() {
  if (this == NULL) {
    tty->print_cr("NULL CodeBuffer pointer");
    return;
  }

  tty->print_cr("CodeBuffer:");
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print(code_section_name(n));
  }
}
|
|
1022 |
|
|
1023 |
#endif // PRODUCT
|