author | dcubed |
Sun, 17 Mar 2013 08:57:56 -0700 | |
changeset 16355 | f4d5aba63f4e |
parent 13728 | 882756847a04 |
child 24424 | 2658d7834c6e |
permissions | -rw-r--r-- |
1 | 1 |
/* |
13728
882756847a04
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
13195
diff
changeset
|
2 |
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
1 | 3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
7 |
* published by the Free Software Foundation. |
|
8 |
* |
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
13 |
* accompanied this code). |
|
14 |
* |
|
15 |
* You should have received a copy of the GNU General Public License version |
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
* |
|
5547
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
5402
diff
changeset
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
5402
diff
changeset
|
20 |
* or visit www.oracle.com if you need additional information or have any |
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
5402
diff
changeset
|
21 |
* questions. |
1 | 22 |
* |
23 |
*/ |
|
24 |
||
7397 | 25 |
#include "precompiled.hpp" |
26 |
#include "interpreter/oopMapCache.hpp" |
|
27 |
#include "memory/allocation.inline.hpp" |
|
28 |
#include "memory/resourceArea.hpp" |
|
29 |
#include "oops/oop.inline.hpp" |
|
30 |
#include "prims/jvmtiRedefineClassesTrace.hpp" |
|
31 |
#include "runtime/handles.inline.hpp" |
|
32 |
#include "runtime/signature.hpp" |
|
1 | 33 |
|
34 |
// A cache-resident oop map entry.  Unlike a plain InterpreterOopMap, whose
// out-of-line bit mask lives in a resource area, an OopMapCacheEntry keeps
// any out-of-line bit mask on the C heap (see allocate_bit_mask below) so it
// can outlive the current resource scope.
class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 protected:
  // Initialization: computes and stores the oop map for (method, bci)
  void fill(methodHandle method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(methodHandle method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

 private:
  void allocate_bit_mask();   // allocates the bit mask on C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from the C heap if necessary
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
#ifdef ASSERT
     // Cache entries use C-heap bit masks, never resource-area ones.
     _resource_allocate_bit_mask = false;
#endif
  }
};
|
62 |
||
63 |
||
64 |
// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;     // the cache entry to initialize
  int               _bci;       // the single bci we want an oop map for
  int               _stack_top; // expression stack depth at _bci; -1 until computed

  // GenerateOopMap callbacks: we report nothing during generation and
  // instead pull the result for _bci directly (see compute_map below).
  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
                                           CellTypeState* vars,
                                           CellTypeState* stack,
                                           int stack_top);
  virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry *entry);

  // Computes stack map for (method,bci) and initialize entry
  void compute_map(TRAPS);
  // Number of entries in the computed map (receiver + locals + stack);
  // only valid after compute_map has run.
  int size();
};
|
89 |
||
90 |
||
91 |
// Remembers the target (method, bci) and destination entry; the actual map
// computation happens later in compute_map().
OopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry)
  : GenerateOopMap(method),
    _entry(entry),
    _bci(bci),
    _stack_top(-1) {   // -1 marks "map not yet computed" (checked in size())
}
|
96 |
||
97 |
||
98 |
// Runs abstract interpretation over the method and extracts the oop map for
// _bci into _entry.  Native methods are handled elsewhere (fill_for_native).
void OopMapForCacheEntry::compute_map(TRAPS) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    // CATCH aborts fatally on a pending exception here (TRAPS context).
    GenerateOopMap::compute_map(CATCH);
    // Pull the result for our single bci out of its basic block.
    result_for_basicblock(_bci);
  }
}
|
109 |
||
110 |
||
111 |
// The following GenerateOopMap callbacks are intentionally no-ops: this
// subclass is only interested in the state at one bci, which it grabs
// directly via result_for_basicblock/fill_stackmap_for_opcodes.

bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}


void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}


void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}
|
129 |
||
130 |
||
131 |
// Called by GenerateOopMap for every opcode; we capture the cell state only
// at the single bci this object was created for.
void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
                                                    CellTypeState* vars,
                                                    CellTypeState* stack,
                                                    int stack_top) {
  // Ignore every bci except the one we were asked about.
  if (bcs->bci() != _bci) {
    return;
  }
  _entry->set_mask(vars, stack, stack_top);
  _stack_top = stack_top;
}
|
141 |
||
142 |
||
143 |
int OopMapForCacheEntry::size() { |
|
144 |
assert(_stack_top != -1, "compute_map must be called first"); |
|
145 |
return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top; |
|
146 |
} |
|
147 |
||
148 |
||
149 |
// Implementation of InterpreterOopMap and OopMapCacheEntry

// Closure used by OopMapCacheEntry::verify_mask(): for every offset it is
// handed, checks that the entry also reports that offset as an oop, and
// latches a failure flag otherwise.
class VerifyClosure : public OffsetClosure {
 private:
  OopMapCacheEntry* _entry;
  bool              _failed;

 public:
  VerifyClosure(OopMapCacheEntry* entry)         { _entry = entry; _failed = false; }
  void offset_do(int offset)                     { if (!_entry->is_oop(offset)) _failed = true; }
  bool failed() const                            { return _failed; }
};
|
161 |
||
162 |
InterpreterOopMap::InterpreterOopMap() {
  initialize();
#ifdef ASSERT
  // Plain InterpreterOopMaps allocate any out-of-line bit mask from a
  // resource area (contrast with OopMapCacheEntry, which uses the C heap).
  _resource_allocate_bit_mask = true;
#endif
}
|
168 |
||
169 |
InterpreterOopMap::~InterpreterOopMap() {
  // The expectation is that the bit mask was allocated
  // last in this resource area.  That would make the free of the
  // bit_mask effective (see how FREE_RESOURCE_ARRAY does a free).
  // If it was not allocated last, there is not a correctness problem
  // but the space for the bit_mask is not freed.
  assert(_resource_allocate_bit_mask, "Trying to free C heap space");
  // Only masks larger than small_mask_limit live out-of-line.
  if (mask_size() > small_mask_limit) {
    FREE_RESOURCE_ARRAY(uintptr_t, _bit_mask[0], mask_word_size());
  }
}
|
180 |
||
181 |
// An entry is empty iff no method has been set; the assert cross-checks that
// the remaining fields are consistent with that state.
bool InterpreterOopMap::is_empty() {
  bool result = _method == NULL;
  // USHRT_MAX is the poison value written by initialize().
  assert(_method != NULL || (_bci == 0 &&
    (_mask_size == 0 || _mask_size == USHRT_MAX) &&
    _bit_mask[0] == 0), "Should be completely empty");
  return result;
}
|
188 |
||
189 |
// Resets all fields to the "empty" state recognized by is_empty().
void InterpreterOopMap::initialize() {
  _method    = NULL;
  _mask_size = USHRT_MAX;  // This value should cause a failure quickly
  _bci       = 0;
  _expression_stack_size = 0;
  // Clear the inline bit-mask words (N is the inline word count).
  for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}
|
196 |
||
197 |
// Invokes oop_closure->offset_do(i) for every entry i whose oop bit is set.
// Each entry occupies bits_per_entry bits; `mask` walks those bit groups and
// is reloaded from the next mask word whenever it shifts out (becomes 0).
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}
|
213 |
||
214 |
||
215 |
#ifdef ENABLE_ZAP_DEAD_LOCALS |
|
216 |
||
217 |
void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) { |
|
218 |
int n = number_of_entries(); |
|
219 |
int word_index = 0; |
|
220 |
uintptr_t value = 0; |
|
221 |
uintptr_t mask = 0; |
|
222 |
// iterate over entries |
|
223 |
for (int i = 0; i < n; i++, mask <<= bits_per_entry) { |
|
224 |
// get current word |
|
225 |
if (mask == 0) { |
|
226 |
value = bit_mask()[word_index++]; |
|
227 |
mask = 1; |
|
228 |
} |
|
229 |
// test for dead values & oops, and for live values |
|
230 |
if ((value & (mask << dead_bit_number)) != 0) dead_closure->offset_do(i); // call this for all dead values or oops |
|
231 |
else if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i); // call this for all live oops |
|
232 |
else value_closure->offset_do(i); // call this for all live values |
|
233 |
} |
|
234 |
} |
|
235 |
||
236 |
#endif |
|
237 |
||
238 |
||
239 |
// Debug printout: lists the oop offsets (and, with zap-dead-locals builds,
// dead offsets marked with '+') of this map on the tty.
void InterpreterOopMap::print() {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (is_dead(i)) tty->print("%d+ ", i);
    else
#endif
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}
|
253 |
||
254 |
// Walks a native method's signature and sets the oop bit for every object
// parameter; all primitive parameters are ignored.  Used by
// OopMapCacheEntry::fill_for_native.
class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t * _mask;                             // the bit mask to be filled
  int         _size;                             // the mask size in bits

  // Sets the oop bit for parameter slot i (i is scaled to bit position).
  void set_one(int i) {
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_int()                                { /* ignore */ }
  void pass_long()                               { /* ignore */ }
  void pass_float()                              { /* ignore */ }
  void pass_double()                             { /* ignore */ }
  void pass_object()                             { set_one(offset()); }

  MaskFillerForNative(methodHandle method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask   = mask;
    _size   = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  // Runs the signature iteration, setting bits via the pass_* callbacks.
  void generate() {
    NativeSignatureIterator::iterate();
  }
};
|
284 |
||
285 |
// Debug check: verifies that the stored bit mask agrees with the abstract
// interpreter's cell states for both locals and the expression stack.
// Returns false only if the VerifyClosure sweep fails; mismatches in the
// detailed comparison trip asserts instead.
bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);

  for(int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)               ? true : false;
    bool v2 = vars[i].is_reference()  ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(i)              ? true : false;
    bool v4 = !vars[i].is_live()      ? true : false;
    assert(v3 == v4, "locals live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }

  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
  for(int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(max_locals + j) ? true : false;
    bool v4 = !stack[j].is_live()     ? true : false;
    assert(v3 == v4, "stack live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }
  if (TraceOopMapGeneration && Verbose) tty->cr();
  return true;
}
|
324 |
||
325 |
// Allocates the out-of-line bit mask on the C heap when the mask does not
// fit inline (mask_size() > small_mask_limit).  Requires a prior flush()/
// fresh entry so the slot is known to be empty.
void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}
|
332 |
||
333 |
// Frees the out-of-line bit mask, if one was allocated by allocate_bit_mask.
void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    // Cache entries must never hold resource-area masks (those belong to
    // plain InterpreterOopMaps); guard against freeing one to the C heap.
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0], mtClass);
    debug_only(_bit_mask[0] = 0;)
  }
}
|
341 |
||
342 |
||
343 |
// Builds the oop map for a native method: oops occur only among the
// parameters, so the mask is filled from the signature alone.
void OopMapCacheEntry::fill_for_native(methodHandle mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}
|
351 |
||
352 |
||
353 |
// (Re)computes this entry's oop map for (method, bci), releasing any
// previously held bit mask first.
void OopMapCacheEntry::fill(methodHandle method, int bci) {
  HandleMark hm;
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(bci);
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    // CATCH aborts fatally if the map computation raises an exception.
    EXCEPTION_MARK;
    OopMapForCacheEntry gen(method, bci, this);
    gen.compute_map(CATCH);
  }
}
|
369 |
||
370 |
||
371 |
// Encodes the abstract-interpreter cell states (locals followed by the
// expression stack) into this entry's packed bit mask.  Each entry takes
// bits_per_entry bits; oop (and, when enabled, dead) bits are set per cell.
void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if ( cell->is_reference()) {
      value |= (mask << oop_bit_number );
    }

#ifdef ENABLE_ZAP_DEAD_LOCALS
    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
#endif
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");


}
|
420 |
||
421 |
// Releases the C-heap bit mask (if any) and resets the entry to empty.
void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}
|
425 |
||
426 |
||
427 |
// Implementation of OopMapCache

#ifndef PRODUCT

// Running total of cache memory, maintained by the OopMapCache
// constructor/destructor (debug builds only).
static long _total_memory_usage = 0;

long OopMapCache::memory_usage() {
  return _total_memory_usage;
}

#endif
|
438 |
||
439 |
// Copies a cached entry into this (resource-allocated) InterpreterOopMap.
// Small masks are copied inline; large ones get a fresh resource-area
// allocation (never C heap — see the asserts).
void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
    "Should not resource allocate the _bit_mask");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
      mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap is recently created
    // and empty.  It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
      Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance.  Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
      mask_word_size() * BytesPerWord);
  }
}
|
469 |
||
470 |
// Hash function for the (method, bci) cache key; combines bci with several
// cheap, stable method attributes.
inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}
|
478 |
||
479 |
||
480 |
// Allocates the entry array on the C heap and puts each slot into the empty
// state.  The lock guards all lookups/updates (see lookup()).
OopMapCache::OopMapCache() :
  _mut(Mutex::leaf, "An OopMapCache lock", true)
{
  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
  // Cannot call flush for initialization, since flush
  // will check if memory should be deallocated
  for(int i = 0; i < _size; i++) _array[i].initialize();
  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
}
|
489 |
||
490 |
||
491 |
OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array, mtClass);
}
499 |
||
500 |
// Maps an (unbounded) probe index onto a slot in the fixed-size table.
OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  int slot = i % _size;
  return &_array[slot];
}
|
503 |
||
504 |
// Empties every slot, releasing any out-of-line bit masks.
void OopMapCache::flush() {
 for (int i = 0; i < _size; i++) _array[i].flush();
}
|
507 |
||
508 |
// Drops entries for methods made obsolete by class redefinition, so the
// cache does not keep old Method*s alive.
void OopMapCache::flush_obsolete_entries() {
  for (int i = 0; i < _size; i++)
    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d",
        _array[i].method()->name()->as_C_string(),
        _array[i].method()->signature()->as_C_string(), i));

      _array[i].flush();
    }
}
|
520 |
||
521 |
// Looks up (or computes and caches) the oop map for (method, bci), returning
// a resource-area copy via entry_for.  Probe sequence: (1) hash-probe for an
// existing match, (2) compute into an empty slot, (3) on collision, overwrite
// the first probed slot.  Uncacheable methods bypass the cache entirely.
void OopMapCache::lookup(methodHandle method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  MutexLocker x(&_mut);

  OopMapCacheEntry* entry = NULL;
  int probe = hash_value_for(method, bci);

  // Search hashtable for match
  int i;
  for(i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->match(method, bci)) {
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    static int count = 0;
    ResourceMark rm;
    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
    method->print_value(); tty->cr();
  }

  // Entry is not in hashtable.
  // Compute entry and return it

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in Method*.cpp.
    compute_one_oop_map(method, bci, entry_for);
    return;
  }

  // First search for an empty slot
  for(i = 0; i < _probe_depth; i++) {
    entry  = entry_at(probe + i);
    if (entry->is_empty()) {
      entry->fill(method, bci);
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print_cr("*** collision in oopmap cache - flushing item ***");
  }

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  //entry_at(probe + _probe_depth - 1)->flush();
  //for(i = _probe_depth - 1; i > 0; i--) {
  //  // Coping entry[i] = entry[i-1];
  //  OopMapCacheEntry *to   = entry_at(probe + i);
  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
  //  to->copy(from);
  // }

  assert(method->is_method(), "gaga");

  // Evict whatever occupies the first probed slot and cache the new map there.
  entry = entry_at(probe + 0);
  entry->fill(method, bci);

  // Copy the newly cached entry to input parameter
  entry_for->resource_copy(entry);

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print("Done with ");
    method->print_value(); tty->cr();
  }
  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");

  return;
}
|
601 |
||
602 |
// Computes a one-off oop map for (method, bci) into entry without touching
// the cache, using a temporary C-heap OopMapCacheEntry.
void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry->resource_copy(tmp);
  // Free with the same memory flag (mtClass) the allocation above used;
  // a mismatched flag (previously mtInternal) skews native-memory-tracking
  // accounting for both categories.
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp, mtClass);
}