276 return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset(); |
276 return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset(); |
277 } |
277 } |
279 void PcDescCache::reset_to(PcDesc* initial_pc_desc) { |
279 void PcDescCache::reset_to(PcDesc* initial_pc_desc) { |
280 if (initial_pc_desc == NULL) { |
280 if (initial_pc_desc == NULL) { |
281 _last_pc_desc = NULL; // native method |
281 _pc_descs[0] = NULL; // native method; no PcDescs at all |
282 return; |
282 return; |
283 } |
283 } |
284 NOT_PRODUCT(++nmethod_stats.pc_desc_resets); |
284 NOT_PRODUCT(++nmethod_stats.pc_desc_resets); |
285 // reset the cache by filling it with benign (non-null) values |
285 // reset the cache by filling it with benign (non-null) values |
286 assert(initial_pc_desc->pc_offset() < 0, "must be sentinel"); |
286 assert(initial_pc_desc->pc_offset() < 0, "must be sentinel"); |
287 _last_pc_desc = initial_pc_desc + 1; // first valid one is after sentinel |
|
288 for (int i = 0; i < cache_size; i++) |
287 for (int i = 0; i < cache_size; i++) |
289 _pc_descs[i] = initial_pc_desc; |
288 _pc_descs[i] = initial_pc_desc; |
290 } |
289 } |
292 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) { |
291 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) { |
293 NOT_PRODUCT(++nmethod_stats.pc_desc_queries); |
292 NOT_PRODUCT(++nmethod_stats.pc_desc_queries); |
294 NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx); |
293 NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx); |
|
294 |
|
295 // Note: one might think that caching the most recently |
|
296 // read value separately would be a win, but one would be |
|
297 // wrong. When many threads are updating it, the cache |
|
298 // line it's in would bounce between caches, negating |
|
299 // any benefit. |
295 |
300 |
296 // In order to prevent race conditions do not load cache elements |
301 // In order to prevent race conditions do not load cache elements |
297 // repeatedly, but use a local copy: |
302 // repeatedly, but use a local copy: |
298 PcDesc* res; |
303 PcDesc* res; |
299 |
304 |
300 // Step one: Check the most recently returned value. |
305 // Step one: Check the most recently added value. |
301 res = _last_pc_desc; |
306 res = _pc_descs[0]; |
302 if (res == NULL) return NULL; // native method; no PcDescs at all |
307 if (res == NULL) return NULL; // native method; no PcDescs at all |
303 if (match_desc(res, pc_offset, approximate)) { |
308 if (match_desc(res, pc_offset, approximate)) { |
304 NOT_PRODUCT(++nmethod_stats.pc_desc_repeats); |
309 NOT_PRODUCT(++nmethod_stats.pc_desc_repeats); |
305 return res; |
310 return res; |
306 } |
311 } |
307 |
312 |
308 // Step two: Check the LRU cache. |
313 // Step two: Check the rest of the LRU cache. |
309 for (int i = 0; i < cache_size; i++) { |
314 for (int i = 1; i < cache_size; ++i) { |
310 res = _pc_descs[i]; |
315 res = _pc_descs[i]; |
311 if (res->pc_offset() < 0) break; // optimization: skip empty cache |
316 if (res->pc_offset() < 0) break; // optimization: skip empty cache |
312 if (match_desc(res, pc_offset, approximate)) { |
317 if (match_desc(res, pc_offset, approximate)) { |
313 NOT_PRODUCT(++nmethod_stats.pc_desc_hits); |
318 NOT_PRODUCT(++nmethod_stats.pc_desc_hits); |
314 _last_pc_desc = res; // record this cache hit in case of repeat |
|
315 return res; |
319 return res; |
316 } |
320 } |
317 } |
321 } |
318 |
322 |
319 // Report failure. |
323 // Report failure. |
320 return NULL; |
324 return NULL; |
321 } |
325 } |
323 void PcDescCache::add_pc_desc(PcDesc* pc_desc) { |
327 void PcDescCache::add_pc_desc(PcDesc* pc_desc) { |
324 NOT_PRODUCT(++nmethod_stats.pc_desc_adds); |
328 NOT_PRODUCT(++nmethod_stats.pc_desc_adds); |
325 // Update the LRU cache by shifting pc_desc forward: |
329 // Update the LRU cache by shifting pc_desc forward. |
326 for (int i = 0; i < cache_size; i++) { |
330 for (int i = 0; i < cache_size; i++) { |
327 PcDesc* next = _pc_descs[i]; |
331 PcDesc* next = _pc_descs[i]; |
328 _pc_descs[i] = pc_desc; |
332 _pc_descs[i] = pc_desc; |
329 pc_desc = next; |
333 pc_desc = next; |
330 } |
334 } |
331 // Note: Do not update _last_pc_desc. It fronts for the LRU cache. |
|
332 } |
335 } |
334 // adjust pcs_size so that it is a multiple of both oopSize and |
337 // adjust pcs_size so that it is a multiple of both oopSize and |
335 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple |
338 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple |
336 // of oopSize, then 2*sizeof(PcDesc) is) |
339 // of oopSize, then 2*sizeof(PcDesc) is) |
337 static int adjust_pcs_size(int pcs_size) { |
340 static int adjust_pcs_size(int pcs_size) { |
338 int nsize = round_to(pcs_size, oopSize); |
341 int nsize = round_to(pcs_size, oopSize); |
339 if ((nsize % sizeof(PcDesc)) != 0) { |
342 if ((nsize % sizeof(PcDesc)) != 0) { |
340 nsize = pcs_size + sizeof(PcDesc); |
343 nsize = pcs_size + sizeof(PcDesc); |
341 } |
344 } |
342 assert((nsize % oopSize) == 0, "correct alignment"); |
345 assert((nsize % oopSize) == 0, "correct alignment"); |
343 return nsize; |
346 return nsize; |
344 } |
347 } |

//-----------------------------------------------------------------------------
