39 private: |
39 private: |
40 enum { cache_size = 16 }; |
40 enum { cache_size = 16 }; |
41 Klass* _exception_type; |
41 Klass* _exception_type; |
42 address _pc[cache_size]; |
42 address _pc[cache_size]; |
43 address _handler[cache_size]; |
43 address _handler[cache_size]; |
44 int _count; |
44 volatile int _count; |
45 ExceptionCache* _next; |
45 ExceptionCache* _next; |
46 |
46 |
47 address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; } |
47 address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; } |
48 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; } |
48 void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; } |
49 address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; } |
49 address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; } |
50 void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; } |
50 void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; } |
51 int count() { return _count; } |
51 int count() { return OrderAccess::load_acquire(&_count); } |
52 void increment_count() { _count++; } |
52 // increment_count is only called under lock, but there may be concurrent readers. |
|
53 void increment_count() { OrderAccess::release_store(&_count, _count + 1); } |
53 |
54 |
54 public: |
55 public: |
55 |
56 |
56 ExceptionCache(Handle exception, address pc, address handler); |
57 ExceptionCache(Handle exception, address pc, address handler); |
57 |
58 |
239 // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method |
240 // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method |
240 // is active while stack scanning (mark_active_nmethods()). The hotness |
241 // is active while stack scanning (mark_active_nmethods()). The hotness |
241 // counter is decreased (by 1) while sweeping. |
242 // counter is decreased (by 1) while sweeping. |
242 int _hotness_counter; |
243 int _hotness_counter; |
243 |
244 |
244 ExceptionCache *_exception_cache; |
245 ExceptionCache * volatile _exception_cache; |
245 PcDescCache _pc_desc_cache; |
246 PcDescCache _pc_desc_cache; |
246 |
247 |
  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
431 unloaded = 3 }; // there should be no activations, should not be called, |
432 unloaded = 3 }; // there should be no activations, should not be called, |
432 // will be transformed to zombie immediately |
433 // will be transformed to zombie immediately |
433 |
434 |
434 // flag accessing and manipulation |
435 // flag accessing and manipulation |
435 bool is_in_use() const { return _state == in_use; } |
436 bool is_in_use() const { return _state == in_use; } |
436 bool is_alive() const { return _state == in_use || _state == not_entrant; } |
437 bool is_alive() const { unsigned char s = _state; return s == in_use || s == not_entrant; } |
437 bool is_not_entrant() const { return _state == not_entrant; } |
438 bool is_not_entrant() const { return _state == not_entrant; } |
438 bool is_zombie() const { return _state == zombie; } |
439 bool is_zombie() const { return _state == zombie; } |
439 bool is_unloaded() const { return _state == unloaded; } |
440 bool is_unloaded() const { return _state == unloaded; } |
440 |
441 |
  // returns a string version of the nmethod state
574 // Sweeper support |
575 // Sweeper support |
575 long stack_traversal_mark() { return _stack_traversal_mark; } |
576 long stack_traversal_mark() { return _stack_traversal_mark; } |
576 void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; } |
577 void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; } |
577 |
578 |
578 // Exception cache support |
579 // Exception cache support |
|
580 // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here. |
579 ExceptionCache* exception_cache() const { return _exception_cache; } |
581 ExceptionCache* exception_cache() const { return _exception_cache; } |
580 void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; } |
582 void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; } |
|
583 void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); } |
581 address handler_for_exception_and_pc(Handle exception, address pc); |
584 address handler_for_exception_and_pc(Handle exception, address pc); |
582 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler); |
585 void add_handler_for_exception_and_pc(Handle exception, address pc, address handler); |
583 void clean_exception_cache(BoolObjectClosure* is_alive); |
586 void clean_exception_cache(BoolObjectClosure* is_alive); |
584 |
587 |
585 // implicit exceptions support |
588 // implicit exceptions support |