//
47 class nmethodBucket: public CHeapObj<mtClass> { |
47 class nmethodBucket: public CHeapObj<mtClass> { |
48 friend class VMStructs; |
48 friend class VMStructs; |
49 private: |
49 private: |
50 nmethod* _nmethod; |
50 nmethod* _nmethod; |
51 int _count; |
51 volatile int _count; |
52 nmethodBucket* _next; |
52 nmethodBucket* volatile _next; |
|
53 nmethodBucket* volatile _purge_list_next; |
53 |
54 |
54 public: |
55 public: |
55 nmethodBucket(nmethod* nmethod, nmethodBucket* next) : |
56 nmethodBucket(nmethod* nmethod, nmethodBucket* next) : |
56 _nmethod(nmethod), _count(1), _next(next) {} |
57 _nmethod(nmethod), _count(1), _next(next), _purge_list_next(NULL) {} |
57 |
58 |
58 int count() { return _count; } |
59 int count() { return _count; } |
59 int increment() { _count += 1; return _count; } |
60 int increment() { _count += 1; return _count; } |
60 int decrement(); |
61 int decrement(); |
61 nmethodBucket* next() { return _next; } |
62 nmethodBucket* next(); |
62 void set_next(nmethodBucket* b) { _next = b; } |
63 nmethodBucket* next_not_unloading(); |
63 nmethod* get_nmethod() { return _nmethod; } |
64 void set_next(nmethodBucket* b); |
|
65 nmethodBucket* purge_list_next(); |
|
66 void set_purge_list_next(nmethodBucket* b); |
|
67 nmethod* get_nmethod() { return _nmethod; } |
64 }; |
68 }; |

//
// Utility class to manipulate nmethod dependency contexts.
// Dependency context can be attached either to an InstanceKlass (_dep_context field)
// or CallSiteContext oop for call_site_target dependencies (see javaClasses.hpp).
// DependencyContext class operates on some location which holds a nmethodBucket* value
// and uint64_t integer recording the safepoint counter at the last cleanup.
//
76 class DependencyContext : public StackObj { |
77 class DependencyContext : public StackObj { |
77 friend class VMStructs; |
78 friend class VMStructs; |
78 friend class TestDependencyContext; |
79 friend class TestDependencyContext; |
79 private: |
80 private: |
80 enum TagBits { _has_stale_entries_bit = 1, _has_stale_entries_mask = 1 }; |
81 nmethodBucket* volatile* _dependency_context_addr; |
|
82 volatile uint64_t* _last_cleanup_addr; |
81 |
83 |
82 intptr_t* _dependency_context_addr; |
84 bool claim_cleanup(); |
83 |
85 void set_dependencies(nmethodBucket* b); |
84 void set_dependencies(nmethodBucket* b) { |
86 nmethodBucket* dependencies(); |
85 assert((intptr_t(b) & _has_stale_entries_mask) == 0, "should be aligned"); |
87 nmethodBucket* dependencies_not_unloading(); |
86 if (has_stale_entries()) { |
|
87 *_dependency_context_addr = intptr_t(b) | _has_stale_entries_mask; |
|
88 } else { |
|
89 *_dependency_context_addr = intptr_t(b); |
|
90 } |
|
91 } |
|
92 |
|
93 void set_has_stale_entries(bool x) { |
|
94 if (x) { |
|
95 *_dependency_context_addr |= _has_stale_entries_mask; |
|
96 } else { |
|
97 *_dependency_context_addr &= ~_has_stale_entries_mask; |
|
98 } |
|
99 } |
|
100 |
|
101 nmethodBucket* dependencies() { |
|
102 intptr_t value = *_dependency_context_addr; |
|
103 return (nmethodBucket*) (value & ~_has_stale_entries_mask); |
|
104 } |
|
105 |
|
106 bool has_stale_entries() const { |
|
107 intptr_t value = *_dependency_context_addr; |
|
108 return (value & _has_stale_entries_mask) != 0; |
|
109 } |
|
110 |
88 |
111 static PerfCounter* _perf_total_buckets_allocated_count; |
89 static PerfCounter* _perf_total_buckets_allocated_count; |
112 static PerfCounter* _perf_total_buckets_deallocated_count; |
90 static PerfCounter* _perf_total_buckets_deallocated_count; |
113 static PerfCounter* _perf_total_buckets_stale_count; |
91 static PerfCounter* _perf_total_buckets_stale_count; |
114 static PerfCounter* _perf_total_buckets_stale_acc_count; |
92 static PerfCounter* _perf_total_buckets_stale_acc_count; |
|
93 static nmethodBucket* volatile _purge_list; |
|
94 static volatile uint64_t _cleaning_epoch; |
115 |
95 |
116 public: |
96 public: |
117 #ifdef ASSERT |
97 #ifdef ASSERT |
118 // Safepoints are forbidden during DC lifetime. GC can invalidate |
98 // Safepoints are forbidden during DC lifetime. GC can invalidate |
119 // _dependency_context_addr if it relocates the holder |
99 // _dependency_context_addr if it relocates the holder |
120 // (e.g. CallSiteContext Java object). |
100 // (e.g. CallSiteContext Java object). |
121 uint64_t _safepoint_counter; |
101 uint64_t _safepoint_counter; |
122 |
102 |
123 DependencyContext(intptr_t* addr) : _dependency_context_addr(addr), |
103 DependencyContext(nmethodBucket* volatile* bucket_addr, volatile uint64_t* last_cleanup_addr) |
124 _safepoint_counter(SafepointSynchronize::safepoint_counter()) {} |
104 : _dependency_context_addr(bucket_addr), |
|
105 _last_cleanup_addr(last_cleanup_addr), |
|
106 _safepoint_counter(SafepointSynchronize::safepoint_counter()) {} |
125 |
107 |
126 ~DependencyContext() { |
108 ~DependencyContext() { |
127 assert(_safepoint_counter == SafepointSynchronize::safepoint_counter(), "safepoint happened"); |
109 assert(_safepoint_counter == SafepointSynchronize::safepoint_counter(), "safepoint happened"); |
128 } |
110 } |
129 #else |
111 #else |
130 DependencyContext(intptr_t* addr) : _dependency_context_addr(addr) {} |
112 DependencyContext(nmethodBucket* volatile* bucket_addr, volatile uint64_t* last_cleanup_addr) |
|
113 : _dependency_context_addr(bucket_addr), |
|
114 _last_cleanup_addr(last_cleanup_addr) {} |
131 #endif // ASSERT |
115 #endif // ASSERT |
132 |
|
133 static const intptr_t EMPTY = 0; // dependencies = NULL, has_stale_entries = false |
|
134 |
116 |
135 static void init(); |
117 static void init(); |
136 |
118 |
137 int mark_dependent_nmethods(DepChange& changes); |
119 int mark_dependent_nmethods(DepChange& changes); |
138 void add_dependent_nmethod(nmethod* nm, bool expunge_stale_entries = false); |
120 void add_dependent_nmethod(nmethod* nm); |
139 void remove_dependent_nmethod(nmethod* nm, bool expunge_stale_entries = false); |
121 void remove_dependent_nmethod(nmethod* nm); |
140 int remove_all_dependents(); |
122 int remove_all_dependents(); |
141 |
123 void clean_unloading_dependents(); |
142 void expunge_stale_entries(); |
124 static void purge_dependency_contexts(); |
|
125 static void release(nmethodBucket* b); |
|
126 static void cleaning_start(); |
|
127 static void cleaning_end(); |
143 |
128 |
144 #ifndef PRODUCT |
129 #ifndef PRODUCT |
145 void print_dependent_nmethods(bool verbose); |
130 void print_dependent_nmethods(bool verbose); |
146 bool is_dependent_nmethod(nmethod* nm); |
131 bool is_dependent_nmethod(nmethod* nm); |
147 bool find_stale_entries(); |
|
148 #endif //PRODUCT |
132 #endif //PRODUCT |
149 }; |
133 }; |
#endif // SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP