  }
}

//-----------------------------------------------------------------------------
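
// Readers traverse the exception cache without holding a lock, so the head is
// loaded with acquire semantics to pair with the CAS publication performed by
// the writers below.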
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

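  // Insert at the head of the list. The head may be concurrently cleaned by the
  // GC, so the new entry is published with a CAS and the whole operation is
  // retried if the head has changed underneath us.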
  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
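  // Walk the list once, unlinking every entry whose exception Klass is no longer
  // alive. Unlinked entries are handed to CodeCache::release_exception_cache(),
  // so that actual freeing can be deferred until after the handshake described above.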
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean the head; this is contended by concurrent inserts, which
        // both lazily clean the head and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible during cleanup to connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// private method for handling exception cache
// These methods are private, and used to manipulate the exception cache
// directly.
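// Walks the cache lock-free and returns the first entry for the given exception
// type that still has room for another (pc, handler) pair, or NULL if none exists.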
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

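// Returns true if the given pc sits at a safepoint poll emitted for a method
// return, as identified by a poll_return relocation at that pc.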
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)