/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its state is accessed,
// either the CompiledIC_lock must be held or we must be at a safepoint.
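//
// An illustrative sketch of the usual caller pattern (not code from this
// file; names taken from the surrounding sources):
//
//   MutexLocker ml(CompiledIC_lock);        // or assert we are at a safepoint
//   CompiledIC* ic = CompiledIC_at(&iter);  // iter: RelocIterator at the call site
//   if (!ic->is_clean()) {
//     ic->set_to_clean(true /* in_use */);
//   }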

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = get_data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert (cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_call->destination())) {
    // When patching for the ICStub case, the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint. Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
  }

  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
    assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif
    _call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point. Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

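  // A cleaned IC stores non_oop_word() rather than a raw NULL so that a
  // concurrent reader can never mistake a half-patched site for one holding
  // valid metadata (see the corresponding assert in cached_value()).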
  if (cache == NULL) cache = (void*)Universe::non_oop_word();

  set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}



address CompiledIC::ic_destination() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _call->destination();
}

// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = _call->get_load_instruction(r);
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
  : _method(cm)
{
  _call = _method->call_wrapper_at((address) call);
  address ic_call = _call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(cm != NULL, "must pass compiled method");
  assert(cm->contains(ic_call), "must be in compiled method");

  // Search for the ic_call at the given address.
  RelocIterator iter(cm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _method(iter->code())
{
  _call = _method->call_wrapper_at(iter->addr());
  address ic_call = _call->instruction_address();

  CompiledMethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass compiled method");
  assert(nm->contains(ic_call), "must be in compiled method");

  initialize_from_iter(iter);
}

bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
#endif //ASSERT
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different from selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}
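
// Note: a false return from set_to_megamorphic above means that VtableStubs
// could not supply a stub (typically because the code cache is out of space);
// the IC is then left unchanged and the call site simply takes the miss path
// again on a later invocation.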

// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_compiled());
  // Check that the cached_value is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining, so call sites which could be optimized
  // virtuals (because there are no currently loaded subclasses of a type) are left as virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert( is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    address dest = ic_destination();
#ifdef ASSERT
    {
      _call->verify_resolve_call(dest);
    }
#endif /* ASSERT */
    is_call_to_interpreted = _call->is_call_to_interpreted(dest);
  }
  return is_call_to_interpreted;
}

void CompiledIC::set_to_clean(bool in_use) {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry = _call->get_resolve_call_stub(is_optimized());

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
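    // (Patching the destination and the cached value is not one atomic
    // operation, so the new pair is parked in an ICStub and only installed
    // into the IC once that can be done safely, at a safepoint.)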
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}

bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == _call->get_resolve_call_stub(is_optimized());
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}

void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if the cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info.to_interpreter() || info.to_aot()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      methodHandle method (thread, (Method*)info.cached_metadata());
      _call->set_to_interpreted(method, info);

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
                       p2i(instruction_address()),
                       (info.to_aot() ? "aot" : "interpreter"),
                       method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                     p2i(instruction_address()),
                     ((Klass*)info.cached_metadata())->print_value_string(),
                     (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
// static_bound: The call can be static bound. If it isn't also optimized, the property
// wasn't provable at time of compilation. An optimized call will have any necessary
// null check, while a static_bound won't. A static_bound (but not optimized) must
// therefore use the unverified entry point.
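//
// In terms of the entry point chosen by compute_monomorphic_entry below:
//   is_optimized -> verified entry point (the null check was already emitted
//                   at the call site)
//   otherwise    -> unverified entry point (the receiver klass is checked,
//                   and thereby null-checked, on entry)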
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           Klass* receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           bool caller_is_nmethod,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final (is_optimized), we will emit
    //     an optimized virtual call (no inline cache), and need a Method* to make
    //     a call to the interpreter
    //   - if we don't know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //   - In the case that we here notice the call is static bound we
    //     convert the call into what looks to be an optimized virtual call,
    //     but we must use the unverified entry point (since there will be no
    //     null check on a call when the target isn't loaded).
    //     This causes problems when verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.
    if (is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
  if (entry != NULL && !far_c2a) {
    // Call to near compiled code (nmethod or aot).
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass, is_optimized);
  } else {
    if (is_optimized) {
      if (far_c2a) {
        // Call to aot code from nmethod.
        info.set_aot_entry(entry, method());
      } else {
        // Use stub entry
        info.set_interpreter_entry(method()->get_c2i_entry(), method());
      }
    } else {
      // Use icholder entry
      assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  return (cb != NULL && cb->is_adapter_blob());
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  // This call site might have become stale so inspect it carefully.
  address dest = cm->call_wrapper_at(call_site->addr())->destination();
  return is_icholder_entry(dest);
}

// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  assert(cm->is_nmethod(), "must be nmethod");
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(instruction_address());
  assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif

  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}

bool CompiledStaticCall::is_clean() const {
  return destination() == resolve_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
  return cm->stub_contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_far() const {
  // It is a call to an aot method if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  CodeBlob* desc = CodeCache::find_blob(instruction_address());
  return desc->as_compiled_method()->stub_contains(destination());
}

void CompiledStaticCall::set_to_compiled(address entry) {
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                  name(),
                  p2i(instruction_address()),
                  p2i(entry));
  }
  // Call to compiled code
  assert(CodeCache::contains(entry), "wrong entry point");
  set_destination_mt_safe(entry);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if the cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
#if INCLUDE_AOT
  } else if (info._to_aot) {
    // Call to far code
    set_to_far(info.callee(), info.entry());
#endif
  } else {
    set_to_compiled(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) {
  CompiledMethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    if (caller_is_nmethod && m_code->is_far_code()) {
      // Call to far aot code from nmethod.
      info._to_aot = true;
    } else {
      info._to_aot = false;
    }
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}

address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction);
  while (iter.next()) {
    if (iter.addr() == instruction) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub(is_aot);
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub(is_aot);
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

address CompiledDirectStaticCall::find_stub(bool is_aot) {
  return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
}

address CompiledDirectStaticCall::resolve_call_stub() const {
  return SharedRuntime::get_resolve_static_call_stub();
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  _call->verify();
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledDirectStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_far()) {
    tty->print("far");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT