#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
39 |
41 |
40 CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) |
42 CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, |
|
43 int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, |
|
44 bool caller_must_gc_arguments) |
41 : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments), |
45 : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments), |
42 _mark_for_deoptimization_status(not_marked), _method(method) { |
46 _mark_for_deoptimization_status(not_marked), |
|
47 _is_unloading_state(0), |
|
48 _method(method) |
|
49 { |
43 init_defaults(); |
50 init_defaults(); |
44 } |
51 clear_unloading_state(); |
45 |
52 } |
46 CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) |
53 |
47 : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments), |
54 CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, |
48 _mark_for_deoptimization_status(not_marked), _method(method) { |
55 int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, |
|
56 OopMapSet* oop_maps, bool caller_must_gc_arguments) |
|
57 : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, |
|
58 frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments), |
|
59 _mark_for_deoptimization_status(not_marked), |
|
60 _is_unloading_state(0), |
|
61 _method(method) |
|
62 { |
49 init_defaults(); |
63 init_defaults(); |
|
64 clear_unloading_state(); |
50 } |
65 } |
51 |
66 |
52 void CompiledMethod::init_defaults() { |
67 void CompiledMethod::init_defaults() { |
53 _has_unsafe_access = 0; |
68 _has_unsafe_access = 0; |
54 _has_method_handle_invokes = 0; |
69 _has_method_handle_invokes = 0; |
55 _lazy_critical_native = 0; |
70 _lazy_critical_native = 0; |
56 _has_wide_vectors = 0; |
71 _has_wide_vectors = 0; |
57 _unloading_clock = 0; |
|
58 } |
72 } |
59 |
73 |
60 bool CompiledMethod::is_method_handle_return(address return_pc) { |
74 bool CompiledMethod::is_method_handle_return(address return_pc) { |
61 if (!has_method_handle_invokes()) return false; |
75 if (!has_method_handle_invokes()) return false; |
62 PcDesc* pd = pc_desc_at(return_pc); |
76 PcDesc* pd = pc_desc_at(return_pc); |
436 } |
430 } |
437 } |
431 } |
438 #endif |
432 #endif |
439 } |
433 } |
440 |
434 |
441 // This is called at the end of the strong tracing/marking phase of a |
|
442 // GC to unload an nmethod if it contains otherwise unreachable |
|
443 // oops. |
|
444 |
|
445 void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) { |
|
446 // Make sure the oop's ready to receive visitors |
|
447 assert(!is_zombie() && !is_unloaded(), |
|
448 "should not call follow on zombie or unloaded nmethod"); |
|
449 |
|
450 address low_boundary = oops_reloc_begin(); |
|
451 |
|
452 if (do_unloading_oops(low_boundary, is_alive)) { |
|
453 return; |
|
454 } |
|
455 |
|
456 #if INCLUDE_JVMCI |
|
457 if (do_unloading_jvmci()) { |
|
458 return; |
|
459 } |
|
460 #endif |
|
461 |
|
462 // Cleanup exception cache and inline caches happens |
|
463 // after all the unloaded methods are found. |
|
464 } |
|
465 |
|
466 // Clean references to unloaded nmethods at addr from this one, which is not unloaded. |
435 // Clean references to unloaded nmethods at addr from this one, which is not unloaded. |
467 template <class CompiledICorStaticCall> |
436 template <class CompiledICorStaticCall> |
468 static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from, |
437 static void clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from, |
469 bool parallel, bool clean_all) { |
438 bool clean_all) { |
470 // Ok, to lookup references to zombies here |
439 // Ok, to lookup references to zombies here |
471 CodeBlob *cb = CodeCache::find_blob_unsafe(addr); |
440 CodeBlob *cb = CodeCache::find_blob_unsafe(addr); |
472 CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; |
441 CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; |
473 if (nm != NULL) { |
442 if (nm != NULL) { |
474 if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) { |
|
475 // The nmethod has not been processed yet. |
|
476 return true; |
|
477 } |
|
478 |
|
479 // Clean inline caches pointing to both zombie and not_entrant methods |
443 // Clean inline caches pointing to both zombie and not_entrant methods |
480 if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) { |
444 if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) { |
481 ic->set_to_clean(from->is_alive()); |
445 ic->set_to_clean(from->is_alive()); |
482 assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string()); |
446 assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string()); |
483 } |
447 } |
484 } |
448 } |
485 |
449 } |
486 return false; |
450 |
487 } |
451 static void clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from, |
488 |
452 bool clean_all) { |
489 static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from, |
453 clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all); |
490 bool parallel, bool clean_all = false) { |
454 } |
491 return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all); |
455 |
492 } |
456 static void clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from, |
493 |
457 bool clean_all) { |
494 static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from, |
458 clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all); |
495 bool parallel, bool clean_all = false) { |
|
496 return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all); |
|
497 } |
|
498 |
|
499 bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) { |
|
500 ResourceMark rm; |
|
501 |
|
502 // Make sure the oop's ready to receive visitors |
|
503 assert(!is_zombie() && !is_unloaded(), |
|
504 "should not call follow on zombie or unloaded nmethod"); |
|
505 |
|
506 address low_boundary = oops_reloc_begin(); |
|
507 |
|
508 if (do_unloading_oops(low_boundary, is_alive)) { |
|
509 return false; |
|
510 } |
|
511 |
|
512 #if INCLUDE_JVMCI |
|
513 if (do_unloading_jvmci()) { |
|
514 return false; |
|
515 } |
|
516 #endif |
|
517 |
|
518 return unload_nmethod_caches(/*parallel*/true, unloading_occurred); |
|
519 } |
459 } |
520 |
460 |
521 // Cleans caches in nmethods that point to either classes that are unloaded |
461 // Cleans caches in nmethods that point to either classes that are unloaded |
522 // or nmethods that are unloaded. |
462 // or nmethods that are unloaded. |
523 // |
463 // |
524 // Can be called either in parallel by G1 currently or after all |
464 // Can be called either in parallel by G1 currently or after all |
525 // nmethods are unloaded. Return postponed=true in the parallel case for |
465 // nmethods are unloaded. Return postponed=true in the parallel case for |
526 // inline caches found that point to nmethods that are not yet visited during |
466 // inline caches found that point to nmethods that are not yet visited during |
527 // the do_unloading walk. |
467 // the do_unloading walk. |
528 bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) { |
468 void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) { |
|
469 ResourceMark rm; |
529 |
470 |
530 // Exception cache only needs to be called if unloading occurred |
471 // Exception cache only needs to be called if unloading occurred |
531 if (unloading_occurred) { |
472 if (unloading_occurred) { |
532 clean_exception_cache(); |
473 clean_exception_cache(); |
533 } |
474 } |
534 |
475 |
535 bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false); |
476 cleanup_inline_caches_impl(unloading_occurred, false); |
536 |
477 |
537 // All static stubs need to be cleaned. |
478 // All static stubs need to be cleaned. |
538 clean_ic_stubs(); |
479 clean_ic_stubs(); |
539 |
480 |
540 // Check that the metadata embedded in the nmethod is alive |
481 // Check that the metadata embedded in the nmethod is alive |
541 DEBUG_ONLY(metadata_do(check_class)); |
482 DEBUG_ONLY(metadata_do(check_class)); |
542 |
483 } |
543 return postponed; |
484 |
|
485 // The IsUnloadingStruct represents a tuple comprising a result of |
|
486 // IsUnloadingBehaviour::is_unloading() for a given unloading cycle. |
|
487 struct IsUnloadingStruct { |
|
488 unsigned int _is_unloading:1; |
|
489 unsigned int _unloading_cycle:2; |
|
490 }; |
|
491 |
|
492 // The IsUnloadingUnion allows treating the tuple of the IsUnloadingStruct |
|
493 // like a uint8_t, making it possible to read and write the tuple atomically. |
|
494 union IsUnloadingUnion { |
|
495 IsUnloadingStruct _inflated; |
|
496 uint8_t _value; |
|
497 }; |
|
498 |
|
499 bool CompiledMethod::is_unloading() { |
|
500 IsUnloadingUnion state; |
|
501 state._value = RawAccess<MO_RELAXED>::load(&_is_unloading_state); |
|
502 if (state._inflated._is_unloading == 1) { |
|
503 return true; |
|
504 } |
|
505 if (state._inflated._unloading_cycle == CodeCache::unloading_cycle()) { |
|
506 return state._inflated._is_unloading == 1; |
|
507 } |
|
508 |
|
509 // The IsUnloadingBehaviour is responsible for checking if there are any dead |
|
510 // oops in the CompiledMethod, by calling oops_do on it. |
|
511 bool result = IsUnloadingBehaviour::current()->is_unloading(this); |
|
512 |
|
513 state._inflated._unloading_cycle = CodeCache::unloading_cycle(); |
|
514 state._inflated._is_unloading = result ? 1 : 0; |
|
515 |
|
516 RawAccess<MO_RELAXED>::store(&_is_unloading_state, state._value); |
|
517 |
|
518 return result; |
|
519 } |
|
520 |
|
521 void CompiledMethod::clear_unloading_state() { |
|
522 IsUnloadingUnion state; |
|
523 state._inflated._unloading_cycle = CodeCache::unloading_cycle(); |
|
524 state._inflated._is_unloading = 0; |
|
525 RawAccess<MO_RELAXED>::store(&_is_unloading_state, state._value); |
544 } |
526 } |
545 |
527 |
546 // Called to clean up after class unloading for live nmethods and from the sweeper |
528 // Called to clean up after class unloading for live nmethods and from the sweeper |
547 // for all methods. |
529 // for all methods. |
548 bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) { |
530 void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) { |
549 assert(CompiledICLocker::is_safe(this), "mt unsafe call"); |
531 assert(CompiledICLocker::is_safe(this), "mt unsafe call"); |
550 bool postponed = false; |
|
551 ResourceMark rm; |
532 ResourceMark rm; |
552 |
533 |
553 // Find all calls in an nmethod and clear the ones that point to non-entrant, |
534 // Find all calls in an nmethod and clear the ones that point to non-entrant, |
554 // zombie and unloaded nmethods. |
535 // zombie and unloaded nmethods. |
555 RelocIterator iter(this, oops_reloc_begin()); |
536 RelocIterator iter(this, oops_reloc_begin()); |
562 // If class unloading occurred we first clear ICs where the cached metadata |
543 // If class unloading occurred we first clear ICs where the cached metadata |
563 // is referring to an unloaded klass or method. |
544 // is referring to an unloaded klass or method. |
564 clean_ic_if_metadata_is_dead(CompiledIC_at(&iter)); |
545 clean_ic_if_metadata_is_dead(CompiledIC_at(&iter)); |
565 } |
546 } |
566 |
547 |
567 postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all); |
548 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all); |
568 break; |
549 break; |
569 |
550 |
570 case relocInfo::opt_virtual_call_type: |
551 case relocInfo::opt_virtual_call_type: |
571 postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all); |
552 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all); |
572 break; |
553 break; |
573 |
554 |
574 case relocInfo::static_call_type: |
555 case relocInfo::static_call_type: |
575 postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all); |
556 clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all); |
576 break; |
557 break; |
577 |
558 |
578 case relocInfo::oop_type: |
559 case relocInfo::oop_type: |
579 // handled by do_unloading_oops already |
|
580 break; |
560 break; |
581 |
561 |
582 case relocInfo::metadata_type: |
562 case relocInfo::metadata_type: |
583 break; // nothing to do. |
563 break; // nothing to do. |
584 |
|
585 default: |
|
586 break; |
|
587 } |
|
588 } |
|
589 |
|
590 return postponed; |
|
591 } |
|
592 |
|
593 void CompiledMethod::do_unloading_parallel_postponed() { |
|
594 ResourceMark rm; |
|
595 |
|
596 // Make sure the oop's ready to receive visitors |
|
597 assert(!is_zombie(), |
|
598 "should not call follow on zombie nmethod"); |
|
599 |
|
600 RelocIterator iter(this, oops_reloc_begin()); |
|
601 while(iter.next()) { |
|
602 |
|
603 switch (iter.type()) { |
|
604 |
|
605 case relocInfo::virtual_call_type: |
|
606 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true); |
|
607 break; |
|
608 |
|
609 case relocInfo::opt_virtual_call_type: |
|
610 clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true); |
|
611 break; |
|
612 |
|
613 case relocInfo::static_call_type: |
|
614 clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true); |
|
615 break; |
|
616 |
564 |
617 default: |
565 default: |
618 break; |
566 break; |
619 } |
567 } |
620 } |
568 } |