           sciter.static_stub_reloc()->clear_inline_cache();
         }
       }
     }
   }
-  // Check that the metadata embedded in the nmethod is alive
-  metadata_do(check_class);
 #endif
 }
 
 // This is called at the end of the strong tracing/marking phase of a
 // GC to unload an nmethod if it contains otherwise unreachable
 // oops.
 
-void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
+void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
   // Make sure the oop's ready to receive visitors
   assert(!is_zombie() && !is_unloaded(),
          "should not call follow on zombie or unloaded nmethod");
 
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  // Exception cache
-  clean_exception_cache();
-
-  // If class unloading occurred we first iterate over all inline caches and
-  // clear ICs where the cached oop is referring to an unloaded klass or method.
-  // The remaining live cached oops will be traversed in the relocInfo::oop_type
-  // iteration below.
-  if (unloading_occurred) {
-    RelocIterator iter(this, low_boundary);
-    while(iter.next()) {
-      if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(&iter);
-        clean_ic_if_metadata_is_dead(ic);
-      }
-    }
-  }
-
-  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
+  address low_boundary = oops_reloc_begin();
+
+  if (do_unloading_oops(low_boundary, is_alive)) {
     return;
   }
 
 #if INCLUDE_JVMCI
-  if (do_unloading_jvmci(unloading_occurred)) {
+  if (do_unloading_jvmci()) {
     return;
   }
 #endif
 
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary);
+  // Cleanup of the exception cache and inline caches happens
+  // after all the unloaded methods are found.
 }
 
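Both unloading paths previously computed low_boundary inline; this change moves that logic behind the new oops_reloc_begin() helper, whose definition is outside this hunk. A minimal sketch of what it presumably computes, reconstructed from the removed lines above (an assumption, not the actual implementation):

    address CompiledMethod::oops_reloc_begin() const {
      // A not-entrant method has a jump patched over its first few bytes;
      // an oop located there must not be visited, so the relocation walk
      // starts past the patched region (assumption based on the removed
      // code above).
      address low_boundary = verified_entry_point();
      if (is_not_entrant()) {
        low_boundary += NativeJump::instruction_size;
      }
      return low_boundary;
    }
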
+// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
 template <class CompiledICorStaticCall>
-static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from) {
+static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
+                                         bool parallel, bool clean_all) {
   // OK to look up references to zombies here
   CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
   CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
   if (nm != NULL) {
-    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
+    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
       // The nmethod has not been processed yet.
       return true;
     }
 
     // Clean inline caches pointing to both zombie and not_entrant methods
-    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
-      ic->set_to_clean();
+    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
+      ic->set_to_clean(from->is_alive());
       assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
     }
   }
 
   return false;
 }
 
-static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from) {
-  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from);
+static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
+                                         bool parallel, bool clean_all = false) {
+  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
 }
 
-static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from) {
-  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from);
+static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
+                                         bool parallel, bool clean_all = false) {
+  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
 }
 
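Note on the two new parameters: in the template above, parallel only gates the unloading-clock postpone check, and clean_all only widens the condition under which a call site is reset; set_to_clean() now also receives from->is_alive(), presumably so a cheaper transition can be used when the caller nmethod is dead. A sketch of the intended combinations; the caller shape is illustrative, not code from this change:

    // Parallel GC walk: a site whose target nmethod has a stale unloading
    // clock (not yet visited this cycle) is postponed rather than cleaned.
    postponed |= clean_if_nmethod_is_unloaded(ic, this,
                                              /*parallel*/ true,
                                              /*clean_all*/ false);

    // Serial sweeper-style cleaning: the clock check is skipped, so this
    // call can never return true, and the site is reset even when the
    // target nmethod is still in use.
    clean_if_nmethod_is_unloaded(ic, this,
                                 /*parallel*/ false,
                                 /*clean_all*/ true);
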
 bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
   ResourceMark rm;
 
   // Make sure the oop's ready to receive visitors
   assert(!is_zombie() && !is_unloaded(),
          "should not call follow on zombie or unloaded nmethod");
 
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  // Exception cache
-  clean_exception_cache();
-
+  address low_boundary = oops_reloc_begin();
+
+  if (do_unloading_oops(low_boundary, is_alive)) {
+    return false;
+  }
+
+#if INCLUDE_JVMCI
+  if (do_unloading_jvmci()) {
+    return false;
+  }
+#endif
+
+  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
+}
+
+// Cleans caches in nmethods that point to either classes that are unloaded
+// or nmethods that are unloaded.
+//
+// Can be called either in parallel by G1 currently or after all
+// nmethods are unloaded.  Returns postponed=true in the parallel case for
+// inline caches found that point to nmethods that are not yet visited during
+// the do_unloading walk.
+bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {
+
+  // The exception cache only needs to be cleaned if unloading occurred
+  if (unloading_occurred) {
+    clean_exception_cache();
+  }
+
+  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);
+
+  // All static stubs need to be cleaned.
+  clean_ic_stubs();
+
+  // Check that the metadata embedded in the nmethod is alive
+  DEBUG_ONLY(metadata_do(check_class));
+
+  return postponed;
+}
+
+// Called to clean up after class unloading for live nmethods and from the sweeper
+// for all methods.
+bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
+  assert_locked_or_safepoint(CompiledIC_lock);
   bool postponed = false;
 
-  RelocIterator iter(this, low_boundary);
+  // Find all calls in an nmethod and clear the ones that point to non-entrant,
+  // zombie and unloaded nmethods.
+  RelocIterator iter(this, oops_reloc_begin());
   while(iter.next()) {
 
     switch (iter.type()) {
 
     case relocInfo::virtual_call_type:
       if (unloading_occurred) {
-        // If class unloading occurred we first iterate over all inline caches and
-        // clear ICs where the cached oop is referring to an unloaded klass or method.
+        // If class unloading occurred we first clear ICs where the cached metadata
+        // refers to an unloaded klass or method.
         clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
       }
 
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
       break;
 
     case relocInfo::opt_virtual_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
       break;
 
     case relocInfo::static_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
+      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
       break;
 
     case relocInfo::oop_type:
-      // handled by do_unloading_oops below
+      // handled by do_unloading_oops already
       break;
 
     case relocInfo::metadata_type:
       break; // nothing to do.
 
     default:
       break;
     }
   }
 
-  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
-    return postponed;
-  }
-
-#if INCLUDE_JVMCI
-  if (do_unloading_jvmci(unloading_occurred)) {
-    return postponed;
-  }
-#endif
-
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary);
-
   return postponed;
 }
 
 void CompiledMethod::do_unloading_parallel_postponed() {
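Taking the new functions together, the division of labor appears to be: do_unloading() and do_unloading_parallel() only decide whether this nmethod dies, and all cache scrubbing is funneled through unload_nmethod_caches(). A reviewer's sketch of the resulting two-phase protocol; the driver loop is illustrative, and only the CompiledMethod entry points are from this change:

    // Phase 1: per GC worker, during the unloading walk.
    bool postponed = nm->do_unloading_parallel(is_alive, unloading_occurred);
    // postponed == true means some call site in nm pointed at an nmethod
    // whose unloading clock showed it had not been visited yet.

    // Phase 2: after all workers have finished the walk, every unloading
    // decision is in, so the postponed sites can be judged safely.
    if (postponed) {
      nm->do_unloading_parallel_postponed();
    }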