@@ -431,86 +413,10 @@
     nsize = pcs_size + sizeof(PcDesc);
   }
   assert((nsize % oopSize) == 0, "correct alignment");
   return nsize;
 }
-
-//-----------------------------------------------------------------------------
-
-
-void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
-  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
-  assert(new_entry != NULL,"Must be non null");
-  assert(new_entry->next() == NULL, "Must be null");
-
-  ExceptionCache *ec = exception_cache();
-  if (ec != NULL) {
-    new_entry->set_next(ec);
-  }
-  release_set_exception_cache(new_entry);
-}
-
-void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
-  ExceptionCache* prev = NULL;
-  ExceptionCache* curr = exception_cache();
-
-  while (curr != NULL) {
-    ExceptionCache* next = curr->next();
-
-    Klass* ex_klass = curr->exception_type();
-    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
-      if (prev == NULL) {
-        set_exception_cache(next);
-      } else {
-        prev->set_next(next);
-      }
-      delete curr;
-      // prev stays the same.
-    } else {
-      prev = curr;
-    }
-
-    curr = next;
-  }
-}
-
-// public method for accessing the exception cache
-// These are the public access methods.
-address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
-  // We never grab a lock to read the exception cache, so we may
-  // have false negatives. This is okay, as it can only happen during
-  // the first few exception lookups for a given nmethod.
-  ExceptionCache* ec = exception_cache();
-  while (ec != NULL) {
-    address ret_val;
-    if ((ret_val = ec->match(exception,pc)) != NULL) {
-      return ret_val;
-    }
-    ec = ec->next();
-  }
-  return NULL;
-}
-
-
-void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
-  // There are potential race conditions during exception cache updates, so we
-  // must own the ExceptionCache_lock before doing ANY modifications. Because
-  // we don't lock during reads, it is possible to have several threads attempt
-  // to update the cache with the same data. We need to check for already inserted
-  // copies of the current data before adding it.
-
-  MutexLocker ml(ExceptionCache_lock);
-  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
-
-  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
-    target_entry = new ExceptionCache(exception,pc,handler);
-    add_exception_cache_entry(target_entry);
-  }
-}
-
-
-//-------------end of code for ExceptionCache--------------
-
 
 
 int nmethod::total_size() const {
   return
     consts_size() +
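Review note: the ExceptionCache code removed above (it moves up into CompiledMethod in this change) reads the cache without a lock and performs all mutation under ExceptionCache_lock, publishing new entries with a release store. A minimal, self-contained sketch of that publication pattern; std::atomic stands in for OrderAccess, and Entry/head/lookup/insert are illustrative names, not HotSpot types:

    #include <atomic>
    #include <mutex>

    struct Entry {
      int    key;
      int    value;
      Entry* next;   // immutable once the entry is published
    };

    static std::atomic<Entry*> head{nullptr};
    static std::mutex          write_lock;

    // Readers take no lock. They can miss an entry inserted concurrently
    // (a false negative), but never see a half-built one, because the
    // writer only publishes fully initialized nodes with release order.
    int* lookup(int key) {
      for (Entry* e = head.load(std::memory_order_acquire); e != nullptr; e = e->next) {
        if (e->key == key) return &e->value;
      }
      return nullptr;
    }

    // Writers serialize on the lock and link the new node in front,
    // analogous to add_exception_cache_entry() after release_set_exception_cache().
    void insert(int key, int value) {
      std::lock_guard<std::mutex> g(write_lock);
      Entry* e = new Entry{key, value, head.load(std::memory_order_relaxed)};
      head.store(e, std::memory_order_release);
    }

A racing reader that misses a just-inserted entry simply recomputes the handler, which is exactly the "false negatives are okay" argument in handler_for_exception_and_pc().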
@@ -690,48 +590,53 @@
                   CodeBuffer* code_buffer,
                   int frame_size,
                   ByteSize basic_lock_owner_sp_offset,
                   ByteSize basic_lock_sp_offset,
                   OopMapSet* oop_maps )
-  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
+  : CompiledMethod(method, "native nmethod", nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
   _native_receiver_sp_offset(basic_lock_owner_sp_offset),
   _native_basic_lock_sp_offset(basic_lock_sp_offset)
 {
   {
+    int scopes_data_offset = 0;
+    int deoptimize_offset = 0;
+    int deoptimize_mh_offset = 0;
+
     debug_only(NoSafepointVerifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
     init_defaults();
-    _method = method;
     _entry_bci = InvocationEntryBci;
     // We have no exception handler or deopt handler make the
     // values something that will never match a pc like the nmethod vtable entry
     _exception_offset = 0;
-    _deoptimize_offset = 0;
-    _deoptimize_mh_offset = 0;
    _orig_pc_offset = 0;
 
     _consts_offset = data_offset();
     _stub_offset = data_offset();
     _oops_offset = data_offset();
     _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
-    _scopes_pcs_offset = _scopes_data_offset;
+    scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
+    _scopes_pcs_offset = scopes_data_offset;
     _dependencies_offset = _scopes_pcs_offset;
     _handler_table_offset = _dependencies_offset;
     _nul_chk_table_offset = _handler_table_offset;
     _nmethod_end_offset = _nul_chk_table_offset;
     _compile_id = compile_id;
     _comp_level = CompLevel_none;
     _entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
     _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
     _osr_entry_point = NULL;
     _exception_cache = NULL;
-    _pc_desc_cache.reset_to(NULL);
+    _pc_desc_container.reset_to(NULL);
     _hotness_counter = NMethodSweeper::hotness_counter_reset_val();
 
+    _scopes_data_begin = (address) this + scopes_data_offset;
+    _deopt_handler_begin = (address) this + deoptimize_offset;
+    _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset;
+
+    code_buffer->copy_code_and_locs_to(this);
     code_buffer->copy_values_to(this);
     if (ScavengeRootsInCode) {
       if (detect_scavenge_root_oops()) {
         CodeCache::add_scavenge_root_nmethod(this);
       }
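Review note: the constructor now computes a local scopes_data_offset and stores absolute pointers (_scopes_data_begin, _deopt_handler_begin, _deopt_mh_handler_begin) instead of keeping per-nmethod offset fields, presumably so the new CompiledMethod base class can expose the section boundaries without knowing the subclass layout. A toy sketch of the two styles; Blob and its members are stand-ins, not HotSpot classes:

    #include <cstdint>

    struct Blob {
      int      data_off;    // offset style: every access recomputes the address
      uint8_t* data_begin;  // pointer style: computed once in the constructor

      explicit Blob(int off)
        : data_off(off),
          data_begin(reinterpret_cast<uint8_t*>(this) + off) {}

      // Offset style: the reader must know which object holds the offset.
      uint8_t* data_via_offset() { return reinterpret_cast<uint8_t*>(this) + data_off; }

      // Pointer style: a base class can expose data_begin directly,
      // whichever subclass constructor filled it in.
      uint8_t* data_via_pointer() { return data_begin; }
    };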
@@ -793,22 +698,23 @@
 #if INCLUDE_JVMCI
   , Handle installed_code,
   Handle speculation_log
 #endif
   )
-  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
+  : CompiledMethod(method, "nmethod", nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
   _native_receiver_sp_offset(in_ByteSize(-1)),
   _native_basic_lock_sp_offset(in_ByteSize(-1))
 {
   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
   {
     debug_only(NoSafepointVerifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
+    _deopt_handler_begin = (address) this;
+    _deopt_mh_handler_begin = (address) this;
+
     init_defaults();
-    _method = method;
     _entry_bci = entry_bci;
     _compile_id = compile_id;
     _comp_level = comp_level;
     _compiler = compiler;
     _orig_pc_offset = orig_pc_offset;
@@ -828,31 +734,31 @@
       _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
     } else {
       _exception_offset = -1;
     }
     if (offsets->value(CodeOffsets::Deopt) != -1) {
-      _deoptimize_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
+      _deopt_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::Deopt);
     } else {
-      _deoptimize_offset = -1;
+      _deopt_handler_begin = NULL;
     }
     if (offsets->value(CodeOffsets::DeoptMH) != -1) {
-      _deoptimize_mh_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH);
+      _deopt_mh_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::DeoptMH);
     } else {
-      _deoptimize_mh_offset = -1;
+      _deopt_mh_handler_begin = NULL;
     }
   } else {
 #endif
   // Exception handler and deopt handler are in the stub section
   assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
   assert(offsets->value(CodeOffsets::Deopt    ) != -1, "must be set");
 
   _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
-  _deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
+  _deopt_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::Deopt);
   if (offsets->value(CodeOffsets::DeoptMH) != -1) {
-    _deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
+    _deopt_mh_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::DeoptMH);
   } else {
-    _deoptimize_mh_offset = -1;
+    _deopt_mh_handler_begin = NULL;
 #if INCLUDE_JVMCI
   }
 #endif
   }
   if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
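Review note: with the handler begins held as addresses, the "absent handler" sentinel changes from offset -1 to NULL, as the hunk above shows for both deopt handlers. A tiny illustration of the two sentinel styles; Handlers is a made-up type used only to contrast them:

    #include <cstdint>

    struct Handlers {
      // Old style: byte offsets, with -1 as the magic "absent" value.
      int deopt_offset = -1;
      // New style: absolute addresses, where nullptr means "absent",
      // so the presence test needs no magic constant.
      uint8_t* deopt_begin = nullptr;

      bool has_deopt_old() const { return deopt_offset != -1; }
      bool has_deopt_new() const { return deopt_begin != nullptr; }
    };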
@@ -861,24 +767,27 @@
       _unwind_handler_offset = -1;
     }
 
     _oops_offset = data_offset();
     _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
+    int scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
 
-    _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size(), oopSize);
+    _scopes_pcs_offset = scopes_data_offset + round_to(debug_info->data_size(), oopSize);
     _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
     _handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes(), oopSize);
     _nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
     _nmethod_end_offset = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
-
     _entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
     _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
     _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
     _exception_cache = NULL;
-    _pc_desc_cache.reset_to(scopes_pcs_begin());
 
+    _scopes_data_begin = (address) this + scopes_data_offset;
+
+    _pc_desc_container.reset_to(scopes_pcs_begin());
+
+    code_buffer->copy_code_and_locs_to(this);
     // Copy contents of ScopeDescRecorder to nmethod
     code_buffer->copy_values_to(this);
     debug_info->copy_to(this);
     dependencies->copy_to(this);
     if (ScavengeRootsInCode) {
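Review note: the section offsets above are produced by accumulating aligned sizes, one data section after another. A compact sketch of that bookkeeping; round_to mirrors HotSpot's power-of-two alignment helper, while Layout and plan are illustrative names:

    #include <cstddef>

    static size_t round_to(size_t x, size_t align) {
      return (x + align - 1) & ~(align - 1);   // align must be a power of two
    }

    // Successive sections laid out by accumulating aligned sizes,
    // the same shape as the _*_offset chain in the constructor above.
    struct Layout {
      size_t oops_offset, metadata_offset, scopes_data_offset, end_offset;
    };

    Layout plan(size_t header, size_t oops, size_t metadata, size_t scopes_data) {
      const size_t oopSize = sizeof(void*);   // stand-in for HotSpot's oopSize
      Layout l;
      l.oops_offset        = header;
      l.metadata_offset    = l.oops_offset        + round_to(oops, oopSize);
      l.scopes_data_offset = l.metadata_offset    + round_to(metadata, oopSize);
      l.end_offset         = l.scopes_data_offset + round_to(scopes_data, oopSize);
      return l;
    }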
@@ -1049,31 +958,10 @@
   Metadata** dest = metadata_begin();
   for (int index = 0 ; index < length; index++) {
     dest[index] = array->at(index);
   }
 }
-
-bool nmethod::is_at_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::poll_return_type)
-      return true;
-  }
-  return false;
-}
-
-
-bool nmethod::is_at_poll_or_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    relocInfo::relocType t = iter.type();
-    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
-      return true;
-  }
-  return false;
-}
-
 
 void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
   // re-patch all oop-bearing instructions, just in case some oops moved
   RelocIterator iter(this, begin, end);
   while (iter.next()) {
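Review note: the two poll-site predicates deleted above (they also move to CompiledMethod) query the RelocIterator over the half-open window [pc, pc+1), which selects exactly the relocation records attached to the single instruction at pc. A toy version of that single-pc query; Reloc and the linear scan are stand-ins for the real iterator:

    #include <vector>

    enum RelocType { poll_type, poll_return_type, oop_type };
    struct Reloc { const unsigned char* addr; RelocType type; };

    // Mirrors the shape of RelocIterator(this, pc, pc+1): only records
    // whose address falls in [pc, pc+1) are visited.
    bool is_at_poll_return(const std::vector<Reloc>& relocs, const unsigned char* pc) {
      for (const Reloc& r : relocs) {
        if (r.addr >= pc && r.addr < pc + 1 && r.type == poll_return_type) {
          return true;
        }
      }
      return false;
    }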
@@ -1091,58 +979,11 @@
     }
   }
 }
 
 
-void nmethod::verify_oop_relocations() {
-  // Ensure sure that the code matches the current oop values
-  RelocIterator iter(this, NULL, NULL);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* reloc = iter.oop_reloc();
-      if (!reloc->oop_is_immediate()) {
-        reloc->verify_oop_relocation();
-      }
-    }
-  }
-}
-
-
-ScopeDesc* nmethod::scope_desc_at(address pc) {
-  PcDesc* pd = pc_desc_at(pc);
-  guarantee(pd != NULL, "scope must be present");
-  return new ScopeDesc(this, pd->scope_decode_offset(),
-                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
-                       pd->return_oop());
-}
-
-
-void nmethod::clear_inline_caches() {
-  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
-  if (is_zombie()) {
-    return;
-  }
-
-  RelocIterator iter(this);
-  while (iter.next()) {
-    iter.reloc()->clear_inline_cache();
-  }
-}
-
-// Clear ICStubs of all compiled ICs
-void nmethod::clear_ic_stubs() {
-  assert_locked_or_safepoint(CompiledIC_lock);
-  RelocIterator iter(this);
-  while(iter.next()) {
-    if (iter.type() == relocInfo::virtual_call_type) {
-      CompiledIC* ic = CompiledIC_at(&iter);
-      ic->clear_ic_stub();
-    }
-  }
-}
-
-void nmethod::cleanup_inline_caches(bool clean_all/*=false*/) {
+void nmethod::verify_clean_inline_caches() {
   assert_locked_or_safepoint(CompiledIC_lock);
 
   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes. If an oop in the old code was there, that oop
   // should not get GC'd. Skip the first few bytes of oops on
@@ -1154,111 +995,42 @@
   // This means that the low_boundary is going to be a little too high.
   // This shouldn't matter, since oops of non-entrant methods are never used.
   // In fact, why are we bothering to look at oops in a non-entrant method??
   }
 
-  // Find all calls in an nmethod and clear the ones that point to non-entrant,
-  // zombie and unloaded nmethods.
   ResourceMark rm;
   RelocIterator iter(this, low_boundary);
   while(iter.next()) {
     switch(iter.type()) {
       case relocInfo::virtual_call_type:
       case relocInfo::opt_virtual_call_type: {
         CompiledIC *ic = CompiledIC_at(&iter);
         // Ok, to lookup references to zombies here
         CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
-          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
-        }
-        break;
-      }
-      case relocInfo::static_call_type: {
-        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
-        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
-          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
-        }
-        break;
-      }
-    }
-  }
-}
-
-void nmethod::verify_clean_inline_caches() {
-  assert_locked_or_safepoint(CompiledIC_lock);
-
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes. If an oop in the old code was there, that oop
-  // should not get GC'd. Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (!is_in_use()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // This means that the low_boundary is going to be a little too high.
-    // This shouldn't matter, since oops of non-entrant methods are never used.
-    // In fact, why are we bothering to look at oops in a non-entrant method??
-  }
-
-  ResourceMark rm;
-  RelocIterator iter(this, low_boundary);
-  while(iter.next()) {
-    switch(iter.type()) {
-      case relocInfo::virtual_call_type:
-      case relocInfo::opt_virtual_call_type: {
-        CompiledIC *ic = CompiledIC_at(&iter);
-        // Ok, to lookup references to zombies here
-        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
+        nmethod* nm = cb->as_nmethod_or_null();
+        if( nm != NULL ) {
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
+        nmethod* nm = cb->as_nmethod_or_null();
+        if( nm != NULL ) {
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(csc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
    }
  }
-}
-
-int nmethod::verify_icholder_relocations() {
-  int count = 0;
-
-  RelocIterator iter(this);
-  while(iter.next()) {
-    if (iter.type() == relocInfo::virtual_call_type) {
-      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
-        CompiledIC *ic = CompiledIC_at(&iter);
-        if (TraceCompiledIC) {
-          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
-          ic->print();
-        }
-        assert(ic->cached_icholder() != NULL, "must be non-NULL");
-        count++;
-      }
-    }
-  }
-
-  return count;
 }
 
 // This is a private interface with the sweeper.
 void nmethod::mark_as_seen_on_stack() {
   assert(is_alive(), "Must be an alive method");
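Review note: the repeated change in this hunk swaps the is_nmethod() test plus C-style cast for as_nmethod_or_null(), folding the check and the downcast into one call. A stand-alone sketch of that idiom; Blob and Nmethod are stand-ins for the real class hierarchy:

    struct Nmethod;

    struct Blob {
      virtual ~Blob() {}
      virtual bool is_nmethod() const { return false; }
      // Checked downcast in one step: returns nullptr instead of making
      // the caller pair an is_nmethod() test with a raw cast.
      Nmethod* as_nmethod_or_null();
    };

    struct Nmethod : Blob {
      bool is_nmethod() const override { return true; }
    };

    Nmethod* Blob::as_nmethod_or_null() {
      return is_nmethod() ? static_cast<Nmethod*>(this) : nullptr;
    }

Usage then mirrors the new code in the hunk above: nm = cb->as_nmethod_or_null(); if (nm != nullptr) { ... }.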
@@ -1287,27 +1059,10 @@
   if (m == NULL) return;
   MethodData* mdo = m->method_data();
   if (mdo == NULL) return;
   // There is a benign race here. See comments in methodData.hpp.
   mdo->inc_decompile_count();
-}
-
-void nmethod::increase_unloading_clock() {
-  _global_unloading_clock++;
-  if (_global_unloading_clock == 0) {
-    // _nmethods are allocated with _unloading_clock == 0,
-    // so 0 is never used as a clock value.
-    _global_unloading_clock = 1;
-  }
-}
-
-void nmethod::set_unloading_clock(unsigned char unloading_clock) {
-  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
-}
-
-unsigned char nmethod::unloading_clock() {
-  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
 }
 
 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
 
   post_compiled_method_unload();
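Review note: the unloading-clock accessors deleted above (they move to CompiledMethod) rely on zero being reserved: methods are allocated with clock 0, so the global clock skips 0 on wrap-around and a fresh method can never look "already processed this cycle". A minimal sketch of that reserved-zero counter; the names are illustrative:

    #include <cstdint>

    static uint8_t global_clock = 1;   // objects start with clock == 0

    // Advance the clock, skipping 0 on wrap-around so a freshly
    // allocated object (clock == 0) never matches the current cycle.
    void tick() {
      if (++global_clock == 0) global_clock = 1;
    }

    bool processed_this_cycle(uint8_t object_clock) {
      return object_clock == global_clock;
    }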
@@ -1751,175 +1505,10 @@
   // attempt to report the event in the unlikely scenario where the
   // event is enabled at the time the nmethod is made a zombie.
   set_unload_reported();
 }
 
-void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
-  if (ic->is_icholder_call()) {
-    // The only exception is compiledICHolder oops which may
-    // yet be marked below. (We check this further below).
-    CompiledICHolder* cichk_oop = ic->cached_icholder();
-
-    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
-        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
-      return;
-    }
-  } else {
-    Metadata* ic_oop = ic->cached_metadata();
-    if (ic_oop != NULL) {
-      if (ic_oop->is_klass()) {
-        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
-          return;
-        }
-      } else if (ic_oop->is_method()) {
-        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
-          return;
-        }
-      } else {
-        ShouldNotReachHere();
-      }
-    }
-  }
-
-  ic->set_to_clean();
-}
-
-// This is called at the end of the strong tracing/marking phase of a
-// GC to unload an nmethod if it contains otherwise unreachable
-// oops.
-
-void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
-  // Make sure the oop's ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes. If an oop in the old code was there, that oop
-  // should not get GC'd. Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  // The RedefineClasses() API can cause the class unloading invariant
-  // to no longer be true. See jvmtiExport.hpp for details.
-  // Also, leave a debugging breadcrumb in local flag.
-  if (JvmtiExport::has_redefined_a_class()) {
-    // This set of the unloading_occurred flag is done before the
-    // call to post_compiled_method_unload() so that the unloading
-    // of this nmethod is reported.
-    unloading_occurred = true;
-  }
-
-  // Exception cache
-  clean_exception_cache(is_alive);
-
-  // If class unloading occurred we first iterate over all inline caches and
-  // clear ICs where the cached oop is referring to an unloaded klass or method.
-  // The remaining live cached oops will be traversed in the relocInfo::oop_type
-  // iteration below.
-  if (unloading_occurred) {
-    RelocIterator iter(this, low_boundary);
-    while(iter.next()) {
-      if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(&iter);
-        clean_ic_if_metadata_is_dead(ic, is_alive);
-      }
-    }
-  }
-
-  // Compiled code
-  {
-  RelocIterator iter(this, low_boundary);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* r = iter.oop_reloc();
-      // In this loop, we must only traverse those oops directly embedded in
-      // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
-      assert(1 == (r->oop_is_immediate()) +
-                  (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
-             "oop must be found in exactly one place");
-      if (r->oop_is_immediate() && r->oop_value() != NULL) {
-        if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
-          return;
-        }
-      }
-    }
-  }
-  }
-
-
-  // Scopes
-  for (oop* p = oops_begin(); p < oops_end(); p++) {
-    if (*p == Universe::non_oop_word()) continue; // skip non-oops
-    if (can_unload(is_alive, p, unloading_occurred)) {
-      return;
-    }
-  }
-
-#if INCLUDE_JVMCI
-  // Follow JVMCI method
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  if (_jvmci_installed_code != NULL) {
-    if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
-      if (!is_alive->do_object_b(_jvmci_installed_code)) {
-        clear_jvmci_installed_code();
-      }
-    } else {
-      if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
-        return;
-      }
-    }
-  }
-
-  if (_speculation_log != NULL) {
-    if (!is_alive->do_object_b(_speculation_log)) {
-      bs->write_ref_nmethod_pre(&_speculation_log, this);
-      _speculation_log = NULL;
-      bs->write_ref_nmethod_post(&_speculation_log, this);
-    }
-  }
-#endif
-
-
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary, is_alive);
-}
-
-template <class CompiledICorStaticCall>
-static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
-  // Ok, to lookup references to zombies here
-  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
-  if (cb != NULL && cb->is_nmethod()) {
-    nmethod* nm = (nmethod*)cb;
-
-    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
-      // The nmethod has not been processed yet.
-      return true;
-    }
-
-    // Clean inline caches pointing to both zombie and not_entrant methods
-    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
-      ic->set_to_clean();
-      assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
-    }
-  }
-
-  return false;
-}
-
-static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
-  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
-}
-
-static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
-  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
-}
-
 bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
   assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
 
   oop_Relocation* r = iter_at_oop->oop_reloc();
   // Traverse those oops directly embedded in the code.
@@ -1935,107 +1524,50 @@
   }
 
   return false;
 }
 
-
-bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
-  ResourceMark rm;
-
-  // Make sure the oop's ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes. If an oop in the old code was there, that oop
-  // should not get GC'd. Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  // The RedefineClasses() API can cause the class unloading invariant
-  // to no longer be true. See jvmtiExport.hpp for details.
-  // Also, leave a debugging breadcrumb in local flag.
-  if (JvmtiExport::has_redefined_a_class()) {
-    // This set of the unloading_occurred flag is done before the
-    // call to post_compiled_method_unload() so that the unloading
-    // of this nmethod is reported.
-    unloading_occurred = true;
-  }
-
-  // Exception cache
-  clean_exception_cache(is_alive);
-
-  bool is_unloaded = false;
-  bool postponed = false;
-
-  RelocIterator iter(this, low_boundary);
-  while(iter.next()) {
-
-    switch (iter.type()) {
-
-    case relocInfo::virtual_call_type:
-      if (unloading_occurred) {
-        // If class unloading occurred we first iterate over all inline caches and
-        // clear ICs where the cached oop is referring to an unloaded klass or method.
-        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
-      }
-
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
-      break;
-
-    case relocInfo::opt_virtual_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
-      break;
-
-    case relocInfo::static_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
-      break;
-
-    case relocInfo::oop_type:
-      if (!is_unloaded) {
-        is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
-      }
-      break;
-
-    case relocInfo::metadata_type:
-      break; // nothing to do.
-    }
-  }
-
-  if (is_unloaded) {
-    return postponed;
-  }
-
+bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred) {
   // Scopes
   for (oop* p = oops_begin(); p < oops_end(); p++) {
     if (*p == Universe::non_oop_word()) continue; // skip non-oops
     if (can_unload(is_alive, p, unloading_occurred)) {
-      is_unloaded = true;
-      break;
+      return true;
     }
   }
-
-  if (is_unloaded) {
-    return postponed;
-  }
+  return false;
+}
 
+bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
+  // Compiled code
+  {
+  RelocIterator iter(this, low_boundary);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::oop_type) {
+      if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
+        return true;
+      }
+    }
+  }
+  }
+
+  return do_unloading_scopes(is_alive, unloading_occurred);
+}
+
 #if INCLUDE_JVMCI
+bool nmethod::do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) {
+  bool is_unloaded = false;
   // Follow JVMCI method
   BarrierSet* bs = Universe::heap()->barrier_set();
   if (_jvmci_installed_code != NULL) {
     if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
       if (!is_alive->do_object_b(_jvmci_installed_code)) {
         clear_jvmci_installed_code();
       }
     } else {
       if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
-        is_unloaded = true;
+        return true;
       }
     }
   }
 
   if (_speculation_log != NULL) {
@@ -2043,127 +1575,13 @@
       bs->write_ref_nmethod_pre(&_speculation_log, this);
       _speculation_log = NULL;
       bs->write_ref_nmethod_post(&_speculation_log, this);
     }
   }
-#endif
-
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary, is_alive);
-
-  return postponed;
-}
-
-void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
-  ResourceMark rm;
-
-  // Make sure the oop's ready to receive visitors
-  assert(!is_zombie(),
-         "should not call follow on zombie nmethod");
-
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes. If an oop in the old code was there, that oop
-  // should not get GC'd. Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  RelocIterator iter(this, low_boundary);
-  while(iter.next()) {
-
-    switch (iter.type()) {
-
-    case relocInfo::virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
-      break;
-
-    case relocInfo::opt_virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
-      break;
-
-    case relocInfo::static_call_type:
-      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
-      break;
-    }
-  }
-}
-
-#ifdef ASSERT
-
-class CheckClass : AllStatic {
-  static BoolObjectClosure* _is_alive;
-
-  // Check class_loader is alive for this bit of metadata.
-  static void check_class(Metadata* md) {
-    Klass* klass = NULL;
-    if (md->is_klass()) {
-      klass = ((Klass*)md);
-    } else if (md->is_method()) {
-      klass = ((Method*)md)->method_holder();
-    } else if (md->is_methodData()) {
-      klass = ((MethodData*)md)->method()->method_holder();
-    } else {
-      md->print();
-      ShouldNotReachHere();
-    }
-    assert(klass->is_loader_alive(_is_alive), "must be alive");
-  }
- public:
-  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
-    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
-    _is_alive = is_alive;
-    nm->metadata_do(check_class);
-  }
-};
-
-// This is called during a safepoint so can use static data
-BoolObjectClosure* CheckClass::_is_alive = NULL;
-#endif // ASSERT
-
-
-// Processing of oop references should have been sufficient to keep
-// all strong references alive. Any weak references should have been
-// cleared as well. Visit all the metadata and ensure that it's
-// really alive.
-void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
-#ifdef ASSERT
-  RelocIterator iter(this, low_boundary);
-  while (iter.next()) {
-    // static_stub_Relocations may have dangling references to
-    // Method*s so trim them out here. Otherwise it looks like
-    // compiled code is maintaining a link to dead metadata.
-    address static_call_addr = NULL;
-    if (iter.type() == relocInfo::opt_virtual_call_type) {
-      CompiledIC* cic = CompiledIC_at(&iter);
-      if (!cic->is_call_to_interpreted()) {
-        static_call_addr = iter.addr();
-      }
-    } else if (iter.type() == relocInfo::static_call_type) {
-      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
-      if (!csc->is_call_to_interpreted()) {
-        static_call_addr = iter.addr();
-      }
-    }
-    if (static_call_addr != NULL) {
-      RelocIterator sciter(this, low_boundary);
-      while (sciter.next()) {
-        if (sciter.type() == relocInfo::static_stub_type &&
-            sciter.static_stub_reloc()->static_call() == static_call_addr) {
-          sciter.static_stub_reloc()->clear_inline_cache();
-        }
-      }
-    }
-  }
-  // Check that the metadata embedded in the nmethod is alive
-  CheckClass::do_check_class(is_alive, this);
-#endif
-}
+  return is_unloaded;
+}
+#endif
 
-
 // Iterate over metadata calling this function. Used by RedefineClasses
 void nmethod::metadata_do(void f(Metadata*)) {
   address low_boundary = verified_entry_point();
   if (is_not_entrant()) {
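Review note: the parallel unloading code deleted above (now hosted in CompiledMethod) splits inline-cache cleaning into two passes: call sites whose target nmethod has not yet been processed in the current cycle are postponed, then cleaned after all workers finish. A rough, illustrative sketch of that two-pass shape under simplified assumptions; Method, clock, and dead are stand-ins, not the real HotSpot invariants:

    #include <vector>
    #include <cstdint>

    struct Method {
      uint8_t clock;                 // last cycle this method was processed in
      bool dead;
      std::vector<Method*> callees;  // targets of this method's call sites
    };

    // First pass: clean what is safe now; a callee whose clock lags the
    // current cycle may still be being worked on elsewhere, so defer it.
    bool first_pass(Method* m, uint8_t current) {
      bool postponed = false;
      for (Method* callee : m->callees) {
        if (callee->clock != current) { postponed = true; continue; }
        if (callee->dead) { /* clean this call site now */ }
      }
      m->clock = current;
      return postponed;
    }

    // Second pass, run after every worker finished the first: all clocks
    // are up to date, so the postponed call sites can be cleaned safely.
    void second_pass(Method* m) {
      for (Method* callee : m->callees) {
        if (callee->dead) { /* clean the call site */ }
      }
    }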
@@ -2358,36 +1776,10 @@
   NOT_PRODUCT(if (TraceScavenge) detect_scavenge_root._print_nm = this);
   oops_do(&detect_scavenge_root);
   return detect_scavenge_root.detected_scavenge_root();
 }
 
-// Method that knows how to preserve outgoing arguments at call. This method must be
-// called with a frame corresponding to a Java invoke
-void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
-#ifndef SHARK
-  if (method() != NULL && !method()->is_native()) {
-    address pc = fr.pc();
-    SimpleScopeDesc ssd(this, pc);
-    Bytecode_invoke call(ssd.method(), ssd.bci());
-    bool has_receiver = call.has_receiver();
-    bool has_appendix = call.has_appendix();
-    Symbol* signature = call.signature();
-
-    // The method attached by JIT-compilers should be used, if present.
-    // Bytecode can be inaccurate in such case.
-    Method* callee = attached_method_before_pc(pc);
-    if (callee != NULL) {
-      has_receiver = !(callee->access_flags().is_static());
-      has_appendix = false;
-      signature = callee->signature();
-    }
-
-    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
-  }
-#endif // !SHARK
-}
-
 inline bool includes(void* p, void* from, void* to) {
   return from <= p && p < to;
 }
 
 
@@ -2439,23 +1831,14 @@
 void nmethod::copy_scopes_data(u_char* buffer, int size) {
   assert(scopes_data_size() >= size, "oob");
   memcpy(scopes_data_begin(), buffer, size);
 }
 
-// When using JVMCI the address might be off by the size of a call instruction.
-bool nmethod::is_deopt_entry(address pc) {
-  return pc == deopt_handler_begin()
-#if INCLUDE_JVMCI
-    || pc == (deopt_handler_begin() + NativeCall::instruction_size)
-#endif
-    ;
-}
-
 #ifdef ASSERT
-static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
-  PcDesc* lower = nm->scopes_pcs_begin();
-  PcDesc* upper = nm->scopes_pcs_end();
+static PcDesc* linear_search(const PcDescSearch& search, int pc_offset, bool approximate) {
+  PcDesc* lower = search.scopes_pcs_begin();
+  PcDesc* upper = search.scopes_pcs_end();
   lower += 1; // exclude initial sentinel
   PcDesc* res = NULL;
   for (PcDesc* p = lower; p < upper; p++) {
     NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests); // don't count this call to match_desc
     if (match_desc(p, pc_offset, approximate)) {
@@ -2469,32 +1852,32 @@
 }
 #endif
 
 
 // Finds a PcDesc with real-pc equal to "pc"
-PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
-  address base_address = code_begin();
+PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search) {
+  address base_address = search.code_begin();
   if ((pc < base_address) ||
       (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
     return NULL; // PC is wildly out of range
   }
   int pc_offset = (int) (pc - base_address);
 
   // Check the PcDesc cache if it contains the desired PcDesc
   // (This as an almost 100% hit rate.)
   PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
   if (res != NULL) {
-    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
+    assert(res == linear_search(search, pc_offset, approximate), "cache ok");
     return res;
   }
 
   // Fallback algorithm: quasi-linear search for the PcDesc
   // Find the last pc_offset less than the given offset.
   // The successor must be the required match, if there is a match at all.
   // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
-  PcDesc* lower = scopes_pcs_begin();
-  PcDesc* upper = scopes_pcs_end();
+  PcDesc* lower = search.scopes_pcs_begin();
+  PcDesc* upper = search.scopes_pcs_end();
   upper -= 1; // exclude final sentinel
   if (lower >= upper) return NULL; // native method; no PcDescs at all
 
 #define assert_LU_OK \
   /* invariant on lower..upper during the following search: */ \
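Review note: linear_search and find_pc_desc_internal now take a PcDescSearch instead of the nmethod itself, bundling exactly the inputs the search needs (the code base address plus the PcDesc range) so PcDescContainer can serve any CompiledMethod subtype. A stand-in sketch of that adapter shape; PcDescSearch here is illustrative, not the real class:

    struct PcDesc { int pc_offset; };

    // Bundle only what the search touches; the caller fills it from
    // whichever object owns the code and the descriptor table.
    struct PcDescSearch {
      unsigned char* code_begin_;
      PcDesc*        pcs_begin_;
      PcDesc*        pcs_end_;
      unsigned char* code_begin() const      { return code_begin_; }
      PcDesc* scopes_pcs_begin() const       { return pcs_begin_; }
      PcDesc* scopes_pcs_end() const         { return pcs_end_; }
    };

    PcDesc* find_exact(const PcDescSearch& s, int pc_offset) {
      for (PcDesc* p = s.scopes_pcs_begin(); p < s.scopes_pcs_end(); ++p) {
        if (p->pc_offset == pc_offset) return p;
      }
      return nullptr;
    }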
@@ -2699,55 +2082,29 @@
 
 
 // QQQ might we make this work from a frame??
 nmethodLocker::nmethodLocker(address pc) {
   CodeBlob* cb = CodeCache::find_blob(pc);
-  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
-  _nm = (nmethod*)cb;
+  guarantee(cb != NULL && cb->is_compiled(), "bad pc for a nmethod found");
+  _nm = cb->as_compiled_method();
   lock_nmethod(_nm);
 }
 
 // Only JvmtiDeferredEvent::compiled_method_unload_event()
 // should pass zombie_ok == true.
-void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
-  if (nm == NULL) return;
+void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) {
+  if (cm == NULL) return;
+  nmethod* nm = cm->as_nmethod();
   Atomic::inc(&nm->_lock_count);
   assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
 }
 
-void nmethodLocker::unlock_nmethod(nmethod* nm) {
-  if (nm == NULL) return;
+void nmethodLocker::unlock_nmethod(CompiledMethod* cm) {
+  if (cm == NULL) return;
+  nmethod* nm = cm->as_nmethod();
   Atomic::dec(&nm->_lock_count);
   assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
-}
-
-// -----------------------------------------------------------------------------
-// nmethod::get_deopt_original_pc
-//
-// Return the original PC for the given PC if:
-//   (a) the given PC belongs to a nmethod and
-//   (b) it is a deopt PC
-address nmethod::get_deopt_original_pc(const frame* fr) {
-  if (fr->cb() == NULL) return NULL;
-
-  nmethod* nm = fr->cb()->as_nmethod_or_null();
-  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
-    return nm->get_original_pc(fr);
-
-  return NULL;
-}
-
-
-// -----------------------------------------------------------------------------
-// MethodHandle
-
-bool nmethod::is_method_handle_return(address return_pc) {
-  if (!has_method_handle_invokes()) return false;
-  PcDesc* pd = pc_desc_at(return_pc);
-  if (pd == NULL)
-    return false;
-  return pd->is_method_handle_invoke();
 }
 
 
 // -----------------------------------------------------------------------------
 // Verification
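Review note: nmethodLocker now pins through the CompiledMethod interface, but the mechanism is unchanged: an atomic count bumped for the locker's lifetime and asserted non-negative on release. A self-contained RAII sketch of the same idea; MethodPin and CompiledMethodRef are invented names, not HotSpot types:

    #include <atomic>
    #include <cassert>

    struct CompiledMethodRef {
      std::atomic<int> lock_count{0};
    };

    // In the spirit of nmethodLocker: the count is raised for the guard's
    // lifetime so a sweeper-like component can see the method is in use.
    // It does not block anyone; it only pins.
    class MethodPin {
      CompiledMethodRef* m_;
     public:
      explicit MethodPin(CompiledMethodRef* m) : m_(m) {
        if (m_) m_->lock_count.fetch_add(1);
      }
      ~MethodPin() {
        if (m_) {
          int prev = m_->lock_count.fetch_sub(1);
          assert(prev > 0 && "unmatched method lock/unlock");
        }
      }
      MethodPin(const MethodPin&) = delete;
      MethodPin& operator=(const MethodPin&) = delete;
    };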