@@ -32,11 +32,11 @@
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "compiler/disassembler.hpp"
 #include "interpreter/bytecode.hpp"
-#include "oops/methodDataOop.hpp"
+#include "oops/methodData.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "utilities/dtrace.hpp"
@@ -57,11 +57,11 @@
 HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
   char*, int, char*, int, char*, int);

 #define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
   {                                                                       \
-    methodOop m = (method);                                               \
+    Method* m = (method);                                                 \
     if (m != NULL) {                                                      \
       Symbol* klass_name = m->klass_name();                               \
       Symbol* name = m->name();                                           \
       Symbol* signature = m->signature();                                 \
       HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
@@ -599,15 +602,15 @@
   // which are dependent on those classes.  The slow way is to
   // check every nmethod for dependencies which makes it linear in
   // the number of methods compiled.  For applications with a lot of
   // classes the slow way is too slow.
   for (Dependencies::DepStream deps(nm); deps.next(); ) {
-    klassOop klass = deps.context_type();
+    Klass* klass = deps.context_type();
     if (klass == NULL)  continue;  // ignore things like evol_method

     // record this nmethod as dependent on this klass
-    instanceKlass::cast(klass)->add_dependent_nmethod(nm);
+    InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
   }
 }
 NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
 if (PrintAssembly && nm != NULL)
   Disassembler::decode(nm);
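For readers outside HotSpot: this loop is what makes dependency invalidation cheap. Each nmethod registers itself with every InstanceKlass named as a dependency context, so when a class is later loaded or redefined, only the nmethods hanging off the affected classes are re-checked rather than the whole code cache. A minimal standalone model of that bookkeeping (all types here are simplified stand-ins, not the VM's):

    #include <cstdio>
    #include <vector>

    struct nmethod;

    // Stand-in for InstanceKlass: each class keeps the list of nmethods
    // whose validity depends on it.
    struct InstanceKlass {
      const char* name;
      std::vector<nmethod*> dependent_nmethods;
      void add_dependent_nmethod(nmethod* nm) { dependent_nmethods.push_back(nm); }
    };

    // Stand-in for an nmethod and its dependency stream: just the set of
    // context classes its inlining decisions assumed.
    struct nmethod {
      const char* method_name;
      std::vector<InstanceKlass*> dependency_contexts;
    };

    // Register nm with every context class, mirroring the DepStream loop above.
    void record_dependencies(nmethod* nm) {
      for (InstanceKlass* klass : nm->dependency_contexts) {
        if (klass == NULL) continue;   // e.g. evol_method deps have no context
        klass->add_dependent_nmethod(nm);
      }
    }

    int main() {
      InstanceKlass list = {"java/util/List", {}};
      nmethod nm = {"Foo::bar", {&list}};
      record_dependencies(&nm);
      // When a subclass of java/util/List loads, only list.dependent_nmethods
      // needs rechecking -- not every compiled method in the code cache.
      std::printf("%zu dependent nmethod(s)\n", list.dependent_nmethods.size());
    }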
@@ -656,11 +659,12 @@
     _orig_pc_offset          = 0;

     _consts_offset           = data_offset();
     _stub_offset             = data_offset();
     _oops_offset             = data_offset();
-    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
+    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);
     _scopes_pcs_offset       = _scopes_data_offset;
     _dependencies_offset     = _scopes_pcs_offset;
     _handler_table_offset    = _dependencies_offset;
     _nul_chk_table_offset    = _handler_table_offset;
     _nmethod_end_offset      = _nul_chk_table_offset;
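This is the heart of the layout change: a native-metadata section is spliced in between the oop table and the scopes data, and every later offset is expressed relative to its predecessor. round_to is HotSpot's align-up helper; a self-contained sketch of the arithmetic it performs (my restatement, assuming power-of-two alignment as the VM does):

    #include <cassert>
    #include <cstdint>

    // Round x up to the next multiple of align (align must be a power of two).
    // This is what keeps each nmethod data section starting on an oopSize- or
    // wordSize-aligned boundary in the offset computations above.
    static intptr_t round_to(intptr_t x, intptr_t align) {
      assert(((align & (align - 1)) == 0) && "alignment must be a power of two");
      return (x + align - 1) & ~(align - 1);
    }

    int main() {
      const intptr_t oopSize = 8;   // 64-bit VM assumption for this sketch
      assert(round_to(0,  oopSize) == 0);
      assert(round_to(1,  oopSize) == 8);
      assert(round_to(17, oopSize) == 24);
      return 0;
    }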
@@ -736,11 +740,12 @@
     _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
     _orig_pc_offset          = 0;
     _consts_offset           = data_offset();
     _stub_offset             = data_offset();
     _oops_offset             = data_offset();
-    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
+    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);
     _scopes_pcs_offset       = _scopes_data_offset;
     _dependencies_offset     = _scopes_pcs_offset;
     _handler_table_offset    = _dependencies_offset;
     _nul_chk_table_offset    = _handler_table_offset;
     _nmethod_end_offset      = _nul_chk_table_offset;
@@ -845,11 +850,13 @@
     } else {
       _unwind_handler_offset = -1;
     }

     _oops_offset             = data_offset();
-    _scopes_data_offset      = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
+    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);
+
     _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size(), oopSize);
     _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
     _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes(), oopSize);
     _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
     _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
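All three constructors now follow the same running-offset pattern: each section starts where the previous one ends, rounded up to its alignment, so the metadata table occupies [_metadata_offset, _scopes_data_offset). A standalone sketch of the pattern with made-up sizes (oopSize/wordSize values are 64-bit assumptions, not queried from a VM; round_to as in the earlier sketch):

    #include <cstdio>

    static int round_to(int x, int align) { return (x + align - 1) & ~(align - 1); }

    int main() {
      const int oopSize = 8, wordSize = 8;   // 64-bit assumption for the sketch

      // Hypothetical section sizes for one compiled method.
      int total_oop_size = 36, total_metadata_size = 52, debug_info_size = 300;

      // Running-offset layout, mirroring the constructor above: each section
      // begins where the previous one ends, rounded up for alignment.
      int oops_offset        = 0;   // data_offset() in the real code
      int metadata_offset    = oops_offset        + round_to(total_oop_size, oopSize);
      int scopes_data_offset = metadata_offset    + round_to(total_metadata_size, wordSize);
      int scopes_pcs_offset  = scopes_data_offset + round_to(debug_info_size, oopSize);

      std::printf("oops     [%d, %d)\n", oops_offset, metadata_offset);
      std::printf("metadata [%d, %d)\n", metadata_offset, scopes_data_offset);
      std::printf("scopes   [%d, %d)\n", scopes_data_offset, scopes_pcs_offset);
    }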
@@ -1018,10 +1025,18 @@
   // CodeBlob constructor, so it is valid even at this early point to
   // iterate over relocations and patch the code.
   fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
 }

+void nmethod::copy_values(GrowableArray<Metadata*>* array) {
+  int length = array->length();
+  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
+  Metadata** dest = metadata_begin();
+  for (int index = 0 ; index < length; index++) {
+    dest[index] = array->at(index);
+  }
+}

 bool nmethod::is_at_poll_return(address pc) {
   RelocIterator iter(this, pc, pc+1);
   while (iter.next()) {
     if (iter.type() == relocInfo::poll_return_type)
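copy_values is what populates the metadata section whose room was reserved by the offsets above: the compiler hands over a GrowableArray<Metadata*> and the nmethod copies it into [metadata_begin(), metadata_end()). A minimal model of the same bounds-checked copy, with std::vector standing in for GrowableArray:

    #include <cassert>
    #include <vector>

    struct Metadata;   // opaque here, as in much of the VM

    struct nmethod_model {
      Metadata** metadata_begin_;
      Metadata** metadata_end_;

      // Mirrors nmethod::copy_values: assert the reserved section is big
      // enough, then copy element by element.
      void copy_values(const std::vector<Metadata*>& array) {
        size_t length = array.size();
        assert(metadata_begin_ + length <= metadata_end_ && "big enough");
        for (size_t i = 0; i < length; i++) {
          metadata_begin_[i] = array[i];
        }
      }
    };

    int main() {
      Metadata* slots[4] = {};
      nmethod_model nm = { slots, slots + 4 };
      nm.copy_values({nullptr, nullptr});   // two entries, well within bounds
    }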
@@ -1193,26 +1211,26 @@
   flush_dependencies(is_alive);

   // Break cycle between nmethod & method
   if (TraceClassUnloading && WizardMode) {
     tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
-                  " unloadable], methodOop(" INTPTR_FORMAT
+                  " unloadable], Method*(" INTPTR_FORMAT
                   "), cause(" INTPTR_FORMAT ")",
                   this, (address)_method, (address)cause);
     if (!Universe::heap()->is_gc_active())
       cause->klass()->print();
   }
   // Unlink the osr method, so we do not look this up again
   if (is_osr_method()) {
     invalidate_osr_method();
   }
-  // If _method is already NULL the methodOop is about to be unloaded,
+  // If _method is already NULL the Method* is about to be unloaded,
   // so we don't have to break the cycle. Note that it is possible to
-  // have the methodOop live here, in case we unload the nmethod because
-  // it is pointing to some oop (other than the methodOop) being unloaded.
+  // have the Method* live here, in case we unload the nmethod because
+  // it is pointing to some oop (other than the Method*) being unloaded.
   if (_method != NULL) {
-    // OSR methods point to the methodOop, but the methodOop does not
+    // OSR methods point to the Method*, but the Method* does not
     // point back!
     if (_method->code() == this) {
       _method->clear_code();   // Break a cycle
     }
     _method = NULL;            // Clear the method of this dead nmethod
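The cycle being broken here: a Method keeps a pointer to its current compiled code, and the nmethod points back at its Method. On unload, the forward link is cleared only if it still refers to this nmethod, since an OSR compilation never installs itself as the method's code and a newer compilation may already have replaced this one. A toy model of just that hand-shake (names are mine, not the VM's):

    struct nmethod_model;

    struct Method_model {
      nmethod_model* _code = nullptr;        // current compiled entry, if any
      void clear_code() { _code = nullptr; }
      nmethod_model* code() const { return _code; }
    };

    struct nmethod_model {
      Method_model* _method = nullptr;

      void break_method_cycle() {
        if (_method != nullptr) {
          // Only clear the forward link if it points at *this* nmethod;
          // another compilation may already have replaced us.
          if (_method->code() == this) {
            _method->clear_code();
          }
          _method = nullptr;   // this nmethod is dead either way
        }
      }
    };

    int main() {
      Method_model m;
      nmethod_model nm;
      nm._method = &m;
      m._code = &nm;
      nm.break_method_cycle();   // both links now cleared
    }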
@@ -1438,17 +1456,17 @@
   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
          "is_alive is non-NULL if and only if we are called during GC");
   if (!has_flushed_dependencies()) {
     set_has_flushed_dependencies();
     for (Dependencies::DepStream deps(this); deps.next(); ) {
-      klassOop klass = deps.context_type();
+      Klass* klass = deps.context_type();
       if (klass == NULL)  continue;  // ignore things like evol_method

       // During GC the is_alive closure is non-NULL, and is used to
       // determine liveness of dependees that need to be updated.
-      if (is_alive == NULL || is_alive->do_object_b(klass)) {
-        instanceKlass::cast(klass)->remove_dependent_nmethod(this);
+      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
+        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
       }
     }
   }
 }

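Note the changed liveness question: is_alive->do_object_b(klass) no longer makes sense because a Klass is not a heap object after PermGen removal. What keeps a Klass alive is its class loader, so the test becomes klass->is_loader_alive(is_alive). A simplified sketch of that indirection (the real ClassLoaderData handling, boot loader and anonymous classes included, is more involved):

    struct oop_model { bool marked; };   // stand-in for a Java heap object

    // Stand-in for BoolObjectClosure: answers "is this heap object alive?"
    struct BoolObjectClosure_model {
      bool do_object_b(const oop_model* obj) const { return obj->marked; }
    };

    // After PermGen removal a Klass lives in native memory; it stays alive
    // exactly as long as its defining loader does (simplification: the
    // boot loader and anonymous-class cases are omitted here).
    struct Klass_model {
      oop_model* class_loader;
      bool is_loader_alive(const BoolObjectClosure_model* is_alive) const {
        return is_alive->do_object_b(class_loader);
      }
    };

    int main() {
      oop_model loader = { /*marked=*/ true };
      BoolObjectClosure_model closure;
      Klass_model k = { &loader };
      return k.is_loader_alive(&closure) ? 0 : 1;
    }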
@@ -1460,20 +1478,11 @@
   assert(root != NULL, "just checking");
   oop obj = *root;
   if (obj == NULL || is_alive->do_object_b(obj)) {
     return false;
   }
-  if (obj->is_compiledICHolder()) {
-    compiledICHolderOop cichk_oop = compiledICHolderOop(obj);
-    if (is_alive->do_object_b(
-          cichk_oop->holder_method()->method_holder()) &&
-        is_alive->do_object_b(cichk_oop->holder_klass())) {
-      // The oop should be kept alive
-      keep_alive->do_oop(root);
-      return false;
-    }
-  }
+
   // If ScavengeRootsInCode is true, an nmethod might be unloaded
   // simply because one of its constant oops has gone dead.
   // No actual classes need to be unloaded in order for this to occur.
   assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
   make_unloaded(is_alive, obj);
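The deleted branch was the last reason a dead-looking oop might be resurrected here: a compiledICHolder oop whose method and klass were still marked was kept alive via keep_alive->do_oop. With IC holders moved out of the Java heap, the predicate collapses to a single rule. Restated (a model of the contract, not the VM code):

    // 'root' points at one oop slot embedded in the nmethod. The nmethod
    // must be unloaded exactly when a non-null embedded oop is no longer
    // reachable; there is no special case left to keep it alive.
    template <typename IsAlive>
    bool can_unload_model(IsAlive is_alive, void** root) {
      void* obj = *root;
      if (obj == nullptr || is_alive(obj)) {
        return false;   // empty slot, or object survived marking
      }
      return true;      // a constant oop died: unload the whole nmethod
    }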
@@ -1484,11 +1493,11 @@
 // post_compiled_method_load_event
 // new method for install_code() path
 // Transfer information from compilation to jvmti
 void nmethod::post_compiled_method_load_event() {

-  methodOop moop = method();
+  Method* moop = method();
 #ifndef USDT2
   HS_DTRACE_PROBE8(hotspot, compiled__method__load,
       moop->klass_name()->bytes(),
       moop->klass_name()->utf8_length(),
       moop->name()->bytes(),
@@ -1539,14 +1548,14 @@
   assert(_method != NULL && !is_unloaded(), "just checking");
   DTRACE_METHOD_UNLOAD_PROBE(method());

   // If a JVMTI agent has enabled the CompiledMethodUnload event then
   // post the event. Sometime later this nmethod will be made a zombie
-  // by the sweeper but the methodOop will not be valid at that point.
+  // by the sweeper but the Method* will not be valid at that point.
   // If the _jmethod_id is null then no load event was ever requested
   // so don't bother posting the unload.  The main reason for this is
-  // that the jmethodID is a weak reference to the methodOop so if
+  // that the jmethodID is a weak reference to the Method* so if
   // it's being unloaded there's no way to look it up since the weak
   // ref will have been cleared.
   if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     JvmtiDeferredEvent event =
@@ -1627,31 +1629,41 @@
   if (unloading_occurred) {
     RelocIterator iter(this, low_boundary);
     while(iter.next()) {
       if (iter.type() == relocInfo::virtual_call_type) {
         CompiledIC *ic = CompiledIC_at(iter.reloc());
-        oop ic_oop = ic->cached_oop();
-        if (ic_oop != NULL && !is_alive->do_object_b(ic_oop)) {
+        if (ic->is_icholder_call()) {
           // The only exception is compiledICHolder oops which may
           // yet be marked below. (We check this further below).
-          if (ic_oop->is_compiledICHolder()) {
-            compiledICHolderOop cichk_oop = compiledICHolderOop(ic_oop);
-            if (is_alive->do_object_b(
-                  cichk_oop->holder_method()->method_holder()) &&
-                is_alive->do_object_b(cichk_oop->holder_klass())) {
+          CompiledICHolder* cichk_oop = ic->cached_icholder();
+          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
+              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
             continue;
           }
+        } else {
+          Metadata* ic_oop = ic->cached_metadata();
+          if (ic_oop != NULL) {
+            if (ic_oop->is_klass()) {
+              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
+                continue;
+              }
+            } else if (ic_oop->is_method()) {
+              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
+                continue;
+              }
+            } else {
+              ShouldNotReachHere();
+            }
+          }
         }
         ic->set_to_clean();
-        assert(ic->cached_oop() == NULL,
-               "cached oop in IC should be cleared");
-      }
       }
     }
   }

   // Compiled code
+  {
   RelocIterator iter(this, low_boundary);
   while (iter.next()) {
     if (iter.type() == relocInfo::oop_type) {
       oop_Relocation* r = iter.oop_reloc();
       // In this loop, we must only traverse those oops directly embedded in
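The rewritten inline-cache sweep now distinguishes two kinds of cached callee: a native CompiledICHolder (for calls still going through the holder) and a bare Metadata* that is either a Klass or a Method. In every case the question bottoms out at "is the relevant class loader still alive?"; if not, the IC is reset to the clean state. The decision tree, restated over toy types (field names are mine, and the per-kind loader lookup is flattened into one flag):

    enum class MetaKind { Klass, Method };

    struct Meta {                       // stand-in for Metadata (Klass/Method)
      MetaKind kind;
      bool loader_alive;                // result of the is_loader_alive chain
    };

    struct ICHolder {                   // stand-in for CompiledICHolder
      bool holder_method_loader_alive;
      bool holder_klass_loader_alive;
    };

    struct CompiledIC_model {
      ICHolder* icholder;               // non-null => is_icholder_call()
      Meta* metadata;                   // otherwise cached_metadata()
      bool cleaned = false;
      void set_to_clean() { cleaned = true; }
    };

    // Mirrors the branch structure of the loop above: keep the IC only if
    // everything it references is still loader-alive; otherwise clean it.
    void clean_ic_if_dead(CompiledIC_model& ic) {
      if (ic.icholder != nullptr) {
        if (ic.icholder->holder_method_loader_alive &&
            ic.icholder->holder_klass_loader_alive) {
          return;                       // keep
        }
      } else if (ic.metadata != nullptr) {
        if (ic.metadata->loader_alive) {
          return;                       // keep (covers both Klass and Method)
        }
      }
      ic.set_to_clean();
    }

    int main() {
      Meta dead = { MetaKind::Method, /*loader_alive=*/ false };
      CompiledIC_model ic = { nullptr, &dead };
      clean_ic_if_dead(ic);             // metadata's loader died => IC cleaned
      return ic.cleaned ? 0 : 1;
    }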
@@ -1664,36 +1676,135 @@
         return;
       }
     }
   }
 }
+  }

   // Scopes
   for (oop* p = oops_begin(); p < oops_end(); p++) {
     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
     if (can_unload(is_alive, keep_alive, p, unloading_occurred)) {
       return;
     }
   }

-#ifndef PRODUCT
-  // This nmethod was not unloaded; check below that all CompiledICs
-  // refer to marked oops.
-  {
-    RelocIterator iter(this, low_boundary);
-    while (iter.next()) {
-      if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
-        oop ic_oop = ic->cached_oop();
-        assert(ic_oop == NULL || is_alive->do_object_b(ic_oop),
-               "Found unmarked ic_oop in reachable nmethod");
-      }
-    }
-  }
-#endif // !PRODUCT
-}
+  // Ensure that all metadata is still alive
+  verify_metadata_loaders(low_boundary, is_alive);
+}
+
+#ifdef ASSERT
+
+class CheckClass : AllStatic {
+  static BoolObjectClosure* _is_alive;
+
+  // Check class_loader is alive for this bit of metadata.
+  static void check_class(Metadata* md) {
+    Klass* klass = NULL;
+    if (md->is_klass()) {
+      klass = ((Klass*)md);
+    } else if (md->is_method()) {
+      klass = ((Method*)md)->method_holder();
+    } else if (md->is_methodData()) {
+      klass = ((MethodData*)md)->method()->method_holder();
+    } else {
+      md->print();
+      ShouldNotReachHere();
+    }
+    assert(klass->is_loader_alive(_is_alive), "must be alive");
+  }
+ public:
+  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
+    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
+    _is_alive = is_alive;
+    nm->metadata_do(check_class);
+  }
+};
+
+// This is called during a safepoint so can use static data
+BoolObjectClosure* CheckClass::_is_alive = NULL;
+#endif // ASSERT
+
+
+// Processing of oop references should have been sufficient to keep
+// all strong references alive.  Any weak references should have been
+// cleared as well.  Visit all the metadata and ensure that it's
+// really alive.
+void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
+#ifdef ASSERT
+  RelocIterator iter(this, low_boundary);
+  while (iter.next()) {
+    // static_stub_Relocations may have dangling references to
+    // Method*s so trim them out here.  Otherwise it looks like
+    // compiled code is maintaining a link to dead metadata.
+    address static_call_addr = NULL;
+    if (iter.type() == relocInfo::opt_virtual_call_type) {
+      CompiledIC* cic = CompiledIC_at(iter.reloc());
+      if (!cic->is_call_to_interpreted()) {
+        static_call_addr = iter.addr();
+      }
+    } else if (iter.type() == relocInfo::static_call_type) {
+      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
+      if (!csc->is_call_to_interpreted()) {
+        static_call_addr = iter.addr();
+      }
+    }
+    if (static_call_addr != NULL) {
+      RelocIterator sciter(this, low_boundary);
+      while (sciter.next()) {
+        if (sciter.type() == relocInfo::static_stub_type &&
+            sciter.static_stub_reloc()->static_call() == static_call_addr) {
+          sciter.static_stub_reloc()->clear_inline_cache();
+        }
+      }
+    }
+  }
+  // Check that the metadata embedded in the nmethod is alive
+  CheckClass::do_check_class(is_alive, this);
+#endif
+}
+
+
+// Iterate over metadata calling this function.  Used by RedefineClasses
+void nmethod::metadata_do(void f(Metadata*)) {
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+  {
+    // Visit all immediate references that are embedded in the instruction stream.
+    RelocIterator iter(this, low_boundary);
+    while (iter.next()) {
+      if (iter.type() == relocInfo::metadata_type ) {
+        metadata_Relocation* r = iter.metadata_reloc();
+        // In this loop, we must only follow those metadatas directly embedded in
+        // the code.  Other metadatas (oop_index>0) are seen as part of
+        // the metadata section below.
+        assert(1 == (r->metadata_is_immediate()) +
+               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
+               "metadata must be found in exactly one place");
+        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
+          Metadata* md = r->metadata_value();
+          f(md);
+        }
+      }
+    }
+  }
+
+  // Visit the metadata section
+  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
+    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
+    Metadata* md = *p;
+    f(md);
+  }
+  // Call f on the Method* itself, which is not embedded in these other places.
+  if (_method != NULL) f(_method);
+}
+

 // This method is called twice during GC -- once while
 // tracing the "active" nmethods on thread stacks during
 // the (strong) marking phase, and then again when walking
 // the code cache contents during the weak roots processing
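verify_metadata_loaders and metadata_do are the new metadata-side counterparts of the oop machinery: the former is debug-only sanity checking (plus trimming stale static-stub references), the latter a general visitor that RedefineClasses-style bookkeeping uses. metadata_do reaches every Metadata* an nmethod holds through three channels: immediates baked into the instruction stream, the metadata section, and _method itself. A compact model of that walk (visitor and data are illustrative only):

    #include <cstdio>
    #include <vector>

    struct Metadata { const char* what; };

    // Model of the three places nmethod::metadata_do visits, in order:
    // immediates baked into code, the metadata section, then _method.
    // The real code asserts each reference is found in exactly one place.
    struct nmethod_model {
      std::vector<Metadata*> immediates;   // metadata_Relocation immediates
      std::vector<Metadata*> section;      // [metadata_begin, metadata_end)
      Metadata* _method;

      void metadata_do(void f(Metadata*)) {
        for (Metadata* md : immediates) if (md != nullptr) f(md);
        for (Metadata* md : section)    if (md != nullptr) f(md);
        if (_method != nullptr) f(_method);
      }
    };

    static void print_md(Metadata* md) { std::printf("visited %s\n", md->what); }

    int main() {
      Metadata k = {"Klass"}, d = {"MethodData"}, m = {"Method"};
      nmethod_model nm = {{&k}, {&d}, &m};
      nm.metadata_do(print_md);   // in the VM: nm->metadata_do(f) at a safepoint
    }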
@@ -1717,21 +1828,10 @@
     low_boundary += NativeJump::instruction_size;
     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
     // (See comment above.)
   }

-  // Compiled code
-  f->do_oop((oop*) &_method);
-  if (!do_strong_roots_only) {
-    // weak roots processing phase -- update ExceptionCache oops
-    ExceptionCache* ec = exception_cache();
-    while(ec != NULL) {
-      f->do_oop((oop*)ec->exception_type_addr());
-      ec = ec->next();
-    }
-  } // Else strong roots phase -- skip oops in ExceptionCache
-
   RelocIterator iter(this, low_boundary);

   while (iter.next()) {
     if (iter.type() == relocInfo::oop_type ) {
       oop_Relocation* r = iter.oop_reloc();
@@ -2061,25 +2161,25 @@
     }
   }
   return found_check;
 }

-bool nmethod::is_evol_dependent_on(klassOop dependee) {
-  instanceKlass *dependee_ik = instanceKlass::cast(dependee);
-  objArrayOop dependee_methods = dependee_ik->methods();
+bool nmethod::is_evol_dependent_on(Klass* dependee) {
+  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
+  Array<Method*>* dependee_methods = dependee_ik->methods();
   for (Dependencies::DepStream deps(this); deps.next(); ) {
     if (deps.type() == Dependencies::evol_method) {
-      methodOop method = deps.method_argument(0);
+      Method* method = deps.method_argument(0);
       for (int j = 0; j < dependee_methods->length(); j++) {
-        if ((methodOop) dependee_methods->obj_at(j) == method) {
+        if (dependee_methods->at(j) == method) {
           // RC_TRACE macro has an embedded ResourceMark
           RC_TRACE(0x01000000,
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
-            _method->method_holder()->klass_part()->external_name(),
+            _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(), compile_id(),
-            method->method_holder()->klass_part()->external_name(),
+            method->method_holder()->external_name(),
             method->name()->as_C_string(),
             method->signature()->as_C_string()));
           if (TraceDependencies || LogCompilation)
             deps.log_dependency(dependee);
           return true;
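The methods list also changes representation: objArrayOop was a heap array of oops, so each element came back as an oop and needed a methodOop cast; Array<Method*> is a typed native array and at(j) needs no cast. A toy version of the difference (this Array is a sketch, not the VM template):

    struct Method;

    // Toy version of the typed native array that replaced objArrayOop.
    template <typename T>
    class Array {
      int _length;
      T*  _data;
    public:
      Array(T* data, int length) : _length(length), _data(data) {}
      int length() const { return _length; }
      T at(int i) const { return _data[i]; }   // typed: no cast at the call site
    };

    // Old style (schematic):  (methodOop) dependee_methods->obj_at(j) == method
    // New style:              dependee_methods->at(j) == method
    bool contains(const Array<Method*>& methods, const Method* m) {
      for (int j = 0; j < methods.length(); j++) {
        if (methods.at(j) == m) return true;
      }
      return false;
    }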
@@ -2272,15 +2372,15 @@
   CompiledIC* ic = NULL;
   Thread *cur = Thread::current();
   if (CompiledIC_lock->owner() == cur ||
       ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
        SafepointSynchronize::is_at_safepoint())) {
-    ic = CompiledIC_at(call_site);
+    ic = CompiledIC_at(this, call_site);
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   } else {
     MutexLocker ml_verify (CompiledIC_lock);
-    ic = CompiledIC_at(call_site);
+    ic = CompiledIC_at(this, call_site);
   }
   PcDesc* pd = pc_desc_at(ic->end_of_call());
   assert(pd != NULL, "PcDesc must exist");
   for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
       pd->obj_decode_offset(), pd->should_reexecute(),
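CompiledIC_at picks up the owning nmethod as a first argument. The reason, as I read the change: a CompiledIC can now cache an oop, a Metadata*, or a CompiledICHolder*, and classifying which requires consulting the nmethod's relocation information, which the call-site address alone cannot provide. Schematically (a hypothetical factory, not the VM's signature beyond what the diff shows):

    struct nmethod_model;
    struct NativeCall {};             // decoded call instruction (stand-in)

    struct CompiledIC_model {
      const nmethod_model* owner;     // new: grants access to relocation info
      NativeCall* call;
    };

    // Mirrors the new two-argument CompiledIC_at(this, call_site): the owner
    // is what lets the IC decide whether its cached value is an oop, a
    // Metadata*, or a CompiledICHolder*, by examining the owner's relocations.
    inline CompiledIC_model CompiledIC_at_model(const nmethod_model* nm,
                                                NativeCall* call_site) {
      return CompiledIC_model{ nm, call_site };
    }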