src/hotspot/share/code/nmethod.cpp
changeset 50416 ef980b9ac191
parent 49902 3661f31c6df4
child 50429 83aec1d357d4
50415:e4301f8c3aaa 50416:ef980b9ac191
   944 
   944 
   945 
   945 
   946 void nmethod::verify_clean_inline_caches() {
   946 void nmethod::verify_clean_inline_caches() {
   947   assert_locked_or_safepoint(CompiledIC_lock);
   947   assert_locked_or_safepoint(CompiledIC_lock);
   948 
   948 
   949   // If the method is not entrant or zombie then a JMP is plastered over the
       
   950   // first few bytes.  If an oop in the old code was there, that oop
       
   951   // should not get GC'd.  Skip the first few bytes of oops on
       
   952   // not-entrant methods.
       
   953   address low_boundary = verified_entry_point();
       
   954   if (!is_in_use()) {
       
   955     low_boundary += NativeJump::instruction_size;
       
   956     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
       
   957     // This means that the low_boundary is going to be a little too high.
       
   958     // This shouldn't matter, since oops of non-entrant methods are never used.
       
   959     // In fact, why are we bothering to look at oops in a non-entrant method??
       
   960   }
       
   961 
       
   962   ResourceMark rm;
   949   ResourceMark rm;
   963   RelocIterator iter(this, low_boundary);
   950   RelocIterator iter(this, oops_reloc_begin());
   964   while(iter.next()) {
   951   while(iter.next()) {
   965     switch(iter.type()) {
   952     switch(iter.type()) {
   966       case relocInfo::virtual_call_type:
   953       case relocInfo::virtual_call_type:
   967       case relocInfo::opt_virtual_call_type: {
   954       case relocInfo::opt_virtual_call_type: {
   968         CompiledIC *ic = CompiledIC_at(&iter);
   955         CompiledIC *ic = CompiledIC_at(&iter);
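The low_boundary setup deleted above is the same boilerplate removed from oops_do() and metadata_do() further down; all three call sites now go through oops_reloc_begin(). A minimal sketch of that helper, reconstructed from the deleted lines (and assuming it lives on CompiledMethod, which nmethod derives from) rather than copied from the changeset:

    address CompiledMethod::oops_reloc_begin() const {
      // Not-entrant and zombie methods have a jump patched over the verified
      // entry point; any oop sitting under that patch must not be visited.
      address low_boundary = verified_entry_point();
      if (!is_in_use()) {
        low_boundary += NativeJump::instruction_size;
        // On SPARC only a 4-byte trap is patched, so this is slightly too
        // high; harmless, since oops of non-entrant methods are never used.
      }
      return low_boundary;
    }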
  1039   // InstanceKlass.
  1026   // InstanceKlass.
  1040   assert(Universe::heap()->is_gc_active(), "should only be called during gc");
  1027   assert(Universe::heap()->is_gc_active(), "should only be called during gc");
  1041   flush_dependencies(/*delete_immediately*/false);
  1028   flush_dependencies(/*delete_immediately*/false);
  1042 
  1029 
  1043   // Break cycle between nmethod & method
  1030   // Break cycle between nmethod & method
  1044   LogTarget(Trace, class, unload) lt;
  1031   LogTarget(Trace, class, unload, nmethod) lt;
  1045   if (lt.is_enabled()) {
  1032   if (lt.is_enabled()) {
  1046     LogStream ls(lt);
  1033     LogStream ls(lt);
  1047     ls.print_cr("making nmethod " INTPTR_FORMAT
  1034     ls.print("making nmethod " INTPTR_FORMAT
  1048                   " unloadable, Method*(" INTPTR_FORMAT
  1035              " unloadable, Method*(" INTPTR_FORMAT
  1049                   "), cause(" INTPTR_FORMAT ")",
  1036              "), cause(" INTPTR_FORMAT ") ",
  1050                   p2i(this), p2i(_method), p2i(cause));
  1037              p2i(this), p2i(_method), p2i(cause));
       
  1038      if (cause != NULL) {
       
  1039        cause->print_value_on(&ls);
       
  1040      }
       
  1041      ls.cr();
  1051   }
  1042   }
  1052   // Unlink the osr method, so we do not look this up again
  1043   // Unlink the osr method, so we do not look this up again
  1053   if (is_osr_method()) {
  1044   if (is_osr_method()) {
  1054     // Invalidate the osr nmethod only once
  1045     // Invalidate the osr nmethod only once
  1055     if (is_in_use()) {
  1046     if (is_in_use()) {
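The logging hunk above adds an nmethod tag and switches from a single print_cr() to a print() followed by the cause's print_value_on(). Assuming standard Unified Logging selector syntax, the trace is now enabled with:

    java -Xlog:class+unload+nmethod=trace ...

whereas the old LogTarget(Trace, class, unload) output was matched by -Xlog:class+unload=trace.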
  1376   }
  1367   }
  1377 }
  1368 }
  1378 
  1369 
  1379 
  1370 
  1380 // If this oop is not live, the nmethod can be unloaded.
  1371 // If this oop is not live, the nmethod can be unloaded.
  1381 bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
  1372 bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root) {
  1382   assert(root != NULL, "just checking");
  1373   assert(root != NULL, "just checking");
  1383   oop obj = *root;
  1374   oop obj = *root;
  1384   if (obj == NULL || is_alive->do_object_b(obj)) {
  1375   if (obj == NULL || is_alive->do_object_b(obj)) {
  1385       return false;
  1376       return false;
  1386   }
  1377   }
  1387 
  1378 
  1388   // If ScavengeRootsInCode is true, an nmethod might be unloaded
  1379   // An nmethod might be unloaded simply because one of its constant oops has gone dead.
  1389   // simply because one of its constant oops has gone dead.
       
  1390   // No actual classes need to be unloaded in order for this to occur.
  1380   // No actual classes need to be unloaded in order for this to occur.
  1391   assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
       
  1392   make_unloaded(obj);
  1381   make_unloaded(obj);
  1393   return true;
  1382   return true;
  1394 }
  1383 }
  1395 
  1384 
  1396 // ------------------------------------------------------------------
  1385 // ------------------------------------------------------------------
  1464   // attempt to report the event in the unlikely scenario where the
  1453   // attempt to report the event in the unlikely scenario where the
  1465   // event is enabled at the time the nmethod is made a zombie.
  1454   // event is enabled at the time the nmethod is made a zombie.
  1466   set_unload_reported();
  1455   set_unload_reported();
  1467 }
  1456 }
  1468 
  1457 
  1469 bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
  1458 bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive) {
  1470   assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
  1459   assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
  1471 
  1460 
  1472   oop_Relocation* r = iter_at_oop->oop_reloc();
  1461   oop_Relocation* r = iter_at_oop->oop_reloc();
  1473   // Traverse those oops directly embedded in the code.
  1462   // Traverse those oops directly embedded in the code.
  1474   // Other oops (oop_index>0) are seen as part of scopes_oops.
  1463   // Other oops (oop_index>0) are seen as part of scopes_oops.
  1475   assert(1 == (r->oop_is_immediate()) +
  1464   assert(1 == (r->oop_is_immediate()) +
  1476          (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
  1465          (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
  1477          "oop must be found in exactly one place");
  1466          "oop must be found in exactly one place");
  1478   if (r->oop_is_immediate() && r->oop_value() != NULL) {
  1467   if (r->oop_is_immediate() && r->oop_value() != NULL) {
  1479     // Unload this nmethod if the oop is dead.
  1468     // Unload this nmethod if the oop is dead.
  1480     if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
  1469     if (can_unload(is_alive, r->oop_addr())) {
   1481       return true;
   1470       return true;
  1482     }
  1471     }
  1483   }
  1472   }
  1484 
  1473 
  1485   return false;
  1474   return false;
  1486 }
  1475 }
  1487 
  1476 
  1488 bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred) {
  1477 bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive) {
  1489   // Scopes
  1478   // Scopes
  1490   for (oop* p = oops_begin(); p < oops_end(); p++) {
  1479   for (oop* p = oops_begin(); p < oops_end(); p++) {
  1491     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
  1480     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
  1492     if (can_unload(is_alive, p, unloading_occurred)) {
  1481     if (can_unload(is_alive, p)) {
  1493       return true;
  1482       return true;
  1494     }
  1483     }
  1495   }
  1484   }
  1496   return false;
  1485   return false;
  1497 }
  1486 }
  1498 
  1487 
  1499 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
  1488 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) {
  1500   // Compiled code
  1489   // Compiled code
  1501 
  1490 
  1502   // Prevent extra code cache walk for platforms that don't have immediate oops.
  1491   // Prevent extra code cache walk for platforms that don't have immediate oops.
  1503   if (relocInfo::mustIterateImmediateOopsInCode()) {
  1492   if (relocInfo::mustIterateImmediateOopsInCode()) {
  1504     RelocIterator iter(this, low_boundary);
  1493     RelocIterator iter(this, low_boundary);
  1505     while (iter.next()) {
  1494     while (iter.next()) {
  1506       if (iter.type() == relocInfo::oop_type) {
  1495       if (iter.type() == relocInfo::oop_type) {
  1507         if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
  1496         if (unload_if_dead_at(&iter, is_alive)) {
  1508           return true;
  1497           return true;
  1509         }
  1498         }
  1510       }
  1499       }
  1511     }
  1500     }
  1512   }
  1501   }
  1513 
  1502 
  1514   return do_unloading_scopes(is_alive, unloading_occurred);
  1503   return do_unloading_scopes(is_alive);
  1515 }
  1504 }
  1516 
  1505 
  1517 #if INCLUDE_JVMCI
  1506 #if INCLUDE_JVMCI
  1518 bool nmethod::do_unloading_jvmci(bool unloading_occurred) {
  1507 bool nmethod::do_unloading_jvmci() {
  1519   if (_jvmci_installed_code != NULL) {
  1508   if (_jvmci_installed_code != NULL) {
  1520     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
  1509     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
  1521       if (_jvmci_installed_code_triggers_unloading) {
  1510       if (_jvmci_installed_code_triggers_unloading) {
  1522         // jweak reference processing has already cleared the referent
  1511         // jweak reference processing has already cleared the referent
  1523         make_unloaded(NULL);
  1512         make_unloaded(NULL);
  1531 }
  1520 }
  1532 #endif
  1521 #endif
  1533 
  1522 
  1534 // Iterate over metadata calling this function.   Used by RedefineClasses
  1523 // Iterate over metadata calling this function.   Used by RedefineClasses
  1535 void nmethod::metadata_do(void f(Metadata*)) {
  1524 void nmethod::metadata_do(void f(Metadata*)) {
  1536   address low_boundary = verified_entry_point();
       
  1537   if (is_not_entrant()) {
       
  1538     low_boundary += NativeJump::instruction_size;
       
  1539     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
       
  1540     // (See comment above.)
       
  1541   }
       
  1542   {
  1525   {
  1543     // Visit all immediate references that are embedded in the instruction stream.
  1526     // Visit all immediate references that are embedded in the instruction stream.
  1544     RelocIterator iter(this, low_boundary);
  1527     RelocIterator iter(this, oops_reloc_begin());
  1545     while (iter.next()) {
  1528     while (iter.next()) {
  1546       if (iter.type() == relocInfo::metadata_type ) {
  1529       if (iter.type() == relocInfo::metadata_type ) {
  1547         metadata_Relocation* r = iter.metadata_reloc();
  1530         metadata_Relocation* r = iter.metadata_reloc();
   1548         // In this loop, we must only follow the metadata directly embedded in
   1531         // In this loop, we must only follow the metadata directly embedded in
   1549         // the code.  Other metadata (oop_index>0) is seen as part of
   1532         // the code.  Other metadata (oop_index>0) is seen as part of
  1586 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
  1569 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
   1587   // make sure the oops are ready to receive visitors
   1570   // make sure the oops are ready to receive visitors
  1588   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
  1571   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
  1589   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
  1572   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
  1590 
  1573 
  1591   // If the method is not entrant or zombie then a JMP is plastered over the
       
  1592   // first few bytes.  If an oop in the old code was there, that oop
       
  1593   // should not get GC'd.  Skip the first few bytes of oops on
       
  1594   // not-entrant methods.
       
  1595   address low_boundary = verified_entry_point();
       
  1596   if (is_not_entrant()) {
       
  1597     low_boundary += NativeJump::instruction_size;
       
  1598     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
       
  1599     // (See comment above.)
       
  1600   }
       
  1601 
       
  1602   // Prevent extra code cache walk for platforms that don't have immediate oops.
  1574   // Prevent extra code cache walk for platforms that don't have immediate oops.
  1603   if (relocInfo::mustIterateImmediateOopsInCode()) {
  1575   if (relocInfo::mustIterateImmediateOopsInCode()) {
  1604     RelocIterator iter(this, low_boundary);
  1576     RelocIterator iter(this, oops_reloc_begin());
  1605 
  1577 
  1606     while (iter.next()) {
  1578     while (iter.next()) {
  1607       if (iter.type() == relocInfo::oop_type ) {
  1579       if (iter.type() == relocInfo::oop_type ) {
  1608         oop_Relocation* r = iter.oop_reloc();
  1580         oop_Relocation* r = iter.oop_reloc();
  1609         // In this loop, we must only follow those oops directly embedded in
  1581         // In this loop, we must only follow those oops directly embedded in
  1648           Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
  1620           Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
  1649         if (observed_mark_nmethods == required_mark_nmethods)
  1621         if (observed_mark_nmethods == required_mark_nmethods)
  1650           break;
  1622           break;
  1651       }
  1623       }
  1652       // Mark was clear when we first saw this guy.
  1624       // Mark was clear when we first saw this guy.
  1653       if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
  1625       LogTarget(Trace, gc, nmethod) lt;
       
  1626       if (lt.is_enabled()) {
       
  1627         LogStream ls(lt);
       
  1628         CompileTask::print(&ls, this, "oops_do, mark", /*short_form:*/ true);
       
  1629       }
  1654       return false;
  1630       return false;
  1655     }
  1631     }
  1656   }
  1632   }
  1657   // On fall through, another racing thread marked this nmethod before we did.
  1633   // On fall through, another racing thread marked this nmethod before we did.
  1658   return true;
  1634   return true;
  1659 }
  1635 }
  1660 
  1636 
  1661 void nmethod::oops_do_marking_prologue() {
  1637 void nmethod::oops_do_marking_prologue() {
  1662   if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
  1638   log_trace(gc, nmethod)("oops_do_marking_prologue");
  1663   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
  1639   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
  1664   // We use cmpxchg instead of regular assignment here because the user
  1640   // We use cmpxchg instead of regular assignment here because the user
  1665   // may fork a bunch of threads, and we need them all to see the same state.
  1641   // may fork a bunch of threads, and we need them all to see the same state.
  1666   nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL);
  1642   nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL);
  1667   guarantee(observed == NULL, "no races in this sequential code");
  1643   guarantee(observed == NULL, "no races in this sequential code");
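This hunk and the ones that follow replace the TraceScavenge flag checks with the gc+nmethod log tags (LogTarget(Trace, gc, nmethod) and log_trace(gc, nmethod)). Assuming standard Unified Logging syntax, the equivalent output is now requested with:

    java -Xlog:gc+nmethod=trace ...

rather than by setting the TraceScavenge flag tested in the deleted lines.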
  1673   while (cur != NMETHOD_SENTINEL) {
  1649   while (cur != NMETHOD_SENTINEL) {
  1674     assert(cur != NULL, "not NULL-terminated");
  1650     assert(cur != NULL, "not NULL-terminated");
  1675     nmethod* next = cur->_oops_do_mark_link;
  1651     nmethod* next = cur->_oops_do_mark_link;
  1676     cur->_oops_do_mark_link = NULL;
  1652     cur->_oops_do_mark_link = NULL;
  1677     DEBUG_ONLY(cur->verify_oop_relocations());
  1653     DEBUG_ONLY(cur->verify_oop_relocations());
  1678     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
  1654 
       
  1655     LogTarget(Trace, gc, nmethod) lt;
       
  1656     if (lt.is_enabled()) {
       
  1657       LogStream ls(lt);
       
  1658       CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
       
  1659     }
  1679     cur = next;
  1660     cur = next;
  1680   }
  1661   }
  1681   nmethod* required = _oops_do_mark_nmethods;
  1662   nmethod* required = _oops_do_mark_nmethods;
  1682   nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
  1663   nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
  1683   guarantee(observed == required, "no races in this sequential code");
  1664   guarantee(observed == required, "no races in this sequential code");
  1684   if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
  1665   log_trace(gc, nmethod)("oops_do_marking_epilogue");
  1685 }
  1666 }
  1686 
  1667 
  1687 class DetectScavengeRoot: public OopClosure {
  1668 class DetectScavengeRoot: public OopClosure {
  1688   bool     _detected_scavenge_root;
  1669   bool     _detected_scavenge_root;
       
  1670   nmethod* _print_nm;
  1689 public:
  1671 public:
  1690   DetectScavengeRoot() : _detected_scavenge_root(false)
  1672   DetectScavengeRoot(nmethod* nm) : _detected_scavenge_root(false), _print_nm(nm) {}
  1691   { NOT_PRODUCT(_print_nm = NULL); }
  1673 
  1692   bool detected_scavenge_root() { return _detected_scavenge_root; }
  1674   bool detected_scavenge_root() { return _detected_scavenge_root; }
  1693   virtual void do_oop(oop* p) {
  1675   virtual void do_oop(oop* p) {
  1694     if ((*p) != NULL && Universe::heap()->is_scavengable(*p)) {
  1676     if ((*p) != NULL && Universe::heap()->is_scavengable(*p)) {
  1695       NOT_PRODUCT(maybe_print(p));
  1677       NOT_PRODUCT(maybe_print(p));
  1696       _detected_scavenge_root = true;
  1678       _detected_scavenge_root = true;
  1697     }
  1679     }
  1698   }
  1680   }
  1699   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  1681   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  1700 
  1682 
  1701 #ifndef PRODUCT
  1683 #ifndef PRODUCT
  1702   nmethod* _print_nm;
       
  1703   void maybe_print(oop* p) {
  1684   void maybe_print(oop* p) {
  1704     if (_print_nm == NULL)  return;
  1685     LogTarget(Trace, gc, nmethod) lt;
  1705     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
  1686     if (lt.is_enabled()) {
  1706     tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
  1687       LogStream ls(lt);
  1707                   p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
  1688       if (!_detected_scavenge_root) {
  1708                   p2i(*p), p2i(p));
  1689         CompileTask::print(&ls, _print_nm, "new scavenge root", /*short_form:*/ true);
  1709     (*p)->print();
  1690       }
       
  1691       ls.print("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ") ",
       
  1692                p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
       
  1693                p2i(*p), p2i(p));
       
  1694       (*p)->print_value_on(&ls);
       
  1695       ls.cr();
       
  1696     }
  1710   }
  1697   }
  1711 #endif //PRODUCT
  1698 #endif //PRODUCT
  1712 };
  1699 };
  1713 
  1700 
  1714 bool nmethod::detect_scavenge_root_oops() {
  1701 bool nmethod::detect_scavenge_root_oops() {
  1715   DetectScavengeRoot detect_scavenge_root;
  1702   DetectScavengeRoot detect_scavenge_root(this);
  1716   NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
       
  1717   oops_do(&detect_scavenge_root);
  1703   oops_do(&detect_scavenge_root);
  1718   return detect_scavenge_root.detected_scavenge_root();
  1704   return detect_scavenge_root.detected_scavenge_root();
  1719 }
  1705 }
  1720 
  1706 
  1721 inline bool includes(void* p, void* from, void* to) {
  1707 inline bool includes(void* p, void* from, void* to) {