/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
1483 return false; |
1483 return false; |
1484 } |
1484 } |
1485 |
1485 |
1486 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) { |
1486 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) { |
1487 // Compiled code |
1487 // Compiled code |
1488 { |
1488 |
1489 RelocIterator iter(this, low_boundary); |
1489 // Prevent extra code cache walk for platforms that don't have immediate oops. |
1490 while (iter.next()) { |
1490 if (relocInfo::mustIterateImmediateOopsInCode()) { |
1491 if (iter.type() == relocInfo::oop_type) { |
1491 RelocIterator iter(this, low_boundary); |
1492 if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) { |
1492 while (iter.next()) { |
1493 return true; |
1493 if (iter.type() == relocInfo::oop_type) { |
|
1494 if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) { |
|
1495 return true; |
|
1496 } |
1494 } |
1497 } |
1495 } |
1498 } |
1496 } |
|
1497 } |
1499 } |
1498 |
1500 |
1499 return do_unloading_scopes(is_alive, unloading_occurred); |
1501 return do_unloading_scopes(is_alive, unloading_occurred); |
1500 } |
1502 } |
1501 |
1503 |
1582 low_boundary += NativeJump::instruction_size; |
1584 low_boundary += NativeJump::instruction_size; |
1583 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. |
1585 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. |
1584 // (See comment above.) |
1586 // (See comment above.) |
1585 } |
1587 } |
1586 |
1588 |
1587 RelocIterator iter(this, low_boundary); |
1589 // Prevent extra code cache walk for platforms that don't have immediate oops. |
1588 |
1590 if (relocInfo::mustIterateImmediateOopsInCode()) { |
1589 while (iter.next()) { |
1591 RelocIterator iter(this, low_boundary); |
1590 if (iter.type() == relocInfo::oop_type ) { |
1592 |
1591 oop_Relocation* r = iter.oop_reloc(); |
1593 while (iter.next()) { |
1592 // In this loop, we must only follow those oops directly embedded in |
1594 if (iter.type() == relocInfo::oop_type ) { |
1593 // the code. Other oops (oop_index>0) are seen as part of scopes_oops. |
1595 oop_Relocation* r = iter.oop_reloc(); |
1594 assert(1 == (r->oop_is_immediate()) + |
1596 // In this loop, we must only follow those oops directly embedded in |
1595 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), |
1597 // the code. Other oops (oop_index>0) are seen as part of scopes_oops. |
1596 "oop must be found in exactly one place"); |
1598 assert(1 == (r->oop_is_immediate()) + |
1597 if (r->oop_is_immediate() && r->oop_value() != NULL) { |
1599 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), |
1598 f->do_oop(r->oop_addr()); |
1600 "oop must be found in exactly one place"); |
|
1601 if (r->oop_is_immediate() && r->oop_value() != NULL) { |
|
1602 f->do_oop(r->oop_addr()); |
|
1603 } |
1599 } |
1604 } |
1600 } |
1605 } |
1601 } |
1606 } |
1602 |
1607 |
1603 // Scopes |
1608 // Scopes |
1618 // This code must be MP safe, because it is used from parallel GC passes. |
1623 // This code must be MP safe, because it is used from parallel GC passes. |
1619 bool nmethod::test_set_oops_do_mark() { |
1624 bool nmethod::test_set_oops_do_mark() { |
1620 assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called"); |
1625 assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called"); |
1621 if (_oops_do_mark_link == NULL) { |
1626 if (_oops_do_mark_link == NULL) { |
1622 // Claim this nmethod for this thread to mark. |
1627 // Claim this nmethod for this thread to mark. |
1623 if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) { |
1628 if (Atomic::replace_if_null(NMETHOD_SENTINEL, &_oops_do_mark_link)) { |
1624 // Atomically append this nmethod (now claimed) to the head of the list: |
1629 // Atomically append this nmethod (now claimed) to the head of the list: |
1625 nmethod* observed_mark_nmethods = _oops_do_mark_nmethods; |
1630 nmethod* observed_mark_nmethods = _oops_do_mark_nmethods; |
1626 for (;;) { |
1631 for (;;) { |
1627 nmethod* required_mark_nmethods = observed_mark_nmethods; |
1632 nmethod* required_mark_nmethods = observed_mark_nmethods; |
1628 _oops_do_mark_link = required_mark_nmethods; |
1633 _oops_do_mark_link = required_mark_nmethods; |