--- a/src/hotspot/share/gc/z/zHeapIterator.cpp Thu Oct 17 20:27:44 2019 +0100
+++ b/src/hotspot/share/gc/z/zHeapIterator.cpp Thu Oct 17 20:53:35 2019 +0100
@@ -22,6 +22,8 @@
*/
#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "classfile/classLoaderDataGraph.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
@@ -51,18 +53,33 @@
}
};
+template <bool Concurrent, bool Weak>
class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
private:
ZHeapIterator* const _iter;
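+  // Select load semantics for the root slot: weak roots must not keep
+  // otherwise unreachable objects alive, so they use a phantom load
+  // without keepalive. Concurrent roots need a load barrier via
+  // NativeAccess, while non-concurrent strong roots can be loaded raw.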
+ oop load_oop(oop* p) {
+ if (Weak) {
+ return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
+ }
+
+ if (Concurrent) {
+ return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
+ }
+
+ return RawAccess<>::oop_load(p);
+ }
+
public:
ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
_iter(iter) {}
virtual void do_oop(oop* p) {
- // Load barrier needed here, even on non-concurrent strong roots,
- // for the same reason we need fixup_partial_loads() in ZHeap::mark_end().
- const oop obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
+ const oop obj = load_oop(p);
_iter->push(obj);
}
@@ -71,28 +84,34 @@
}
};
-class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
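+// Visits the fields of each object and claims its ClassLoaderData using
+// the _claim_other value, so a class loader is visited at most once per
+// iteration without disturbing the claim values used by marking.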
+template <bool VisitReferents>
+class ZHeapIteratorOopClosure : public ClaimMetadataVisitingOopIterateClosure {
private:
ZHeapIterator* const _iter;
const oop _base;
- const bool _visit_referents;
- oop load_oop(oop* p) const {
- if (_visit_referents) {
- return HeapAccess<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>::oop_load_at(_base, _base->field_offset(p));
- } else {
- return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
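+  // A Reference referent field has statically unknown strength, so
+  // ON_UNKNOWN_OOP_REF lets the access layer resolve it at runtime.
+  // All loads use AS_NO_KEEPALIVE to avoid keeping objects alive.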
+ oop load_oop(oop* p) {
+ if (VisitReferents) {
+ return HeapAccess<AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
}
+
+ return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
}
public:
- ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base, bool visit_referents) :
+ ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
+ ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other),
_iter(iter),
- _base(base),
- _visit_referents(visit_referents) {}
+ _base(base) {}
virtual ReferenceIterationMode reference_iteration_mode() {
- return _visit_referents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
+ return VisitReferents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
}
virtual void do_oop(oop* p) {
@@ -111,16 +124,18 @@
#endif
};
-ZHeapIterator::ZHeapIterator(bool visit_referents) :
+ZHeapIterator::ZHeapIterator() :
_visit_stack(),
- _visit_map(),
- _visit_referents(visit_referents) {}
+ _visit_map() {}
ZHeapIterator::~ZHeapIterator() {
ZVisitMapIterator iter(&_visit_map);
for (ZHeapIteratorBitMap* map; iter.next(&map);) {
delete map;
}
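+  // Clear the claimed marks set while iterating, so that the class
+  // loaders can be claimed again by a subsequent heap iteration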
+ ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other);
}
static size_t object_index_max() {
@@ -162,49 +175,54 @@
_visit_stack.push(obj);
}
-void ZHeapIterator::objects_do(ObjectClosure* cl) {
- // Note that the heap iterator visits all reachable objects, including
- // objects that might be unreachable from the application, such as a
- // not yet cleared JNIWeakGloablRef. However, also note that visiting
- // the JVMTI tag map is a requirement to make sure we visit all tagged
- // objects, even those that might now have become phantom reachable.
- // If we didn't do this the application would have expected to see
- // ObjectFree events for phantom reachable objects in the tag map.
-
- ZStatTimerDisable disable;
- ZHeapIteratorRootOopClosure root_cl(this);
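+// Pushes all oops reported by the given roots iterator onto the visit
+// stack, with load semantics selected by the Concurrent and Weak flags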
+template <typename RootsIterator, bool Concurrent, bool Weak>
+void ZHeapIterator::push_roots() {
+ ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(this);
+ RootsIterator roots;
+ roots.oops_do(&cl);
+}
- // Push strong roots onto stack
- {
- ZRootsIterator roots;
- roots.oops_do(&root_cl);
- }
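+// Pushes the oop fields of obj, including Reference referents when
+// VisitReferents is set, onto the visit stack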
+template <bool VisitReferents>
+void ZHeapIterator::push_fields(oop obj) {
+ ZHeapIteratorOopClosure<VisitReferents> cl(this, obj);
+ obj->oop_iterate(&cl);
+}
- {
- ZConcurrentRootsIterator roots;
- roots.oops_do(&root_cl);
- }
+template <bool VisitWeaks>
+void ZHeapIterator::objects_do(ObjectClosure* cl) {
+ ZStatTimerDisable disable;
- // Push weak roots onto stack
- {
- ZWeakRootsIterator roots;
- roots.oops_do(&root_cl);
- }
-
- {
- ZConcurrentWeakRootsIterator roots;
- roots.oops_do(&root_cl);
+  // Push roots to visit. Concurrent strong roots claim ClassLoaderData
+  // with _claim_other, the same claim value cleared by the destructor.
+ push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>();
+ push_roots<ZConcurrentRootsIteratorClaimOther, true /* Concurrent */, false /* Weak */>();
+ if (VisitWeaks) {
+ push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
+ push_roots<ZConcurrentWeakRootsIterator, true /* Concurrent */, true /* Weak */>();
}
// Drain stack
while (!_visit_stack.is_empty()) {
const oop obj = _visit_stack.pop();
- // Visit
+ // Visit object
cl->do_object(obj);
- // Push members to visit
- ZHeapIteratorOopClosure push_cl(this, obj, _visit_referents);
- obj->oop_iterate(&push_cl);
+ // Push fields to visit
+ push_fields<VisitWeaks>(obj);
}
}
+
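+// Runtime entry point, dispatching to a specialization so that the
+// VisitWeaks decision is made once rather than per visited object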
+void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_weaks) {
+ if (visit_weaks) {
+ objects_do<true /* VisitWeaks */>(cl);
+ } else {
+ objects_do<false /* VisitWeaks */>(cl);
+ }
+}