@@ -20,10 +20,12 @@
  * or visit www.oracle.com if you need additional information or have any
  * questions.
  */
 
 #include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "classfile/classLoaderDataGraph.hpp"
 #include "gc/z/zBarrier.inline.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zGranuleMap.inline.hpp"
 #include "gc/z/zHeapIterator.hpp"
 #include "gc/z/zOop.inline.hpp"
@@ -49,24 +51,35 @@
     _map.set_bit(index);
     return true;
   }
 };
 
+template <bool Concurrent, bool Weak>
 class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
 private:
   ZHeapIterator* const _iter;
 
+  oop load_oop(oop* p) {
+    if (Weak) {
+      return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
+    }
+
+    if (Concurrent) {
+      return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
+    }
+
+    return RawAccess<>::oop_load(p);
+  }
+
 public:
   ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
       _iter(iter) {}
 
   virtual void do_oop(oop* p) {
-    // Load barrier needed here, even on non-concurrent strong roots,
-    // for the same reason we need fixup_partial_loads() in ZHeap::mark_end().
-    const oop obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
+    const oop obj = load_oop(p);
     _iter->push(obj);
   }
 
   virtual void do_oop(narrowOop* p) {
     ShouldNotReachHere();
   }
 };
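The templated root closure folds the load-path choice into the type: weak roots take a phantom-strength NativeAccess load, concurrent strong roots an AS_NO_KEEPALIVE NativeAccess load, and non-concurrent strong roots a bare RawAccess load. A minimal standalone sketch of the compile-time dispatch pattern (toy names and values, not HotSpot code):

#include <cstdio>

// Toy stand-in for load_oop() above: Concurrent and Weak are template
// parameters, so each instantiation compiles down to exactly one of the
// three load paths and the dead branches fold away.
template <bool Concurrent, bool Weak>
int load(const int* p) {
  if (Weak) {
    return *p + 2;  // stands in for the ON_PHANTOM_OOP_REF load
  }
  if (Concurrent) {
    return *p + 1;  // stands in for the NativeAccess<AS_NO_KEEPALIVE> load
  }
  return *p;        // stands in for the RawAccess<> load
}

int main() {
  const int v = 40;
  printf("%d %d %d\n",
         load<false, false>(&v),  // non-concurrent strong root
         load<true, false>(&v),   // concurrent strong root
         load<true, true>(&v));   // weak root
  return 0;
}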
@@ -73,28 +86,28 @@
 
-class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
+template <bool VisitReferents>
+class ZHeapIteratorOopClosure : public ClaimMetadataVisitingOopIterateClosure {
 private:
   ZHeapIterator* const _iter;
   const oop            _base;
-  const bool           _visit_referents;
 
-  oop load_oop(oop* p) const {
-    if (_visit_referents) {
-      return HeapAccess<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>::oop_load_at(_base, _base->field_offset(p));
-    } else {
-      return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
-    }
+  oop load_oop(oop* p) {
+    if (VisitReferents) {
+      return HeapAccess<AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
+    }
+
+    return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
   }
 
 public:
-  ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base, bool visit_referents) :
+  ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
+      ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other),
       _iter(iter),
-      _base(base),
-      _visit_referents(visit_referents) {}
+      _base(base) {}
 
   virtual ReferenceIterationMode reference_iteration_mode() {
-    return _visit_referents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
+    return VisitReferents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
  }
 
   virtual void do_oop(oop* p) {
     const oop obj = load_oop(p);
     _iter->push(obj);
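For referent fields the closure cannot simply load through the interior pointer p: with ON_UNKNOWN_OOP_REF the Access API decides the reference strength from the base object's class, so load_oop() reconstructs a (base, offset) pair via _base->field_offset(p). A rough illustration of that pointer-to-offset step (toy layout; HotSpot's field_offset() differs in detail):

#include <cstddef>
#include <cstdio>

// Illustrative layout only; HotSpot's oopDesc and field_offset() differ.
struct ToyObject {
  long header;
  int  referent;  // stands in for java.lang.ref.Reference.referent
};

// Recover the field offset from an interior pointer, as load_oop() does
// with _base->field_offset(p) before HeapAccess<...>::oop_load_at().
static ptrdiff_t field_offset(const ToyObject* base, const void* field) {
  return static_cast<const char*>(field) - reinterpret_cast<const char*>(base);
}

int main() {
  ToyObject obj = {0, 42};
  printf("referent offset = %td\n", field_offset(&obj, &obj.referent));
  return 0;
}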
@@ -109,20 +122,20 @@
     return false;
   }
 #endif
 };
 
-ZHeapIterator::ZHeapIterator(bool visit_referents) :
+ZHeapIterator::ZHeapIterator() :
     _visit_stack(),
-    _visit_map(),
-    _visit_referents(visit_referents) {}
+    _visit_map() {}
 
 ZHeapIterator::~ZHeapIterator() {
   ZVisitMapIterator iter(&_visit_map);
   for (ZHeapIteratorBitMap* map; iter.next(&map);) {
     delete map;
   }
+  ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other);
 }
 
 static size_t object_index_max() {
   return ZGranuleSize >> ZObjectAlignmentSmallShift;
 }
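Switching the field closure's base class to ClaimMetadataVisitingOopIterateClosure means each visited object's ClassLoaderData is claimed with the _claim_other bit, so metadata roots are pushed at most once per iteration; the clear_claimed_marks() call added to the destructor resets those bits for the next iteration. A minimal sketch of that claim/clear protocol under assumed semantics (ToyCLD and the bit value are made up; the real logic lives in ClassLoaderData):

#include <atomic>
#include <cstdio>

// Hypothetical stand-in for a ClassLoaderData's claim word; the real
// claiming code lives in ClassLoaderData, not here.
struct ToyCLD {
  std::atomic<int> _claimed{0};

  // The first claimant of a bit wins and visits the metadata; later
  // visits of objects with the same CLD skip it.
  bool try_claim(int bit) {
    int old = _claimed.load();
    while ((old & bit) == 0) {
      if (_claimed.compare_exchange_weak(old, old | bit)) {
        return true;
      }
    }
    return false;
  }

  // What clear_claimed_marks() does per CLD: make the bit reusable.
  void clear_claim(int bit) {
    _claimed.fetch_and(~bit);
  }
};

int main() {
  const int claim_other = 4;  // illustrative bit value
  ToyCLD cld;
  printf("%d %d\n", cld.try_claim(claim_other), cld.try_claim(claim_other));  // 1 0
  cld.clear_claim(claim_other);
  printf("%d\n", cld.try_claim(claim_other));                                // 1
  return 0;
}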
@@ -160,51 +173,49 @@
 
   // Push
   _visit_stack.push(obj);
 }
 
+template <typename RootsIterator, bool Concurrent, bool Weak>
+void ZHeapIterator::push_roots() {
+  ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(this);
+  RootsIterator roots;
+  roots.oops_do(&cl);
+}
+
+template <bool VisitReferents>
+void ZHeapIterator::push_fields(oop obj) {
+  ZHeapIteratorOopClosure<VisitReferents> cl(this, obj);
+  obj->oop_iterate(&cl);
+}
+
+template <bool VisitWeaks>
 void ZHeapIterator::objects_do(ObjectClosure* cl) {
-  // Note that the heap iterator visits all reachable objects, including
-  // objects that might be unreachable from the application, such as a
-  // not yet cleared JNIWeakGloablRef. However, also note that visiting
-  // the JVMTI tag map is a requirement to make sure we visit all tagged
-  // objects, even those that might now have become phantom reachable.
-  // If we didn't do this the application would have expected to see
-  // ObjectFree events for phantom reachable objects in the tag map.
-
   ZStatTimerDisable disable;
-  ZHeapIteratorRootOopClosure root_cl(this);
 
-  // Push strong roots onto stack
-  {
-    ZRootsIterator roots;
-    roots.oops_do(&root_cl);
-  }
-
-  {
-    ZConcurrentRootsIterator roots;
-    roots.oops_do(&root_cl);
-  }
-
-  // Push weak roots onto stack
-  {
-    ZWeakRootsIterator roots;
-    roots.oops_do(&root_cl);
-  }
-
-  {
-    ZConcurrentWeakRootsIterator roots;
-    roots.oops_do(&root_cl);
+  // Push roots to visit
+  push_roots<ZRootsIterator,                     false /* Concurrent */, false /* Weak */>();
+  push_roots<ZConcurrentRootsIteratorClaimOther, true  /* Concurrent */, false /* Weak */>();
+  if (VisitWeaks) {
+    push_roots<ZWeakRootsIterator,           false /* Concurrent */, true /* Weak */>();
+    push_roots<ZConcurrentWeakRootsIterator, true  /* Concurrent */, true /* Weak */>();
   }
 
   // Drain stack
   while (!_visit_stack.is_empty()) {
     const oop obj = _visit_stack.pop();
 
-    // Visit
+    // Visit object
     cl->do_object(obj);
 
-    // Push members to visit
-    ZHeapIteratorOopClosure push_cl(this, obj, _visit_referents);
-    obj->oop_iterate(&push_cl);
+    // Push fields to visit
+    push_fields<VisitWeaks>(obj);
   }
 }
+
+void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_weaks) {
+  if (visit_weaks) {
+    objects_do<true /* VisitWeaks */>(cl);
+  } else {
+    objects_do<false /* VisitWeaks */>(cl);
+  }
+}
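For context, the new two-argument objects_do() is the entry point a caller such as ZHeap::object_iterate() would use. A hedged sketch of such a caller (ZCountClosure and count_objects are hypothetical; ObjectClosure and log_info are HotSpot's):

// Hypothetical closure that counts every object the iterator visits.
class ZCountClosure : public ObjectClosure {
private:
  size_t _count;

public:
  ZCountClosure() : _count(0) {}

  virtual void do_object(oop obj) {
    _count++;
  }

  size_t count() const {
    return _count;
  }
};

// Sketch of a caller in the shape of ZHeap::object_iterate(): construct
// the iterator, run it, and let the destructor clear the claimed marks.
static void count_objects(bool visit_weaks) {
  ZCountClosure object_cl;
  ZHeapIterator iter;
  iter.objects_do(&object_cl, visit_weaks);
  log_info(gc)("Visited " SIZE_FORMAT " objects", object_cl.count());
}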