46 // (with appropriate modifications) to any "non-convex interval". |
46 // (with appropriate modifications) to any "non-convex interval". |
47 |
47 |
48 // forward references |
48 // forward references |
49 class ReferencePolicy; |
49 class ReferencePolicy; |
50 class AbstractRefProcTaskExecutor; |
50 class AbstractRefProcTaskExecutor; |
51 class DiscoveredList; |
51 |
|
52 // List of discovered references. |
|
53 class DiscoveredList { |
|
54 public: |
|
55 DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { } |
|
56 oop head() const { |
|
57 return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) : |
|
58 _oop_head; |
|
59 } |
|
60 HeapWord* adr_head() { |
|
61 return UseCompressedOops ? (HeapWord*)&_compressed_head : |
|
62 (HeapWord*)&_oop_head; |
|
63 } |
|
64 void set_head(oop o) { |
|
65 if (UseCompressedOops) { |
|
66 // Must compress the head ptr. |
|
67 _compressed_head = oopDesc::encode_heap_oop(o); |
|
68 } else { |
|
69 _oop_head = o; |
|
70 } |
|
71 } |
|
72 bool is_empty() const { return head() == NULL; } |
|
73 size_t length() { return _len; } |
|
74 void set_length(size_t len) { _len = len; } |
|
75 void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); } |
|
76 void dec_length(size_t dec) { _len -= dec; } |
|
77 private: |
|
78 // Set value depending on UseCompressedOops. This could be a template class |
|
79 // but then we have to fix all the instantiations and declarations that use this class. |
|
80 oop _oop_head; |
|
81 narrowOop _compressed_head; |
|
82 size_t _len; |
|
83 }; |
|
84 |
|
// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;        // the list being iterated over
  HeapWord*          _prev_next;        // address of the slot that points to _ref;
                                        // initially the list's own head slot
  oop                _prev;             // Reference preceding _ref (NULL at the head)
  oop                _ref;              // current Reference object
  HeapWord*          _discovered_addr;  // address of _ref's discovered field (set by load_ptrs)
  oop                _next;             // contents of _ref's discovered field (set by load_ptrs)
  HeapWord*          _referent_addr;    // address of _ref's referent field (set by load_ptrs)
  oop                _referent;         // contents of _ref's referent field (set by load_ptrs)
  OopClosure*        _keep_alive;       // applied to fields that must be kept alive
  BoolObjectClosure* _is_alive;         // liveness predicate for oops

  DEBUG_ONLY(
  oop _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t _processed;  // references visited (counted in move_to_next)
  size_t _removed;    // references unlinked from the list
  )

public:
  // Positions the iterator at the head of refs_list.
  // NOTE(review): the initializer order below differs from the member
  // declaration order (-Wreorder territory); members are nevertheless
  // initialized in declaration order, which is harmless here because no
  // initializer reads another member.
  inline DiscoveredListIterator(DiscoveredList& refs_list,
                                OopClosure* keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // End Of List: true while there is a current reference to examine.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference (fills _discovered_addr, _next,
  // _referent_addr, _referent; defined out of line).
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference, keeping the current one on the
  // list: the current ref's discovered slot becomes the new _prev_next.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list (defined out of line).
  void remove();

  // Make the Reference object active again (defined out of line).
  void make_active();

  // Make the referent alive by applying the keep-alive closure to the
  // referent field, using the width that matches UseCompressedOops.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer (defined out of line).
  void clear_referent();

  // Statistics (non-product builds only).
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const { return _removed; }
  )

  // Advance _ref to _next. A self-linked entry (_ref == _next) marks the
  // end of the list; the debug-only assert catches genuine cycles.
  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }

};
52 |
206 |
53 class ReferenceProcessor : public CHeapObj { |
207 class ReferenceProcessor : public CHeapObj { |
54 protected: |
208 protected: |
55 // Compatibility with pre-4965777 JDK's |
209 // Compatibility with pre-4965777 JDK's |
56 static bool _pending_list_uses_discovered_field; |
210 static bool _pending_list_uses_discovered_field; |
57 MemRegion _span; // (right-open) interval of heap |
211 |
58 // subject to wkref discovery |
212 MemRegion _span; // (right-open) interval of heap |
59 bool _discovering_refs; // true when discovery enabled |
213 // subject to wkref discovery |
60 bool _discovery_is_atomic; // if discovery is atomic wrt |
214 |
61 // other collectors in configuration |
215 bool _discovering_refs; // true when discovery enabled |
62 bool _discovery_is_mt; // true if reference discovery is MT. |
216 bool _discovery_is_atomic; // if discovery is atomic wrt |
|
217 // other collectors in configuration |
|
218 bool _discovery_is_mt; // true if reference discovery is MT. |
|
219 |
63 // If true, setting "next" field of a discovered refs list requires |
220 // If true, setting "next" field of a discovered refs list requires |
64 // write barrier(s). (Must be true if used in a collector in which |
221 // write barrier(s). (Must be true if used in a collector in which |
65 // elements of a discovered list may be moved during discovery: for |
222 // elements of a discovered list may be moved during discovery: for |
66 // example, a collector like Garbage-First that moves objects during a |
223 // example, a collector like Garbage-First that moves objects during a |
67 // long-term concurrent marking phase that does weak reference |
224 // long-term concurrent marking phase that does weak reference |
68 // discovery.) |
225 // discovery.) |
69 bool _discovered_list_needs_barrier; |
226 bool _discovered_list_needs_barrier; |
70 BarrierSet* _bs; // Cached copy of BarrierSet. |
227 |
71 bool _enqueuing_is_done; // true if all weak references enqueued |
228 BarrierSet* _bs; // Cached copy of BarrierSet. |
72 bool _processing_is_mt; // true during phases when |
229 bool _enqueuing_is_done; // true if all weak references enqueued |
73 // reference processing is MT. |
230 bool _processing_is_mt; // true during phases when |
74 int _next_id; // round-robin mod _num_q counter in |
231 // reference processing is MT. |
75 // support of work distribution |
232 int _next_id; // round-robin mod _num_q counter in |
76 |
233 // support of work distribution |
77 // For collectors that do not keep GC marking information |
234 |
|
235 // For collectors that do not keep GC liveness information |
78 // in the object header, this field holds a closure that |
236 // in the object header, this field holds a closure that |
79 // helps the reference processor determine the reachability |
237 // helps the reference processor determine the reachability |
80 // of an oop (the field is currently initialized to NULL for |
238 // of an oop. It is currently initialized to NULL for all |
81 // all collectors but the CMS collector). |
239 // collectors except for CMS and G1. |
82 BoolObjectClosure* _is_alive_non_header; |
240 BoolObjectClosure* _is_alive_non_header; |
83 |
241 |
84 // Soft ref clearing policies |
242 // Soft ref clearing policies |
85 // . the default policy |
243 // . the default policy |
86 static ReferencePolicy* _default_soft_ref_policy; |
244 static ReferencePolicy* _default_soft_ref_policy; |