/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
jlong            ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
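  // Note (editor's): the macro pair below expands to exactly one of the two
  // constructor calls -- COMPILER2 builds get LRUMaxHeapPolicy(), all other
  // builds get LRUCurrentHeapPolicy().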
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1U, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
            _max_num_q * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  _discoveredCleanerRefs = &_discoveredPhantomRefs[_max_num_q];
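  // Layout note (editor's): _discovered_refs is subclass-major, so queue j of
  // subclass s lives at _discovered_refs[s * _max_num_q + j]. For example,
  // with _max_num_q == 4, weak queue 2 is _discovered_refs[1 * 4 + 2].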

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    warning("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
            _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
  size_t total = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    total += lists[i].length();
  }
  return total;
}

ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor,
  GCTimer*                     gc_timer,
  GCId                         gc_id) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  bool trace_time = PrintGCDetails && PrintReferenceGC;

  // Soft references
  size_t soft_count = 0;
  {
    GCTraceTime tt("SoftReference", trace_time, false, gc_timer, gc_id);
    soft_count =
      process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  size_t weak_count = 0;
  {
    GCTraceTime tt("WeakReference", trace_time, false, gc_timer, gc_id);
    weak_count =
      process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  size_t final_count = 0;
  {
    GCTraceTime tt("FinalReference", trace_time, false, gc_timer, gc_id);
    final_count =
      process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  size_t phantom_count = 0;
  {
    GCTraceTime tt("PhantomReference", trace_time, false, gc_timer, gc_id);
    phantom_count =
      process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                 is_alive, keep_alive, complete_gc, task_executor);

    // Process cleaners, but include them in phantom statistics. We expect
    // Cleaner references to be temporary, and don't want to deal with
    // possible incompatibilities arising from making it more visible.
    phantom_count +=
      process_discovered_reflist(_discoveredCleanerRefs, NULL, true,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification reads; see bug #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }

  return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count);
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  complete_gc->do_void();
}

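// Helper templated on the in-heap pointer representation: T is narrowOop
// when UseCompressedOops is set and oop otherwise; see the dispatch in
// enqueue_discovered_references() below.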
template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the post-barrier on pending_list_addr missed in
  // enqueue_discovered_reflist.
  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  //
  // The Java threads will see the Reference objects linked together through
  // the discovered field. Instead of trying to do the write barrier updates
  // in all places in the reference processor where we manipulate the discovered
  // field we make sure to do the barrier here where we anyway iterate through
  // all linked Reference objects. Note that it is important to not dirty any
  // cards during reference processing since this will cause card table
  // verification to fail for G1.
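  //
  // State note (editor's): an active Reference has next == NULL; the
  // self-loop stored into next below is what marks it inactive before it
  // is handed over to the pending list.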
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, p2i(refs_list.head()));
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  // Walk down the list, self-looping the next field
  // so that the References are not considered active.
  while (obj != next_d) {
    obj = next_d;
    assert(obj->is_instanceRef(), "should be reference object");
    next_d = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                             p2i(obj), p2i(next_d));
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "Reference not active; should not be discovered");
    // Self-loop next, so as to make Ref not active.
    java_lang_ref_Reference::set_next_raw(obj, obj);
    if (next_d != obj) {
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
    } else {
      // This is the last object.
      // Swap refs_list into pending_list_addr and
      // set obj's discovered to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
      java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
    }
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
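    // Example (editor's): with _n_queues == 4, worker 1 visits indices
    // 1, 5, 9, ... -- i.e. queue 1 of each Reference subclass in turn.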
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discovered_refs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: enqueue the discovered lists one at a time
    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
      _discovered_refs[i].set_head(NULL);
      _discovered_refs[i].set_length(0);
    }
  }
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
  _next = discovered;
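  // NB (editor's): the iterator's _next is the successor on the discovered
  // list (the discovered field), not the Reference.next field that encodes
  // the active/inactive state.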
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         err_msg("Expected an oop%s for referent field at " PTR_FORMAT,
                 (allow_null_referent ? " or NULL" : ""),
                 p2i(_referent)));
}

void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  oop_store_raw(_prev_next, new_next);
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                               p2i(iter.obj()), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
                             " discovered Refs by policy, from list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(refs_list.head()));
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               p2i(iter.obj()), iter.obj()->klass()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                             " Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(refs_list.head()));
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
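      // (editor's note: under concurrent discovery a mutator may have made
      // this Reference inactive, in which case its next field can point at
      // another Reference; treating next as a strong edge below keeps that
      // object alive as well)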
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                             " Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(refs_list.head()));
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             p2i(iter.obj()), iter.obj()->klass()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  clear_discovered_references(refs_list);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
    }
    abandon_partial_discovered_list(_discovered_refs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Refs into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
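// Sketch of the strategy (editor's): compute avg_refs = total/_num_q + 1,
// then drain every queue above avg_refs (and every queue at index >= _num_q
// entirely) into under-full queues, advancing round-robin via to_idx.
// E.g. 10 refs and _num_q == 4 gives avg_refs == 3.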
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (uint i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = " SIZE_FORMAT, total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Refs if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = " SIZE_FORMAT, balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
  balance_queues(_discoveredCleanerRefs);
}

size_t
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may differ from the number used for processing,
  // so don't rely on _num_q < _max_num_q as part of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }

  size_t total_list_count = total_count(refs_lists);

  if (PrintReferenceGC && PrintGCDetails) {
    gclog_or_tty->print(", " SIZE_FORMAT " refs", total_list_count);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (uint i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }

  return total_list_count;
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
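      // (list stays NULL; discover_reference() then returns false and the
      // object is scanned as an ordinary object with strong references)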
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_CLEANER:
      list = &_discoveredCleanerRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
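  // In effect (editor's sketch), atomically:
  //   if (*discovered_addr == NULL) { *discovered_addr = next_discovered; }
  // Only the thread whose CAS observed NULL goes on to link obj into refs_list.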
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                             p2i(obj), obj->klass()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             p2i(obj), obj->klass()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 p2i(referent), p2i(obj), da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             p2i(obj), obj->klass()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    oop_store_raw(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             p2i(obj), obj->klass()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield,
  GCTimer* gc_timer,
  GCId     gc_id) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

  // Soft references
  {
    GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }

    // Cleaner references. Included in timing for phantom references. We
    // expect Cleaner references to be temporary, and don't want to deal with
    // possible incompatibilities arising from making it more visible.
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredCleanerRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL, or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honor the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               p2i(iter.obj()), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
                             " Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(refs_list.head()));
    }
  )
}

const char* ReferenceProcessor::list_name(uint i) {
  assert(i <= _max_num_q * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_q;
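  // i / _max_num_q recovers the subclass group from the flat index -- the
  // inverse of the subclass-major layout set up in the constructor.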
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
    case 4: return "CleanerRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    clear_discovered_references(_discovered_refs[i]);
  }
}

#endif // PRODUCT