  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
  bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
  return (on_weak_ref || unknown) && (keep_alive || is_traversal_mode);
}

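// SATB pre-write barrier: while concurrent marking is in progress, enqueue the
// previous value of the field so the snapshot-at-the-beginning invariant still
// holds after the field is overwritten.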
template <class T>
inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
  if (_heap->is_concurrent_mark_in_progress()) {
    T heap_oop = RawAccess<>::oop_load(field);
    if (!CompressedOops::is_null(heap_oop)) {
      enqueue(CompressedOops::decode(heap_oop));
    }
  }
}

// These are the more general virtual versions.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}
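// Post-write barrier hook: Shenandoah does no actual work here, it only asserts
// that the store location and the stored value satisfy the collection-set and
// forwarding invariants.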
|
void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
  shenandoah_assert_not_forwarded_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
}

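// Load-reference barrier: while the heap may contain forwarded objects (during
// or after evacuation), a loaded reference must be resolved to the canonical
// copy; otherwise the object is returned unchanged.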
|
oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
  if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
    return load_reference_barrier_impl(obj);
  } else {
    return obj;
  }
}
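// Store-value barrier for traversal mode: while a concurrent traversal GC is in
// progress, the value being stored is enqueued so the traversal marks it.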
void ShenandoahBarrierSet::storeval_barrier(oop obj) {
  if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
    enqueue(obj);
  }
}
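// Keep-alive barrier for weak/phantom reference loads (e.g. Reference.get()):
// while concurrent marking is running, the referent is enqueued so it stays
// alive under SATB marking.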
|
void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
    enqueue(obj);
  }
}
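// Common SATB enqueue path used by the pre-write, keep-alive and store-value
// barriers: pushes the object onto the current thread's SATB mark queue.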
|
void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
  assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");

  // Filter marked objects before hitting the SATB queues. The same predicate would
  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
  // filtering here helps to avoid wasteful SATB queueing work to begin with.
  if (!_heap->requires_marking<false>(obj)) return;

  ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
}
|
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}