26 #include "gc/shared/satbMarkQueue.hpp" |
26 #include "gc/shared/satbMarkQueue.hpp" |
27 #include "gc/shared/collectedHeap.hpp" |
27 #include "gc/shared/collectedHeap.hpp" |
28 #include "logging/log.hpp" |
28 #include "logging/log.hpp" |
29 #include "memory/allocation.inline.hpp" |
29 #include "memory/allocation.inline.hpp" |
30 #include "oops/oop.inline.hpp" |
30 #include "oops/oop.inline.hpp" |
|
31 #include "runtime/atomic.hpp" |
31 #include "runtime/mutexLocker.hpp" |
32 #include "runtime/mutexLocker.hpp" |
|
33 #include "runtime/orderAccess.hpp" |
32 #include "runtime/os.hpp" |
34 #include "runtime/os.hpp" |
33 #include "runtime/safepoint.hpp" |
35 #include "runtime/safepoint.hpp" |
34 #include "runtime/thread.hpp" |
36 #include "runtime/thread.hpp" |
35 #include "runtime/threadSMR.hpp" |
37 #include "runtime/threadSMR.hpp" |
36 #include "runtime/vmThread.hpp" |
38 #include "runtime/vmThread.hpp" |
|
39 #include "utilities/globalCounter.inline.hpp" |
37 |
40 |
38 SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset) : |
41 SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset) : |
39 // SATB queues are only active during marking cycles. We create |
42 // SATB queues are only active during marking cycles. We create |
40 // them with their active field set to false. If a thread is |
43 // them with their active field set to false. If a thread is |
41 // created during a cycle and its SATB queue needs to be activated |
44 // created during a cycle and its SATB queue needs to be activated |
105 |
108 |
106 #endif // PRODUCT |
109 #endif // PRODUCT |
107 |
110 |
108 SATBMarkQueueSet::SATBMarkQueueSet() : |
111 SATBMarkQueueSet::SATBMarkQueueSet() : |
109 PtrQueueSet(), |
112 PtrQueueSet(), |
|
113 _list(), |
|
114 _count_and_process_flag(0), |
|
115 _process_completed_buffers_threshold(SIZE_MAX), |
110 _buffer_enqueue_threshold(0) |
116 _buffer_enqueue_threshold(0) |
111 {} |
117 {} |
112 |
118 |
113 void SATBMarkQueueSet::initialize(Monitor* cbl_mon, |
119 SATBMarkQueueSet::~SATBMarkQueueSet() { |
114 BufferNode::Allocator* allocator, |
120 abandon_completed_buffers(); |
|
121 } |
|
122 |
|
123 // _count_and_process_flag has flag in least significant bit, count in |
|
124 // remaining bits. _process_completed_buffers_threshold is scaled |
|
125 // accordingly, with the lsbit set, so a _count_and_process_flag value |
|
126 // is directly comparable with the recorded threshold value. The |
|
127 // process flag is set whenever the count exceeds the threshold, and |
|
128 // remains set until the count is reduced to zero. |
|
129 |
|
130 // Increment count. If count > threshold, set flag, else maintain flag. |
|
131 static void increment_count(volatile size_t* cfptr, size_t threshold) { |
|
132 size_t old; |
|
133 size_t value = Atomic::load(cfptr); |
|
134 do { |
|
135 old = value; |
|
136 value += 2; |
|
137 assert(value > old, "overflow"); |
|
138 if (value > threshold) value |= 1; |
|
139 value = Atomic::cmpxchg(value, cfptr, old); |
|
140 } while (value != old); |
|
141 } |
|
142 |
|
143 // Decrement count. If count == 0, clear flag, else maintain flag. |
|
144 static void decrement_count(volatile size_t* cfptr) { |
|
145 size_t old; |
|
146 size_t value = Atomic::load(cfptr); |
|
147 do { |
|
148 assert((value >> 1) != 0, "underflow"); |
|
149 old = value; |
|
150 value -= 2; |
|
151 if (value <= 1) value = 0; |
|
152 value = Atomic::cmpxchg(value, cfptr, old); |
|
153 } while (value != old); |
|
154 } |
|
155 |
|
156 // Scale requested threshold to align with count field. If scaling |
|
157 // overflows, just use max value. Set process flag field to make |
|
158 // comparison in increment_count exact. |
|
// Convert a requested buffer-count threshold into the internal
// representation used by _count_and_process_flag: the count is shifted
// left one bit and the flag bit is set, so a raw count/flag word can be
// compared against the stored threshold with a single `>`. A shift that
// would overflow saturates to SIZE_MAX ("never trigger").
static size_t scale_threshold(size_t value) {
  const size_t doubled = value << 1;
  const bool overflowed = ((doubled >> 1) != value);
  return (overflowed ? SIZE_MAX : doubled) | 1;
}
|
166 |
|
167 void SATBMarkQueueSet::initialize(BufferNode::Allocator* allocator, |
115 size_t process_completed_buffers_threshold, |
168 size_t process_completed_buffers_threshold, |
116 uint buffer_enqueue_threshold_percentage) { |
169 uint buffer_enqueue_threshold_percentage) { |
117 PtrQueueSet::initialize(cbl_mon, allocator); |
170 PtrQueueSet::initialize(allocator); |
118 set_process_completed_buffers_threshold(process_completed_buffers_threshold); |
171 _process_completed_buffers_threshold = |
|
172 scale_threshold(process_completed_buffers_threshold); |
119 assert(buffer_size() != 0, "buffer size not initialized"); |
173 assert(buffer_size() != 0, "buffer size not initialized"); |
120 // Minimum threshold of 1 ensures enqueuing of completely full buffers. |
174 // Minimum threshold of 1 ensures enqueuing of completely full buffers. |
121 size_t size = buffer_size(); |
175 size_t size = buffer_size(); |
122 size_t enqueue_qty = (size * buffer_enqueue_threshold_percentage) / 100; |
176 size_t enqueue_qty = (size * buffer_enqueue_threshold_percentage) / 100; |
123 _buffer_enqueue_threshold = MAX2(size - enqueue_qty, (size_t)1); |
177 _buffer_enqueue_threshold = MAX2(size - enqueue_qty, (size_t)1); |
205 } else { |
259 } else { |
206 return false; |
260 return false; |
207 } |
261 } |
208 } |
262 } |
209 |
263 |
|
264 // SATB buffer life-cycle - Per-thread queues obtain buffers from the |
|
265 // qset's buffer allocator, fill them, and push them onto the qset's |
|
266 // list. The GC concurrently pops buffers from the qset, processes |
|
267 // them, and returns them to the buffer allocator for re-use. Both |
|
268 // the allocator and the qset use lock-free stacks. The ABA problem |
|
269 // is solved by having both allocation pops and GC pops performed |
|
270 // within GlobalCounter critical sections, while the return of buffers |
|
271 // to the allocator performs a GlobalCounter synchronize before |
|
272 // pushing onto the allocator's list. |
|
273 |
|
274 void SATBMarkQueueSet::enqueue_completed_buffer(BufferNode* node) { |
|
275 assert(node != NULL, "precondition"); |
|
276 // Increment count and update flag appropriately. Done before |
|
277 // pushing buffer so count is always at least the actual number in |
|
278 // the list, and decrement never underflows. |
|
279 increment_count(&_count_and_process_flag, _process_completed_buffers_threshold); |
|
280 _list.push(*node); |
|
281 } |
|
282 |
|
283 BufferNode* SATBMarkQueueSet::get_completed_buffer() { |
|
284 BufferNode* node; |
|
285 { |
|
286 GlobalCounter::CriticalSection cs(Thread::current()); |
|
287 node = _list.pop(); |
|
288 } |
|
289 if (node != NULL) { |
|
290 // Got a buffer so decrement count and update flag appropriately. |
|
291 decrement_count(&_count_and_process_flag); |
|
292 } |
|
293 return node; |
|
294 } |
|
295 |
210 #ifndef PRODUCT |
296 #ifndef PRODUCT |
211 // Helpful for debugging |
297 // Helpful for debugging |
212 |
298 |
213 #define SATB_PRINTER_BUFFER_SIZE 256 |
299 #define SATB_PRINTER_BUFFER_SIZE 256 |
214 |
300 |
217 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); |
303 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); |
218 |
304 |
219 tty->cr(); |
305 tty->cr(); |
220 tty->print_cr("SATB BUFFERS [%s]", msg); |
306 tty->print_cr("SATB BUFFERS [%s]", msg); |
221 |
307 |
222 BufferNode* nd = completed_buffers_head(); |
308 BufferNode* nd = _list.top(); |
223 int i = 0; |
309 int i = 0; |
224 while (nd != NULL) { |
310 while (nd != NULL) { |
225 void** buf = BufferNode::make_buffer_from_node(nd); |
311 void** buf = BufferNode::make_buffer_from_node(nd); |
226 os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i); |
312 os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i); |
227 print_satb_buffer(buffer, buf, nd->index(), buffer_size()); |
313 print_satb_buffer(buffer, buf, nd->index(), buffer_size()); |
246 |
332 |
247 tty->cr(); |
333 tty->cr(); |
248 } |
334 } |
249 #endif // PRODUCT |
335 #endif // PRODUCT |
250 |
336 |
|
337 void SATBMarkQueueSet::abandon_completed_buffers() { |
|
338 Atomic::store(size_t(0), &_count_and_process_flag); |
|
339 BufferNode* buffers_to_delete = _list.pop_all(); |
|
340 while (buffers_to_delete != NULL) { |
|
341 BufferNode* bn = buffers_to_delete; |
|
342 buffers_to_delete = bn->next(); |
|
343 bn->set_next(NULL); |
|
344 deallocate_buffer(bn); |
|
345 } |
|
346 } |
|
347 |
251 void SATBMarkQueueSet::abandon_partial_marking() { |
348 void SATBMarkQueueSet::abandon_partial_marking() { |
252 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); |
349 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); |
253 abandon_completed_buffers(); |
350 abandon_completed_buffers(); |
254 |
351 |
255 class AbandonThreadQueueClosure : public ThreadClosure { |
352 class AbandonThreadQueueClosure : public ThreadClosure { |