/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

#include <new>

PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset),
  _active(active),
  _permanent(permanent),
  _index(0),
  _capacity_in_bytes(0),
  _buf(NULL),
  _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

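// Release this queue's buffer back to the qset: a non-empty buffer is
// enqueued as a completed buffer, an empty one is returned to the free
// list.  Afterwards this queue no longer owns a buffer.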
void PtrQueue::flush_impl() {
  if (_buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
    if (is_empty()) {
      // No work to do.
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_complete_buffer(node);
    }
    _buf = NULL;
    set_index(0);
  }
}

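// Add ptr to the queue regardless of the active/inactive state.  The buffer
// is filled from the highest index down towards 0; when _index reaches 0 the
// buffer is full and handle_zero_index() retires it (or allocates the first
// buffer if none has been installed yet).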
void PtrQueue::enqueue_known_active(void* ptr) {
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_buf != NULL, "postcondition");
  assert(index() > 0, "postcondition");
  assert(index() <= capacity(), "invariant");
  _index -= _element_size;
  _buf[index()] = ptr;
}

void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
  assert(_lock->owned_by_self(), "Required.");
  qset()->enqueue_complete_buffer(node);
}

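// A BufferNode and its buffer are allocated together as a single C-heap
// block: the node header is followed immediately by an array of 'size'
// void* slots (see make_buffer_from_node / make_node_from_buffer).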
BufferNode* BufferNode::allocate(size_t size) {
  size_t byte_size = size * sizeof(void*);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _buffer_size(0),
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}

PtrQueueSet::~PtrQueueSet() {
  // There are presently only a couple (derived) instances ever
  // created, and they are permanent, so no harm currently done by
  // doing nothing here.
}

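// cbl_mon protects the completed-buffer list and is used to notify waiting
// processing threads; fl_lock protects the buffer free list.  If fl_owner is
// non-NULL, that qset's free list is shared instead of this one's own.
// process_completed_threshold is the completed-buffer count at which
// processing is requested; max_completed_queue bounds the queue length
// beyond which mutator threads process buffers themselves.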
void PtrQueueSet::initialize(Monitor* cbl_mon,
                             Mutex* fl_lock,
                             int process_completed_threshold,
                             int max_completed_queue,
                             PtrQueueSet *fl_owner) {
  _max_completed_queue = max_completed_queue;
  _process_completed_threshold = process_completed_threshold;
  _completed_queue_padding = 0;
  assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  _fl_lock = fl_lock;
  _fl_owner = (fl_owner != NULL) ? fl_owner : this;
}

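// Return a buffer for use by a queue, taking one from the owner's free list
// if available and allocating a new one otherwise.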
void** PtrQueueSet::allocate_buffer() {
  BufferNode* node = NULL;
  {
    MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
    node = _fl_owner->_buf_free_list;
    if (node != NULL) {
      _fl_owner->_buf_free_list = node->next();
      _fl_owner->_buf_free_list_sz--;
    }
  }
  if (node == NULL) {
    node = BufferNode::allocate(buffer_size());
  } else {
    // Reinitialize buffer obtained from free list.
    node->set_index(0);
    node->set_next(NULL);
  }
  return BufferNode::make_buffer_from_node(node);
}

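// Return a no-longer-needed buffer to the owner's free list.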
void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  for (size_t i = 0; i < n; ++i) {
    assert(_buf_free_list != NULL,
           "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
    BufferNode* node = _buf_free_list;
    _buf_free_list = node->next();
    _buf_free_list_sz--;
    BufferNode::deallocate(node);
  }
}

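// Called when _index == 0: either no buffer has been allocated yet or the
// current buffer is full.  A full buffer may be retained (if
// should_enqueue_buffer() leaves it less than full), recycled after being
// processed by the mutator, or handed to the qset as a completed buffer;
// otherwise a fresh buffer is allocated and installed.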
void PtrQueue::handle_zero_index() {
  assert(index() == 0, "precondition");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(index() > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(node); // enqueue completed buffer
      assert(_buf == NULL, "multiple enqueuers appear to be racing");
    } else {
      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
      if (qset()->process_or_enqueue_complete_buffer(node)) {
        // Recycle the buffer. No allocation.
        assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
        assert(capacity() == qset()->buffer_size(), "invariant");
        reset();
        return;
      }
    }
  }
  // Set capacity in case this is the first allocation.
  set_capacity(qset()->buffer_size());
  // Allocate a new buffer.
  _buf = qset()->allocate_buffer();
  reset();
}

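// Either process the full buffer directly (a Java thread does so when the
// completed-buffer queue has grown past its limit) or enqueue it for later
// processing.  Returns true if the caller may reuse the buffer, false if it
// was enqueued and the caller must obtain a new one.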
bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(node);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(node);
  return false;
}

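// Append a completed buffer to the shared list, and once the configured
// threshold is reached, flag that processing is needed and notify a waiting
// processing thread.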
void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= (size_t)_process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}

size_t PtrQueueSet::completed_buffers_list_length() {
  size_t n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_buffer_size == 0 && sz > 0, "Should be called only once.");
  _buffer_size = sz;
}

// Merge lists of buffers. The source queue set is emptied as a result.
// The queues must share the monitor; notifying the processing threads is
// left to the caller (see notify_if_necessary).
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}

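// Wake up a processing thread if the number of completed buffers has reached
// the processing threshold (or if buffers must always be processed because
// _max_completed_queue is zero).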
void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert(_process_completed_threshold >= 0, "_process_completed_threshold is negative");
  if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
}