/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
#define SHARE_GC_SHARED_PTRQUEUE_HPP

#include "utilities/align.hpp"
#include "utilities/sizes.hpp"

// There are various techniques that require threads to be able to log
// addresses. For example, a generational write barrier might log
// the addresses of modified old-generation objects. This type supports
// this operation.
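//
// A minimal usage sketch (illustrative only; "modified_obj" and the queue
// binding are hypothetical, not part of this header):
//
//   PtrQueue& q = ...;        // typically a thread-local queue tied to a PtrQueueSet
//   q.enqueue(modified_obj);  // no-op while the queue is inactive; otherwise the
//                             // pointer is logged into the queue's current buffer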

class BufferNode;
class PtrQueueSet;
class PtrQueue {
  friend class VMStructs;

  // Noncopyable - not defined.
  PtrQueue(const PtrQueue&);
  PtrQueue& operator=(const PtrQueue&);

  // The ptr queue set to which this queue belongs.
  PtrQueueSet* const _qset;

  // Whether updates should be logged.
  bool _active;

  // If true, the queue is permanent, and doesn't need to deallocate
  // its buffer in the destructor (since that obtains a lock which may not
  // be legally locked by then).
  const bool _permanent;

  // The (byte) index at which an object was last enqueued. Starts at
  // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
  // Value is always pointer-size aligned.
  size_t _index;

  // Size of the current buffer, in bytes.
  // Value is always pointer-size aligned.
  size_t _capacity_in_bytes;

  static const size_t _element_size = sizeof(void*);
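
  // A worked example of the index arithmetic above (illustrative numbers,
  // assuming 64-bit pointers): a buffer with a capacity of 256 entries has
  // capacity_in_bytes() == 2048. An empty queue has _index == 2048; each
  // enqueue moves _index down by _element_size (8), so the number of
  // entries in use is (capacity_in_bytes() - _index) / _element_size.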

  // Get the capacity, in bytes. The capacity must have been set.
  size_t capacity_in_bytes() const {
    assert(_capacity_in_bytes > 0, "capacity not set");
    return _capacity_in_bytes;
  }

  void set_capacity(size_t entries) {
    size_t byte_capacity = index_to_byte_index(entries);
    assert(_capacity_in_bytes == 0 || _capacity_in_bytes == byte_capacity,
           "changing capacity " SIZE_FORMAT " -> " SIZE_FORMAT,
           _capacity_in_bytes, byte_capacity);
    _capacity_in_bytes = byte_capacity;
  }

  static size_t byte_index_to_index(size_t ind) {
    assert(is_aligned(ind, _element_size), "precondition");
    return ind / _element_size;
  }

  static size_t index_to_byte_index(size_t ind) {
    return ind * _element_size;
  }

protected:
  // The buffer.
  void** _buf;

  size_t index() const {
    return byte_index_to_index(_index);
  }

  void set_index(size_t new_index) {
    size_t byte_index = index_to_byte_index(new_index);
    assert(byte_index <= capacity_in_bytes(), "precondition");
    _index = byte_index;
  }

  size_t capacity() const {
    return byte_index_to_index(capacity_in_bytes());
  }

  // If there is a lock associated with this buffer, this is that lock.
  Mutex* _lock;

  PtrQueueSet* qset() { return _qset; }
  bool is_permanent() const { return _permanent; }

  // Process queue entries and release resources.
  void flush_impl();

  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);

  // Requires queue flushed or permanent.
  ~PtrQueue();

public:

  // Associate a lock with a ptr queue.
  void set_lock(Mutex* lock) { _lock = lock; }

  // Forcibly set empty.
  void reset() {
    if (_buf != NULL) {
      _index = capacity_in_bytes();
    }
  }

  void enqueue(volatile void* ptr) {
    enqueue((void*)(ptr));
  }

  // Enqueues the given "ptr".
  void enqueue(void* ptr) {
    if (!_active) return;
    else enqueue_known_active(ptr);
  }

  // This method is called during zero-index handling and gives the queue
  // a chance to do any pre-enqueue processing it might want to do on the
  // buffer. It should return true if the buffer should be enqueued, or
  // false if enough entries were cleared from it that it can be re-used.
  // It must not return false if the buffer is still full (otherwise we
  // can get into an infinite loop).
  virtual bool should_enqueue_buffer() { return true; }
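  // Called when the buffer is full (_index == 0): hand off or process the
  // completed buffer and obtain a fresh one (see ptrQueue.cpp).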
  void handle_zero_index();
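  // Enqueue the completed buffer on the qset, for queues that have an
  // associated _lock (see ptrQueue.cpp for the locking protocol).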
  void locking_enqueue_completed_buffer(BufferNode* node);

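  // Add "ptr" to the buffer without re-checking _active; a full buffer is
  // handled via handle_zero_index().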
  void enqueue_known_active(void* ptr);

  // Return the size of the in-use region.
  size_t size() const {
    size_t result = 0;
    if (_buf != NULL) {
      assert(_index <= capacity_in_bytes(), "Invariant");
      result = byte_index_to_index(capacity_in_bytes() - _index);
    }
    return result;
  }

  bool is_empty() const {
    return _buf == NULL || capacity_in_bytes() == _index;
  }

  // Set the "active" property of the queue to "b". An enqueue to an
  // inactive queue is a no-op. Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      reset();
    } else if (b && _buf != NULL) {
      assert(index() == capacity(),
             "invariant: queues are empty when activated.");
    }
  }

  bool is_active() const { return _active; }

  // To support the compiler: expose the offsets and widths of the
  // _index, _buf and _active fields.

protected:
  template<typename Derived>
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(Derived, _index);
  }

  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  template<typename Derived>
  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(Derived, _buf);
  }

  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }

  template<typename Derived>
  static ByteSize byte_offset_of_active() {
    return byte_offset_of(Derived, _active);
  }

  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }

};

class BufferNode {
  size_t _index;
  BufferNode* _next;
  void* _buffer[1];  // Pseudo flexible array member.
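  // The "pseudo flexible array member" means the node is allocated with
  // extra space so that _buffer effectively spans the requested number of
  // elements; the header (_index, _next) and the buffer form one contiguous
  // block, which make_node_from_buffer/make_buffer_from_node below rely on.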

  BufferNode() : _index(0), _next(NULL) { }
  ~BufferNode() { }

  static size_t buffer_offset() {
    return offset_of(BufferNode, _buffer);
  }

public:
  BufferNode* next() const { return _next; }
  void set_next(BufferNode* n) { _next = n; }
  size_t index() const { return _index; }
  void set_index(size_t i) { _index = i; }

  // Allocate a new BufferNode with the "buffer" having size elements.
  static BufferNode* allocate(size_t size);

  // Free a BufferNode.
  static void deallocate(BufferNode* node);

  // Return the BufferNode containing the buffer, after setting its index.
  static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
    BufferNode* node =
      reinterpret_cast<BufferNode*>(
        reinterpret_cast<char*>(buffer) - buffer_offset());
    node->set_index(index);
    return node;
  }

  // Return the buffer for node.
  static void** make_buffer_from_node(BufferNode *node) {
    // &_buffer[0] might lead to index out of bounds warnings.
    return reinterpret_cast<void**>(
      reinterpret_cast<char*>(node) + buffer_offset());
  }
};

// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// The completed-buffer list and related state are protected by _cbl_mon;
// the buffer free list is protected by _fl_lock (see below).
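//
// A rough lifecycle sketch (derived from the comments below; the concrete
// subclass, monitor names and "N" are hypothetical):
//
//   MyPtrQueueSet qset;                        // constructed empty
//   qset.initialize(cbl_mon, fl_lock,
//                   process_completed_threshold,
//                   max_completed_queue);      // supply locks and thresholds
//   qset.set_buffer_size(N);                   // once, before any enqueue
//   // ... queues belonging to this set may now allocate and return buffers.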
class PtrQueueSet {
  // The size of all buffers in the set.
  size_t _buffer_size;

protected:
  Monitor* _cbl_mon;  // Protects the fields below.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;
  size_t _n_completed_buffers;
  int _process_completed_threshold;
  volatile bool _process_completed;

  // This (and the interpretation of the first element as a "next"
  // pointer) are protected by the _fl_lock.
  Mutex* _fl_lock;
  BufferNode* _buf_free_list;
  size_t _buf_free_list_sz;
  // Queue sets can share a freelist. The _fl_owner variable
  // specifies the owner; it is set to "this" by default.
  PtrQueueSet* _fl_owner;

  bool _all_active;

  // If true, notify_all on _cbl_mon when the threshold is reached.
  bool _notify_when_complete;

  // Maximum number of elements allowed on completed queue: after that,
  // enqueuer does the work itself. Zero indicates no maximum.
  int _max_completed_queue;
  size_t _completed_queue_padding;

  size_t completed_buffers_list_length();
  void assert_completed_buffer_list_len_correct_locked();
  void assert_completed_buffer_list_len_correct();

protected:
  // A mutator thread does the work of processing a buffer.
  // Returns "true" iff the work is complete (and the buffer may be
  // deallocated).
  virtual bool mut_process_buffer(BufferNode* node) {
    ShouldNotReachHere();
    return false;
  }

  // Create an empty ptr queue set.
  PtrQueueSet(bool notify_when_complete = false);
  ~PtrQueueSet();

  // Because of init-order concerns, we can't pass these as constructor
  // arguments.
  void initialize(Monitor* cbl_mon,
                  Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  PtrQueueSet *fl_owner = NULL);

public:

  // Return the buffer for a BufferNode of size buffer_size().
  void** allocate_buffer();

  // Return an empty buffer to the free list. The node is required
  // to have been allocated with a size of buffer_size().
  void deallocate_buffer(BufferNode* node);

  // Declares that "node" holds a complete buffer.
  void enqueue_complete_buffer(BufferNode* node);

  // To be invoked by the mutator.
  bool process_or_enqueue_complete_buffer(BufferNode* node);

  bool completed_buffers_exist_dirty() {
    return _n_completed_buffers > 0;
  }

  bool process_completed_buffers() { return _process_completed; }
  void set_process_completed(bool x) { _process_completed = x; }

  bool is_active() { return _all_active; }

  // Set the buffer size. Must be called before any "enqueue" operation,
  // and should only be called once.
  void set_buffer_size(size_t sz);

  // Get the buffer size. Must have been set.
  size_t buffer_size() const {
    assert(_buffer_size > 0, "buffer size not set");
    return _buffer_size;
  }

  // Get/Set the number of completed buffers that triggers log processing.
  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
  int process_completed_threshold() const { return _process_completed_threshold; }

  // Must only be called at a safe point. Indicates that the buffer free
  // list size may be reduced, if that is deemed desirable.
  void reduce_free_list();

  size_t completed_buffers_num() { return _n_completed_buffers; }

  void merge_bufferlists(PtrQueueSet* src);

  void set_max_completed_queue(int m) { _max_completed_queue = m; }
  int max_completed_queue() { return _max_completed_queue; }

  void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
  size_t completed_queue_padding() { return _completed_queue_padding; }

  // Notify the consumer if the number of buffers crossed the threshold.
  void notify_if_necessary();
};

#endif // SHARE_GC_SHARED_PTRQUEUE_HPP