|
1 /* |
|
2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #ifndef SHARE_GC_SHARED_OOPSTORAGE_HPP |
|
26 #define SHARE_GC_SHARED_OOPSTORAGE_HPP |
|
27 |
|
28 #include "memory/allocation.hpp" |
|
29 #include "metaprogramming/conditional.hpp" |
|
30 #include "metaprogramming/isConst.hpp" |
|
31 #include "oops/oop.hpp" |
|
32 #include "utilities/count_trailing_zeros.hpp" |
|
33 #include "utilities/debug.hpp" |
|
34 #include "utilities/globalDefinitions.hpp" |
|
35 #include "utilities/macros.hpp" |
|
36 |
|
37 class Mutex; |
|
38 class outputStream; |
|
39 |
|
40 // OopStorage supports management of off-heap references to objects allocated |
|
41 // in the Java heap. An OopStorage object provides a set of Java object |
|
42 // references (oop values), which clients refer to via oop* handles to the |
|
43 // associated OopStorage entries. Clients allocate entries to create a |
|
44 // (possibly weak) reference to a Java object, use that reference, and release |
|
45 // the reference when no longer needed. |
|
46 // |
|
47 // The garbage collector must know about all OopStorage objects and their |
|
48 // reference strength. OopStorage provides the garbage collector with support |
|
49 // for iteration over all the allocated entries. |
|
50 // |
|
51 // There are several categories of interaction with an OopStorage object. |
|
52 // |
|
53 // (1) allocation and release of entries, by the mutator or the VM. |
|
54 // (2) iteration by the garbage collector, possibly concurrent with mutator. |
|
55 // (3) iteration by other, non-GC, tools (only at safepoints). |
|
56 // (4) cleanup of unused internal storage, possibly concurrent with mutator. |
|
57 // |
|
58 // A goal of OopStorage is to make these interactions thread-safe, while |
|
59 // minimizing potential lock contention issues within and between these |
|
60 // categories. In particular, support for concurrent iteration by the garbage |
|
61 // collector, under certain restrictions, is required. Further, it must not |
|
62 // block nor be blocked by other operations for long periods. |
|
63 // |
|
64 // Internally, OopStorage is a set of Block objects, from which entries are |
|
65 // allocated and released. A block contains an oop[] and a bitmask indicating |
|
66 // which entries are in use (have been allocated and not yet released). New |
|
67 // blocks are constructed and added to the storage object when an entry |
|
68 // allocation request is made and there are no blocks with unused entries. |
|
69 // Blocks may be removed and deleted when empty. |
|
70 // |
|
71 // There are two important (and somewhat intertwined) protocols governing |
|
72 // concurrent access to a storage object. These are the Concurrent Iteration |
|
73 // Protocol and the Allocation Protocol. See the ParState class for a |
|
74 // discussion of concurrent iteration and the management of thread |
|
75 // interactions for this protocol. Similarly, see the allocate() function for |
|
76 // a discussion of allocation. |
|
77 |
|
class OopStorage : public CHeapObj<mtGC> {
public:
  OopStorage(const char* name, Mutex* allocate_mutex, Mutex* active_mutex);
  ~OopStorage();

  // These count and usage accessors are racy unless at a safepoint.

  // The number of allocated and not yet released entries.
  size_t allocation_count() const;

  // The number of blocks of entries.  Useful for sizing parallel iteration.
  size_t block_count() const;

  // The number of blocks with no allocated entries.  Useful for sizing
  // parallel iteration and scheduling block deletion.
  size_t empty_block_count() const;

  // Total number of blocks * memory allocation per block, plus
  // bookkeeping overhead, including this storage object.
  size_t total_memory_usage() const;

  // Result categories for allocation_status().
  enum EntryStatus {
    INVALID_ENTRY,      // ptr is not an entry of this storage.
    UNALLOCATED_ENTRY,  // ptr is an entry, but not currently allocated.
    ALLOCATED_ENTRY     // ptr is a currently allocated entry.
  };

  // Locks _allocate_mutex.
  EntryStatus allocation_status(const oop* ptr) const;

  // Allocates and returns a new entry.  Returns NULL if memory allocation
  // failed.  Locks _allocate_mutex.
  // postcondition: *result == NULL.
  oop* allocate();

  // Deallocates ptr, after setting its value to NULL.  Locks _allocate_mutex.
  // precondition: ptr is a valid allocated entry.
  // precondition: *ptr == NULL.
  void release(const oop* ptr);

  // Releases all the ptrs.  Possibly faster than individual calls to
  // release(oop*).  Best if ptrs is sorted by address.  Locks
  // _allocate_mutex.
  // precondition: All elements of ptrs are valid allocated entries.
  // precondition: *ptrs[i] == NULL, for i in [0,size).
  void release(const oop* const* ptrs, size_t size);

  // Applies f to each allocated entry's location.  f must be a function or
  // function object.  Assume p is either a const oop* or an oop*, depending
  // on whether the associated storage is const or non-const, respectively.
  // Then f(p) must be a valid expression.  The result of invoking f(p) must
  // be implicitly convertible to bool.  Iteration terminates and returns
  // false if any invocation of f returns false.  Otherwise, the result of
  // iteration is true.
  // precondition: at safepoint.
  template<typename F> bool iterate_safepoint(F f);
  template<typename F> bool iterate_safepoint(F f) const;

  // oops_do and weak_oops_do are wrappers around iterate_safepoint, providing
  // an adaptation layer allowing the use of existing is-alive closures and
  // OopClosures.  Assume p is either const oop* or oop*, depending on whether
  // the associated storage is const or non-const, respectively.  Then
  //
  // - closure->do_oop(p) must be a valid expression whose value is ignored.
  //
  // - is_alive->do_object_b(*p) must be a valid expression whose value is
  // convertible to bool.
  //
  // For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
  // invoked for p.  If is_alive->do_object_b(*p) is false, then closure will
  // not be invoked on p, and *p will be set to NULL.

  template<typename Closure> void oops_do(Closure* closure);
  template<typename Closure> void oops_do(Closure* closure) const;
  template<typename Closure> void weak_oops_do(Closure* closure);

  template<typename IsAliveClosure, typename Closure>
  void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);

#if INCLUDE_ALL_GCS
  // Parallel iteration is for the exclusive use of the GC.
  // Other clients must use serial iteration.
  template<bool concurrent, bool is_const> class ParState;
#endif // INCLUDE_ALL_GCS

  // Block cleanup functions are for the exclusive use of the GC.
  // Both stop deleting if there is an in-progress concurrent iteration.
  // Concurrent deletion locks both the allocate_mutex and the active_mutex.
  void delete_empty_blocks_safepoint(size_t retain = 1);
  void delete_empty_blocks_concurrent(size_t retain = 1);

  // Debugging and logging support.
  const char* name() const;
  void print_on(outputStream* st) const PRODUCT_RETURN;

  // Provides access to storage internals, for unit testing.
  class TestAccess;

private:
  class Block;
  class BlockList;

  // Doubly-linked list links embedded in a Block; one BlockEntry per list
  // the block can be on (see Block::_active_entry / _allocate_entry).
  class BlockEntry VALUE_OBJ_CLASS_SPEC {
    friend class BlockList;

    // Members are mutable, and we deal exclusively with pointers to
    // const, to make const blocks easier to use; a block being const
    // doesn't prevent modifying its list state.
    mutable const Block* _prev;
    mutable const Block* _next;

    // Noncopyable.
    BlockEntry(const BlockEntry&);
    BlockEntry& operator=(const BlockEntry&);

  public:
    BlockEntry();
    ~BlockEntry();
  };

  // Intrusive doubly-linked list of Blocks.  The list does not own the
  // blocks; it navigates via the BlockEntry selected by _get_entry.
  class BlockList VALUE_OBJ_CLASS_SPEC {
    const Block* _head;
    const Block* _tail;
    // Selects which embedded BlockEntry of a block this list uses.
    const BlockEntry& (*_get_entry)(const Block& block);

    // Noncopyable.
    BlockList(const BlockList&);
    BlockList& operator=(const BlockList&);

  public:
    BlockList(const BlockEntry& (*get_entry)(const Block& block));
    ~BlockList();

    Block* head();
    const Block* chead() const;
    const Block* ctail() const;

    Block* prev(Block& block);
    Block* next(Block& block);

    const Block* prev(const Block& block) const;
    const Block* next(const Block& block) const;

    void push_front(const Block& block);
    void push_back(const Block& block);
    void unlink(const Block& block);
  };

  // Fixed-size group of entries (one per bit of a machine word), plus the
  // list linkage and bookkeeping needed to manage the group.
  class Block /* No base class, to avoid messing up alignment requirements */ {
    // _data must be the first non-static data member, for alignment.
    oop _data[BitsPerWord];
    static const unsigned _data_pos = 0; // Position of _data.

    volatile uintx _allocated_bitmask; // One bit per _data element.
    const OopStorage* _owner;
    void* _memory;              // Unaligned storage containing block.
    BlockEntry _active_entry;   // Links for the owner's _active_list.
    BlockEntry _allocate_entry; // Links for the owner's _allocate_list.

    Block(const OopStorage* owner, void* memory);
    ~Block();

    void check_index(unsigned index) const;
    unsigned get_index(const oop* ptr) const;

    template<typename F, typename BlockPtr>
    static bool iterate_impl(F f, BlockPtr b);

    // Noncopyable.
    Block(const Block&);
    Block& operator=(const Block&);

  public:
    static const BlockEntry& get_active_entry(const Block& block);
    static const BlockEntry& get_allocate_entry(const Block& block);

    static size_t allocation_size();
    static size_t allocation_alignment_shift();

    oop* get_pointer(unsigned index);
    const oop* get_pointer(unsigned index) const;

    uintx bitmask_for_index(unsigned index) const;
    uintx bitmask_for_entry(const oop* ptr) const;

    // Allocation bitmask accessors are racy.
    bool is_full() const;
    bool is_empty() const;
    uintx allocated_bitmask() const;
    uintx cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value);

    bool contains(const oop* ptr) const;

    // Returns NULL if ptr is not in a block or not allocated in that block.
    static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);

    oop* allocate();
    static Block* new_block(const OopStorage* owner);
    static void delete_block(const Block& block);

    template<typename F> bool iterate(F f);
    template<typename F> bool iterate(F f) const;
  }; // class Block

  const char* _name;
  BlockList _active_list;   // All blocks owned by this storage object.
  BlockList _allocate_list; // Blocks considered for entry allocation;
                            // exact ordering policy is defined in the .cpp.
  // Head of _active_list; volatile because concurrent iteration reads it
  // while allocate() may be pushing new blocks (see file comment below).
  Block* volatile _active_head;

  Mutex* _allocate_mutex;
  Mutex* _active_mutex;

  // Counts are volatile for racy unlocked accesses.
  volatile size_t _allocation_count;
  volatile size_t _block_count;
  volatile size_t _empty_block_count;
  // mutable because this gets set even for const iteration.
  mutable bool _concurrent_iteration_active;

  Block* find_block_or_null(const oop* ptr) const;
  bool is_valid_block_locked_or_safepoint(const Block* block) const;
  EntryStatus allocation_status_validating_block(const Block* block, const oop* ptr) const;
  void check_release(const Block* block, const oop* ptr) const NOT_DEBUG_RETURN;
  void release_from_block(Block& block, uintx release_bitmask);
  void delete_empty_block(const Block& block);

  static void assert_at_safepoint() NOT_DEBUG_RETURN;

  template<typename F, typename Storage>
  static bool iterate_impl(F f, Storage* storage);

#if INCLUDE_ALL_GCS
  // Implementation support for parallel iteration
  class BasicParState;
#endif // INCLUDE_ALL_GCS

  // Wrapper for OopClosure-style function, so it can be used with
  // iterate.  Assume p is of type oop*.  Then cl->do_oop(p) must be a
  // valid expression whose value may be ignored.
  template<typename Closure> class OopFn;
  template<typename Closure> static OopFn<Closure> oop_fn(Closure* cl);

  // Wrapper for BoolObjectClosure + iteration handler pair, so they
  // can be used with iterate.
  template<typename IsAlive, typename F> class IfAliveFn;
  template<typename IsAlive, typename F>
  static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f);

  // Wrapper for iteration handler, automatically skipping NULL entries.
  template<typename F> class SkipNullFn;
  template<typename F> static SkipNullFn<F> skip_null_fn(F f);

  // Wrapper for iteration handler; ignore handler result and return true.
  template<typename F> class AlwaysTrueFn;
};
|
333 |
|
334 inline OopStorage::Block* OopStorage::BlockList::head() { |
|
335 return const_cast<Block*>(_head); |
|
336 } |
|
337 |
|
338 inline const OopStorage::Block* OopStorage::BlockList::chead() const { |
|
339 return _head; |
|
340 } |
|
341 |
|
342 inline const OopStorage::Block* OopStorage::BlockList::ctail() const { |
|
343 return _tail; |
|
344 } |
|
345 |
|
346 inline OopStorage::Block* OopStorage::BlockList::prev(Block& block) { |
|
347 return const_cast<Block*>(_get_entry(block)._prev); |
|
348 } |
|
349 |
|
350 inline OopStorage::Block* OopStorage::BlockList::next(Block& block) { |
|
351 return const_cast<Block*>(_get_entry(block)._next); |
|
352 } |
|
353 |
|
354 inline const OopStorage::Block* OopStorage::BlockList::prev(const Block& block) const { |
|
355 return _get_entry(block)._prev; |
|
356 } |
|
357 |
|
358 inline const OopStorage::Block* OopStorage::BlockList::next(const Block& block) const { |
|
359 return _get_entry(block)._next; |
|
360 } |
|
361 |
|
362 template<typename Closure> |
|
363 class OopStorage::OopFn VALUE_OBJ_CLASS_SPEC { |
|
364 public: |
|
365 explicit OopFn(Closure* cl) : _cl(cl) {} |
|
366 |
|
367 template<typename OopPtr> // [const] oop* |
|
368 bool operator()(OopPtr ptr) const { |
|
369 _cl->do_oop(ptr); |
|
370 return true; |
|
371 } |
|
372 |
|
373 private: |
|
374 Closure* _cl; |
|
375 }; |
|
376 |
|
377 template<typename Closure> |
|
378 inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) { |
|
379 return OopFn<Closure>(cl); |
|
380 } |
|
381 |
|
382 template<typename IsAlive, typename F> |
|
383 class OopStorage::IfAliveFn VALUE_OBJ_CLASS_SPEC { |
|
384 public: |
|
385 IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {} |
|
386 |
|
387 bool operator()(oop* ptr) const { |
|
388 bool result = true; |
|
389 oop v = *ptr; |
|
390 if (v != NULL) { |
|
391 if (_is_alive->do_object_b(v)) { |
|
392 result = _f(ptr); |
|
393 } else { |
|
394 *ptr = NULL; // Clear dead value. |
|
395 } |
|
396 } |
|
397 return result; |
|
398 } |
|
399 |
|
400 private: |
|
401 IsAlive* _is_alive; |
|
402 F _f; |
|
403 }; |
|
404 |
|
405 template<typename IsAlive, typename F> |
|
406 inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) { |
|
407 return IfAliveFn<IsAlive, F>(is_alive, f); |
|
408 } |
|
409 |
|
410 template<typename F> |
|
411 class OopStorage::SkipNullFn VALUE_OBJ_CLASS_SPEC { |
|
412 public: |
|
413 SkipNullFn(F f) : _f(f) {} |
|
414 |
|
415 template<typename OopPtr> // [const] oop* |
|
416 bool operator()(OopPtr ptr) const { |
|
417 return (*ptr != NULL) ? _f(ptr) : true; |
|
418 } |
|
419 |
|
420 private: |
|
421 F _f; |
|
422 }; |
|
423 |
|
424 template<typename F> |
|
425 inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) { |
|
426 return SkipNullFn<F>(f); |
|
427 } |
|
428 |
|
429 template<typename F> |
|
430 class OopStorage::AlwaysTrueFn VALUE_OBJ_CLASS_SPEC { |
|
431 F _f; |
|
432 |
|
433 public: |
|
434 AlwaysTrueFn(F f) : _f(f) {} |
|
435 |
|
436 template<typename OopPtr> // [const] oop* |
|
437 bool operator()(OopPtr ptr) const { _f(ptr); return true; } |
|
438 }; |
|
439 |
|
440 // Inline Block accesses for use in iteration inner loop. |
|
441 |
|
// Debug-only bounds check of an entry index against the _data array.
inline void OopStorage::Block::check_index(unsigned index) const {
  assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
}
|
445 |
|
446 inline oop* OopStorage::Block::get_pointer(unsigned index) { |
|
447 check_index(index); |
|
448 return &_data[index]; |
|
449 } |
|
450 |
|
451 inline const oop* OopStorage::Block::get_pointer(unsigned index) const { |
|
452 check_index(index); |
|
453 return &_data[index]; |
|
454 } |
|
455 |
|
// Racy unlocked read of the allocation bitmask (see "Allocation bitmask
// accessors are racy" in the class declaration).
inline uintx OopStorage::Block::allocated_bitmask() const {
  return _allocated_bitmask;
}
|
459 |
|
460 inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const { |
|
461 check_index(index); |
|
462 return uintx(1) << index; |
|
463 } |
|
464 |
|
465 // Provide const or non-const iteration, depending on whether BlockPtr |
|
466 // is const Block* or Block*, respectively. |
|
467 template<typename F, typename BlockPtr> // BlockPtr := [const] Block* |
|
468 inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) { |
|
469 uintx bitmask = block->allocated_bitmask(); |
|
470 while (bitmask != 0) { |
|
471 unsigned index = count_trailing_zeros(bitmask); |
|
472 bitmask ^= block->bitmask_for_index(index); |
|
473 if (!f(block->get_pointer(index))) { |
|
474 return false; |
|
475 } |
|
476 } |
|
477 return true; |
|
478 } |
|
479 |
|
480 template<typename F> |
|
481 inline bool OopStorage::Block::iterate(F f) { |
|
482 return iterate_impl(f, this); |
|
483 } |
|
484 |
|
485 template<typename F> |
|
486 inline bool OopStorage::Block::iterate(F f) const { |
|
487 return iterate_impl(f, this); |
|
488 } |
|
489 |
|
490 ////////////////////////////////////////////////////////////////////////////// |
|
491 // Support for serial iteration, always at a safepoint. |
|
492 |
|
493 // Provide const or non-const iteration, depending on whether Storage is |
|
494 // const OopStorage* or OopStorage*, respectively. |
|
495 template<typename F, typename Storage> // Storage := [const] OopStorage |
|
496 inline bool OopStorage::iterate_impl(F f, Storage* storage) { |
|
497 assert_at_safepoint(); |
|
498 // Propagate const/non-const iteration to the block layer, by using |
|
499 // const or non-const blocks as corresponding to Storage. |
|
500 typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr; |
|
501 for (BlockPtr block = storage->_active_head; |
|
502 block != NULL; |
|
503 block = storage->_active_list.next(*block)) { |
|
504 if (!block->iterate(f)) { |
|
505 return false; |
|
506 } |
|
507 } |
|
508 return true; |
|
509 } |
|
510 |
|
511 template<typename F> |
|
512 inline bool OopStorage::iterate_safepoint(F f) { |
|
513 return iterate_impl(f, this); |
|
514 } |
|
515 |
|
516 template<typename F> |
|
517 inline bool OopStorage::iterate_safepoint(F f) const { |
|
518 return iterate_impl(f, this); |
|
519 } |
|
520 |
|
521 template<typename Closure> |
|
522 inline void OopStorage::oops_do(Closure* cl) { |
|
523 iterate_safepoint(oop_fn(cl)); |
|
524 } |
|
525 |
|
526 template<typename Closure> |
|
527 inline void OopStorage::oops_do(Closure* cl) const { |
|
528 iterate_safepoint(oop_fn(cl)); |
|
529 } |
|
530 |
|
531 template<typename Closure> |
|
532 inline void OopStorage::weak_oops_do(Closure* cl) { |
|
533 iterate_safepoint(skip_null_fn(oop_fn(cl))); |
|
534 } |
|
535 |
|
536 template<typename IsAliveClosure, typename Closure> |
|
537 inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) { |
|
538 iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl))); |
|
539 } |
|
540 |
|
541 #if INCLUDE_ALL_GCS |
|
542 |
|
543 ////////////////////////////////////////////////////////////////////////////// |
|
544 // Support for parallel and optionally concurrent state iteration. |
|
545 // |
|
546 // Parallel iteration is for the exclusive use of the GC. Other iteration |
|
547 // clients must use serial iteration. |
|
548 // |
|
549 // Concurrent Iteration |
|
550 // |
|
551 // Iteration involves the _active_list, which contains all of the blocks owned |
|
552 // by a storage object. This is a doubly-linked list, linked through |
|
553 // dedicated fields in the blocks. |
|
554 // |
|
555 // At most one concurrent ParState can exist at a time for a given storage |
|
556 // object. |
|
557 // |
|
558 // A concurrent ParState sets the associated storage's |
|
559 // _concurrent_iteration_active flag true when the state is constructed, and |
|
560 // sets it false when the state is destroyed. These assignments are made with |
|
561 // _active_mutex locked. Meanwhile, empty block deletion is not done while |
|
562 // _concurrent_iteration_active is true. The flag check and the dependent |
|
563 // removal of a block from the _active_list is performed with _active_mutex |
|
564 // locked. This prevents concurrent iteration and empty block deletion from |
|
// interfering with each other.
|
566 // |
|
567 // Both allocate() and delete_empty_blocks_concurrent() lock the |
|
568 // _allocate_mutex while performing their respective list manipulations, |
|
569 // preventing them from interfering with each other. |
|
570 // |
|
571 // When allocate() creates a new block, it is added to the front of the |
|
572 // _active_list. Then _active_head is set to the new block. When concurrent |
|
573 // iteration is started (by a parallel worker thread calling the state's |
|
574 // iterate() function), the current _active_head is used as the initial block |
|
575 // for the iteration, with iteration proceeding down the list headed by that |
|
576 // block. |
|
577 // |
|
578 // As a result, the list over which concurrent iteration operates is stable. |
|
579 // However, once the iteration is started, later allocations may add blocks to |
|
580 // the front of the list that won't be examined by the iteration. And while |
|
581 // the list is stable, concurrent allocate() and release() operations may |
|
582 // change the set of allocated entries in a block at any time during the |
|
583 // iteration. |
|
584 // |
|
585 // As a result, a concurrent iteration handler must accept that some |
|
586 // allocations and releases that occur after the iteration started will not be |
|
587 // seen by the iteration. Further, some may overlap examination by the |
|
588 // iteration. To help with this, allocate() and release() have an invariant |
|
589 // that an entry's value must be NULL when it is not in use. |
|
590 // |
|
591 // An in-progress delete_empty_blocks_concurrent() operation can contend with |
|
592 // the start of a concurrent iteration over the _active_mutex. Since both are |
|
593 // under GC control, that potential contention can be eliminated by never |
|
594 // scheduling both operations to run at the same time. |
|
595 // |
|
596 // ParState<concurrent, is_const> |
|
597 // concurrent must be true if iteration is concurrent with the |
|
598 // mutator, false if iteration is at a safepoint. |
|
599 // |
|
600 // is_const must be true if the iteration is over a constant storage |
|
601 // object, false if the iteration may modify the storage object. |
|
602 // |
|
603 // ParState([const] OopStorage* storage) |
|
604 // Construct an object for managing an iteration over storage. For a |
|
605 // concurrent ParState, empty block deletion for the associated storage |
|
606 // is inhibited for the life of the ParState. There can be no more |
|
607 // than one live concurrent ParState at a time for a given storage object. |
|
608 // |
|
609 // template<typename F> void iterate(F f) |
|
610 // Repeatedly claims a block from the associated storage that has |
|
611 // not been processed by this iteration (possibly by other threads), |
|
612 // and applies f to each entry in the claimed block. Assume p is of |
|
613 // type const oop* or oop*, according to is_const. Then f(p) must be |
|
614 // a valid expression whose value is ignored. Concurrent uses must |
|
615 // be prepared for an entry's value to change at any time, due to |
|
616 // mutator activity. |
|
617 // |
|
618 // template<typename Closure> void oops_do(Closure* cl) |
|
619 // Wrapper around iterate, providing an adaptation layer allowing |
|
620 // the use of OopClosures and similar objects for iteration. Assume |
|
621 // p is of type const oop* or oop*, according to is_const. Then |
|
622 // cl->do_oop(p) must be a valid expression whose value is ignored. |
|
623 // Concurrent uses must be prepared for the entry's value to change |
|
624 // at any time, due to mutator activity. |
|
625 // |
|
626 // Optional operations, provided only if !concurrent && !is_const. |
|
627 // These are not provided when is_const, because the storage object |
|
628 // may be modified by the iteration infrastructure, even if the |
|
629 // provided closure doesn't modify the storage object. These are not |
|
630 // provided when concurrent because any pre-filtering behavior by the |
|
631 // iteration infrastructure is inappropriate for concurrent iteration; |
|
632 // modifications of the storage by the mutator could result in the |
|
633 // pre-filtering being applied (successfully or not) to objects that |
|
634 // are unrelated to what the closure finds in the entry. |
|
635 // |
|
636 // template<typename Closure> void weak_oops_do(Closure* cl) |
|
637 // template<typename IsAliveClosure, typename Closure> |
|
638 // void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) |
|
639 // Wrappers around iterate, providing an adaptation layer allowing |
|
640 // the use of is-alive closures and OopClosures for iteration. |
|
641 // Assume p is of type oop*. Then |
|
642 // |
|
643 // - cl->do_oop(p) must be a valid expression whose value is ignored. |
|
644 // |
|
645 // - is_alive->do_object_b(*p) must be a valid expression whose value |
|
646 // is convertible to bool. |
|
647 // |
|
648 // If *p == NULL then neither is_alive nor cl will be invoked for p. |
|
649 // If is_alive->do_object_b(*p) is false, then cl will not be |
|
650 // invoked on p. |
|
651 |
|
// Shared implementation of parallel iteration, used by both ParState
// variants.  See the file comment above for the iteration contracts.
class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC {
public:
  BasicParState(OopStorage* storage, bool concurrent);
  ~BasicParState();

  // Repeatedly claims an unprocessed block (claiming is shared with other
  // worker threads using this state) and applies f to each allocated entry
  // of the claimed block.
  template<bool is_const, typename F> void iterate(F f) {
    // Wrap f in ATF so we can use Block::iterate.
    AlwaysTrueFn<F> atf_f(f);
    ensure_iteration_started();
    typename Conditional<is_const, const Block*, Block*>::type block;
    while ((block = claim_next_block()) != NULL) {
      block->iterate(atf_f);
    }
  }

private:
  OopStorage* _storage;
  // Claiming cursor for claim_next_block(); volatile for access by
  // multiple worker threads.
  void* volatile _next_block;
  bool _concurrent;            // True iff iterating concurrently with mutator.

  // Noncopyable.
  BasicParState(const BasicParState&);
  BasicParState& operator=(const BasicParState&);

  void update_iteration_state(bool value);
  void ensure_iteration_started();
  Block* claim_next_block();
};
|
680 |
|
// General ParState: concurrent and/or const parallel iteration.  See the
// file comment above for the full contract of each member.
template<bool concurrent, bool is_const>
class OopStorage::ParState VALUE_OBJ_CLASS_SPEC {
  BasicParState _basic_state;

public:
  ParState(const OopStorage* storage) :
    // For simplicity, always recorded as non-const.
    _basic_state(const_cast<OopStorage*>(storage), concurrent)
  {}

  // Claim blocks and apply f to each entry location of each claimed block.
  template<typename F>
  void iterate(F f) {
    _basic_state.template iterate<is_const>(f);
  }

  // Adapter: apply cl->do_oop to each entry location.
  template<typename Closure>
  void oops_do(Closure* cl) {
    this->iterate(oop_fn(cl));
  }
};
|
701 |
|
// Specialization for safepoint (non-concurrent), non-const iteration.
// Only this variant provides the weak_oops_do operations; see the file
// comment above for why they are withheld from the general template.
template<>
class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC {
  BasicParState _basic_state;

public:
  ParState(OopStorage* storage) :
    _basic_state(storage, false)
  {}

  // Claim blocks and apply f to each entry location of each claimed block.
  template<typename F>
  void iterate(F f) {
    _basic_state.template iterate<false>(f);
  }

  // Adapter: apply cl->do_oop to each entry location.
  template<typename Closure>
  void oops_do(Closure* cl) {
    this->iterate(oop_fn(cl));
  }

  // Adapter: apply cl->do_oop to each non-NULL entry's location.
  template<typename Closure>
  void weak_oops_do(Closure* cl) {
    this->iterate(skip_null_fn(oop_fn(cl)));
  }

  // Adapter: NULL entries skipped; dead entries cleared to NULL; live
  // entries passed to cl->do_oop.
  template<typename IsAliveClosure, typename Closure>
  void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
    this->iterate(if_alive_fn(is_alive, oop_fn(cl)));
  }
};
|
731 |
|
732 #endif // INCLUDE_ALL_GCS |
|
733 |
|
734 #endif // include guard |