332 static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f); |
272 static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f); |
333 |
273 |
334 // Wrapper for iteration handler, automatically skipping NULL entries. |
274 // Wrapper for iteration handler, automatically skipping NULL entries. |
335 template<typename F> class SkipNullFn; |
275 template<typename F> class SkipNullFn; |
336 template<typename F> static SkipNullFn<F> skip_null_fn(F f); |
276 template<typename F> static SkipNullFn<F> skip_null_fn(F f); |
337 |
|
338 // Wrapper for iteration handler; ignore handler result and return true. |
|
339 template<typename F> class AlwaysTrueFn; |
|
340 }; |
277 }; |
341 |
278 |
// Head of the list, as a mutable Block pointer.  The list stores const
// pointers internally, so the non-const accessor strips const.
inline OopStorage::Block* OopStorage::BlockList::head() {
  return const_cast<Block*>(_head);
}
|
345 |
|
// Head of the list (const access).
inline const OopStorage::Block* OopStorage::BlockList::chead() const {
  return _head;
}
|
349 |
|
// Tail of the list (const access).
inline const OopStorage::Block* OopStorage::BlockList::ctail() const {
  return _tail;
}
|
353 |
|
// Predecessor of block in this list, via the block's embedded list entry.
inline OopStorage::Block* OopStorage::BlockList::prev(Block& block) {
  return const_cast<Block*>(_get_entry(block)._prev);
}
|
357 |
|
// Successor of block in this list, via the block's embedded list entry.
inline OopStorage::Block* OopStorage::BlockList::next(Block& block) {
  return const_cast<Block*>(_get_entry(block)._next);
}
|
361 |
|
// Predecessor of block in this list (const access).
inline const OopStorage::Block* OopStorage::BlockList::prev(const Block& block) const {
  return _get_entry(block)._prev;
}
|
365 |
|
// Successor of block in this list (const access).
inline const OopStorage::Block* OopStorage::BlockList::next(const Block& block) const {
  return _get_entry(block)._next;
}
|
369 |
|
370 template<typename Closure> |
|
371 class OopStorage::OopFn VALUE_OBJ_CLASS_SPEC { |
|
372 public: |
|
373 explicit OopFn(Closure* cl) : _cl(cl) {} |
|
374 |
|
375 template<typename OopPtr> // [const] oop* |
|
376 bool operator()(OopPtr ptr) const { |
|
377 _cl->do_oop(ptr); |
|
378 return true; |
|
379 } |
|
380 |
|
381 private: |
|
382 Closure* _cl; |
|
383 }; |
|
384 |
|
// Factory for OopFn, deducing the closure type from the argument.
template<typename Closure>
inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
  return OopFn<Closure>(cl);
}
|
389 |
|
390 template<typename IsAlive, typename F> |
|
391 class OopStorage::IfAliveFn VALUE_OBJ_CLASS_SPEC { |
|
392 public: |
|
393 IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {} |
|
394 |
|
395 bool operator()(oop* ptr) const { |
|
396 bool result = true; |
|
397 oop v = *ptr; |
|
398 if (v != NULL) { |
|
399 if (_is_alive->do_object_b(v)) { |
|
400 result = _f(ptr); |
|
401 } else { |
|
402 *ptr = NULL; // Clear dead value. |
|
403 } |
|
404 } |
|
405 return result; |
|
406 } |
|
407 |
|
408 private: |
|
409 IsAlive* _is_alive; |
|
410 F _f; |
|
411 }; |
|
412 |
|
// Factory for IfAliveFn, deducing the closure and function types.
template<typename IsAlive, typename F>
inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
  return IfAliveFn<IsAlive, F>(is_alive, f);
}
|
417 |
|
418 template<typename F> |
|
419 class OopStorage::SkipNullFn VALUE_OBJ_CLASS_SPEC { |
|
420 public: |
|
421 SkipNullFn(F f) : _f(f) {} |
|
422 |
|
423 template<typename OopPtr> // [const] oop* |
|
424 bool operator()(OopPtr ptr) const { |
|
425 return (*ptr != NULL) ? _f(ptr) : true; |
|
426 } |
|
427 |
|
428 private: |
|
429 F _f; |
|
430 }; |
|
431 |
|
// Factory for SkipNullFn, deducing the function type.
template<typename F>
inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
  return SkipNullFn<F>(f);
}
|
436 |
|
437 template<typename F> |
|
438 class OopStorage::AlwaysTrueFn VALUE_OBJ_CLASS_SPEC { |
|
439 F _f; |
|
440 |
|
441 public: |
|
442 AlwaysTrueFn(F f) : _f(f) {} |
|
443 |
|
444 template<typename OopPtr> // [const] oop* |
|
445 bool operator()(OopPtr ptr) const { _f(ptr); return true; } |
|
446 }; |
|
447 |
|
448 // Inline Block accesses for use in iteration inner loop. |
|
449 |
|
// Assert that index is within the block's entry array.
inline void OopStorage::Block::check_index(unsigned index) const {
  assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
}
|
453 |
|
// Pointer to the entry at index, after bounds checking.
inline oop* OopStorage::Block::get_pointer(unsigned index) {
  check_index(index);
  return &_data[index];
}
|
458 |
|
// Pointer to the entry at index, after bounds checking (const access).
inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
  check_index(index);
  return &_data[index];
}
|
463 |
|
// Bitmask of this block's entries; bit i corresponds to entry index i
// (see bitmask_for_index), with set bits denoting allocated entries.
inline uintx OopStorage::Block::allocated_bitmask() const {
  return _allocated_bitmask;
}
|
467 |
|
// Single-bit mask selecting the entry at index, after bounds checking.
inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
  check_index(index);
  return uintx(1) << index;
}
|
472 |
|
473 // Provide const or non-const iteration, depending on whether BlockPtr |
|
474 // is const Block* or Block*, respectively. |
|
475 template<typename F, typename BlockPtr> // BlockPtr := [const] Block* |
|
476 inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) { |
|
477 uintx bitmask = block->allocated_bitmask(); |
|
478 while (bitmask != 0) { |
|
479 unsigned index = count_trailing_zeros(bitmask); |
|
480 bitmask ^= block->bitmask_for_index(index); |
|
481 if (!f(block->get_pointer(index))) { |
|
482 return false; |
|
483 } |
|
484 } |
|
485 return true; |
|
486 } |
|
487 |
|
// Apply f to each allocated entry of this block; stops early and returns
// false if f returns false.
template<typename F>
inline bool OopStorage::Block::iterate(F f) {
  return iterate_impl(f, this);
}
|
492 |
|
// Const form of iterate; f receives const oop* entry pointers.
template<typename F>
inline bool OopStorage::Block::iterate(F f) const {
  return iterate_impl(f, this);
}
|
497 |
|
498 ////////////////////////////////////////////////////////////////////////////// |
|
499 // Support for serial iteration, always at a safepoint. |
|
500 |
|
501 // Provide const or non-const iteration, depending on whether Storage is |
|
502 // const OopStorage* or OopStorage*, respectively. |
|
503 template<typename F, typename Storage> // Storage := [const] OopStorage |
|
504 inline bool OopStorage::iterate_impl(F f, Storage* storage) { |
|
505 assert_at_safepoint(); |
|
506 // Propagate const/non-const iteration to the block layer, by using |
|
507 // const or non-const blocks as corresponding to Storage. |
|
508 typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr; |
|
509 for (BlockPtr block = storage->_active_head; |
|
510 block != NULL; |
|
511 block = storage->_active_list.next(*block)) { |
|
512 if (!block->iterate(f)) { |
|
513 return false; |
|
514 } |
|
515 } |
|
516 return true; |
|
517 } |
|
518 |
|
// Serial iteration over all allocated entries; must be at a safepoint.
template<typename F>
inline bool OopStorage::iterate_safepoint(F f) {
  return iterate_impl(f, this);
}
|
523 |
|
// Const form of iterate_safepoint; f receives const oop* entry pointers.
template<typename F>
inline bool OopStorage::iterate_safepoint(F f) const {
  return iterate_impl(f, this);
}
|
528 |
|
// Apply cl->do_oop to every allocated entry; must be at a safepoint.
template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) {
  iterate_safepoint(oop_fn(cl));
}
|
533 |
|
// Const form of oops_do; cl->do_oop receives const oop* entry pointers.
template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) const {
  iterate_safepoint(oop_fn(cl));
}
|
538 |
|
// Apply cl->do_oop to every allocated entry with a non-NULL value; NULL
// entries are skipped.  Must be at a safepoint.
template<typename Closure>
inline void OopStorage::weak_oops_do(Closure* cl) {
  iterate_safepoint(skip_null_fn(oop_fn(cl)));
}
|
543 |
|
// For each allocated non-NULL entry: clear it to NULL if is_alive says
// the value is dead, otherwise apply cl->do_oop.  Must be at a safepoint.
template<typename IsAliveClosure, typename Closure>
inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
  iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl)));
}
|
548 |
|
549 #if INCLUDE_ALL_GCS |
|
550 |
|
551 ////////////////////////////////////////////////////////////////////////////// |
|
552 // Support for parallel and optionally concurrent state iteration. |
|
553 // |
|
554 // Parallel iteration is for the exclusive use of the GC. Other iteration |
|
555 // clients must use serial iteration. |
|
556 // |
|
557 // Concurrent Iteration |
|
558 // |
|
559 // Iteration involves the _active_list, which contains all of the blocks owned |
|
560 // by a storage object. This is a doubly-linked list, linked through |
|
561 // dedicated fields in the blocks. |
|
562 // |
|
563 // At most one concurrent ParState can exist at a time for a given storage |
|
564 // object. |
|
565 // |
|
566 // A concurrent ParState sets the associated storage's |
|
567 // _concurrent_iteration_active flag true when the state is constructed, and |
|
568 // sets it false when the state is destroyed. These assignments are made with |
|
569 // _active_mutex locked. Meanwhile, empty block deletion is not done while |
|
570 // _concurrent_iteration_active is true. The flag check and the dependent |
|
571 // removal of a block from the _active_list is performed with _active_mutex |
|
572 // locked. This prevents concurrent iteration and empty block deletion from |
|
573 // interfering with each other. |
|
574 // |
|
575 // Both allocate() and delete_empty_blocks_concurrent() lock the |
|
576 // _allocate_mutex while performing their respective list manipulations, |
|
577 // preventing them from interfering with each other. |
|
578 // |
|
579 // When allocate() creates a new block, it is added to the front of the |
|
580 // _active_list. Then _active_head is set to the new block. When concurrent |
|
581 // iteration is started (by a parallel worker thread calling the state's |
|
582 // iterate() function), the current _active_head is used as the initial block |
|
583 // for the iteration, with iteration proceeding down the list headed by that |
|
584 // block. |
|
585 // |
|
586 // As a result, the list over which concurrent iteration operates is stable. |
|
587 // However, once the iteration is started, later allocations may add blocks to |
|
588 // the front of the list that won't be examined by the iteration. And while |
|
589 // the list is stable, concurrent allocate() and release() operations may |
|
590 // change the set of allocated entries in a block at any time during the |
|
591 // iteration. |
|
592 // |
|
593 // As a result, a concurrent iteration handler must accept that some |
|
594 // allocations and releases that occur after the iteration started will not be |
|
595 // seen by the iteration. Further, some may overlap examination by the |
|
596 // iteration. To help with this, allocate() and release() have an invariant |
|
597 // that an entry's value must be NULL when it is not in use. |
|
598 // |
|
599 // An in-progress delete_empty_blocks_concurrent() operation can contend with |
|
600 // the start of a concurrent iteration over the _active_mutex. Since both are |
|
601 // under GC control, that potential contention can be eliminated by never |
|
602 // scheduling both operations to run at the same time. |
|
603 // |
|
604 // ParState<concurrent, is_const> |
|
605 // concurrent must be true if iteration is concurrent with the |
|
606 // mutator, false if iteration is at a safepoint. |
|
607 // |
|
608 // is_const must be true if the iteration is over a constant storage |
|
609 // object, false if the iteration may modify the storage object. |
|
610 // |
|
611 // ParState([const] OopStorage* storage) |
|
612 // Construct an object for managing an iteration over storage. For a |
|
613 // concurrent ParState, empty block deletion for the associated storage |
|
614 // is inhibited for the life of the ParState. There can be no more |
|
615 // than one live concurrent ParState at a time for a given storage object. |
|
616 // |
|
617 // template<typename F> void iterate(F f) |
|
618 // Repeatedly claims a block from the associated storage that has |
|
619 // not been processed by this iteration (possibly by other threads), |
|
620 // and applies f to each entry in the claimed block. Assume p is of |
|
621 // type const oop* or oop*, according to is_const. Then f(p) must be |
|
622 // a valid expression whose value is ignored. Concurrent uses must |
|
623 // be prepared for an entry's value to change at any time, due to |
|
624 // mutator activity. |
|
625 // |
|
626 // template<typename Closure> void oops_do(Closure* cl) |
|
627 // Wrapper around iterate, providing an adaptation layer allowing |
|
628 // the use of OopClosures and similar objects for iteration. Assume |
|
629 // p is of type const oop* or oop*, according to is_const. Then |
|
630 // cl->do_oop(p) must be a valid expression whose value is ignored. |
|
631 // Concurrent uses must be prepared for the entry's value to change |
|
632 // at any time, due to mutator activity. |
|
633 // |
|
634 // Optional operations, provided only if !concurrent && !is_const. |
|
635 // These are not provided when is_const, because the storage object |
|
636 // may be modified by the iteration infrastructure, even if the |
|
637 // provided closure doesn't modify the storage object. These are not |
|
638 // provided when concurrent because any pre-filtering behavior by the |
|
639 // iteration infrastructure is inappropriate for concurrent iteration; |
|
640 // modifications of the storage by the mutator could result in the |
|
641 // pre-filtering being applied (successfully or not) to objects that |
|
642 // are unrelated to what the closure finds in the entry. |
|
643 // |
|
644 // template<typename Closure> void weak_oops_do(Closure* cl) |
|
645 // template<typename IsAliveClosure, typename Closure> |
|
646 // void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) |
|
647 // Wrappers around iterate, providing an adaptation layer allowing |
|
648 // the use of is-alive closures and OopClosures for iteration. |
|
649 // Assume p is of type oop*. Then |
|
650 // |
|
651 // - cl->do_oop(p) must be a valid expression whose value is ignored. |
|
652 // |
|
653 // - is_alive->do_object_b(*p) must be a valid expression whose value |
|
654 // is convertible to bool. |
|
655 // |
|
656 // If *p == NULL then neither is_alive nor cl will be invoked for p. |
|
657 // If is_alive->do_object_b(*p) is false, then cl will not be |
|
658 // invoked on p. |
|
659 |
|
660 class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC { |
|
661 public: |
|
662 BasicParState(OopStorage* storage, bool concurrent); |
|
663 ~BasicParState(); |
|
664 |
|
665 template<bool is_const, typename F> void iterate(F f) { |
|
666 // Wrap f in ATF so we can use Block::iterate. |
|
667 AlwaysTrueFn<F> atf_f(f); |
|
668 ensure_iteration_started(); |
|
669 typename Conditional<is_const, const Block*, Block*>::type block; |
|
670 while ((block = claim_next_block()) != NULL) { |
|
671 block->iterate(atf_f); |
|
672 } |
|
673 } |
|
674 |
|
675 private: |
|
676 OopStorage* _storage; |
|
677 void* volatile _next_block; |
|
678 bool _concurrent; |
|
679 |
|
680 // Noncopyable. |
|
681 BasicParState(const BasicParState&); |
|
682 BasicParState& operator=(const BasicParState&); |
|
683 |
|
684 void update_iteration_state(bool value); |
|
685 void ensure_iteration_started(); |
|
686 Block* claim_next_block(); |
|
687 }; |
|
688 |
|
689 template<bool concurrent, bool is_const> |
|
690 class OopStorage::ParState VALUE_OBJ_CLASS_SPEC { |
|
691 BasicParState _basic_state; |
|
692 |
|
693 public: |
|
694 ParState(const OopStorage* storage) : |
|
695 // For simplicity, always recorded as non-const. |
|
696 _basic_state(const_cast<OopStorage*>(storage), concurrent) |
|
697 {} |
|
698 |
|
699 template<typename F> |
|
700 void iterate(F f) { |
|
701 _basic_state.template iterate<is_const>(f); |
|
702 } |
|
703 |
|
704 template<typename Closure> |
|
705 void oops_do(Closure* cl) { |
|
706 this->iterate(oop_fn(cl)); |
|
707 } |
|
708 }; |
|
709 |
|
710 template<> |
|
711 class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC { |
|
712 BasicParState _basic_state; |
|
713 |
|
714 public: |
|
715 ParState(OopStorage* storage) : |
|
716 _basic_state(storage, false) |
|
717 {} |
|
718 |
|
719 template<typename F> |
|
720 void iterate(F f) { |
|
721 _basic_state.template iterate<false>(f); |
|
722 } |
|
723 |
|
724 template<typename Closure> |
|
725 void oops_do(Closure* cl) { |
|
726 this->iterate(oop_fn(cl)); |
|
727 } |
|
728 |
|
729 template<typename Closure> |
|
730 void weak_oops_do(Closure* cl) { |
|
731 this->iterate(skip_null_fn(oop_fn(cl))); |
|
732 } |
|
733 |
|
734 template<typename IsAliveClosure, typename Closure> |
|
735 void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) { |
|
736 this->iterate(if_alive_fn(is_alive, oop_fn(cl))); |
|
737 } |
|
738 }; |
|
739 |
|
740 #endif // INCLUDE_ALL_GCS |
|
741 |
|
742 #endif // include guard |
279 #endif // include guard |