src/hotspot/share/gc/shared/oopStorage.hpp
changeset 48816 3495d6050efe
parent 48808 2b0b7f222800
child 48886 e1d09bd56d2d
48815:d80d521e9cb1 48816:3495d6050efe
@@ -24,15 +24,11 @@
 
 #ifndef SHARE_GC_SHARED_OOPSTORAGE_HPP
 #define SHARE_GC_SHARED_OOPSTORAGE_HPP
 
 #include "memory/allocation.hpp"
-#include "metaprogramming/conditional.hpp"
-#include "metaprogramming/isConst.hpp"
 #include "oops/oop.hpp"
-#include "utilities/count_trailing_zeros.hpp"
-#include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 
 class Mutex;
 class outputStream;
@@ -129,12 +125,12 @@
   // Then f(p) must be a valid expression.  The result of invoking f(p) must
   // be implicitly convertible to bool.  Iteration terminates and returns
   // false if any invocation of f returns false.  Otherwise, the result of
   // iteration is true.
   // precondition: at safepoint.
-  template<typename F> bool iterate_safepoint(F f);
-  template<typename F> bool iterate_safepoint(F f) const;
+  template<typename F> inline bool iterate_safepoint(F f);
+  template<typename F> inline bool iterate_safepoint(F f) const;
 
   // oops_do and weak_oops_do are wrappers around iterate_safepoint, providing
   // an adaptation layer allowing the use of existing is-alive closures and
   // OopClosures.  Assume p is either const oop* or oop*, depending on whether
   // the associated storage is const or non-const, respectively.  Then
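The contract above accepts any function object whose call operator takes a [const] oop* and returns a value convertible to bool, with false terminating the iteration early. A minimal sketch of such a handler; CountNonNullFn and the surrounding usage are illustrative only, not part of this header:

// Illustration: count the non-NULL entries of a storage object at a safepoint.
class CountNonNullFn {
public:
  explicit CountNonNullFn(size_t* count) : _count(count) {}
  bool operator()(const oop* p) const {
    if (*p != NULL) ++(*_count);
    return true;                // returning false would stop the iteration
  }
private:
  size_t* _count;               // external counter; the functor is passed by value
};

// At a safepoint, with a hypothetical const OopStorage* storage:
//   size_t count = 0;
//   bool completed = storage->iterate_safepoint(CountNonNullFn(&count));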
@@ -146,16 +142,16 @@
   //
   // For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
   // invoked for p.  If is_alive->do_object_b(*p) is false, then closure will
   // not be invoked on p, and *p will be set to NULL.
 
-  template<typename Closure> void oops_do(Closure* closure);
-  template<typename Closure> void oops_do(Closure* closure) const;
-  template<typename Closure> void weak_oops_do(Closure* closure);
+  template<typename Closure> inline void oops_do(Closure* closure);
+  template<typename Closure> inline void oops_do(Closure* closure) const;
+  template<typename Closure> inline void weak_oops_do(Closure* closure);
 
   template<typename IsAliveClosure, typename Closure>
-  void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);
+  inline void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);
 
 #if INCLUDE_ALL_GCS
   // Parallel iteration is for the exclusive use of the GC.
   // Other clients must use serial iteration.
   template<bool concurrent, bool is_const> class ParState;
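For callers that already use HotSpot closures, the wrappers declared above are the intended entry points. A hedged usage sketch; MyIsAliveClosure and MyKeepAliveClosure stand in for whatever BoolObjectClosure / OopClosure subclasses a client actually has, and example_process_storage is not part of the header:

// Illustration only.  Both calls are serial and must run at a safepoint.
void example_process_storage(OopStorage* storage,
                             MyIsAliveClosure* is_alive,        // hypothetical
                             MyKeepAliveClosure* keep_alive) {  // hypothetical
  // Strong iteration: keep_alive->do_oop(p) is applied to every allocated entry.
  storage->oops_do(keep_alive);

  // Weak iteration: NULL entries are skipped; entries whose referent is not
  // alive are cleared (*p set to NULL) without invoking keep_alive.
  storage->weak_oops_do(is_alive, keep_alive);
}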
@@ -179,12 +175,12 @@
 
   // xlC on AIX can't compile test_oopStorage.cpp with the following private
   // classes. C++03 introduced access for nested classes with DR45, but xlC
   // version 12 rejects it.
 NOT_AIX( private: )
-  class Block;
-  class BlockList;
+  class Block;                  // Forward decl; defined in .inline.hpp file.
+  class BlockList;              // Forward decl for BlockEntry friend decl.
 
   class BlockEntry VALUE_OBJ_CLASS_SPEC {
     friend class BlockList;
 
     // Members are mutable, and we deal exclusively with pointers to
@@ -227,66 +223,10 @@
 
     void push_front(const Block& block);
     void push_back(const Block& block);
     void unlink(const Block& block);
   };
-
-  class Block /* No base class, to avoid messing up alignment requirements */ {
-    // _data must be the first non-static data member, for alignment.
-    oop _data[BitsPerWord];
-    static const unsigned _data_pos = 0; // Position of _data.
-
-    volatile uintx _allocated_bitmask; // One bit per _data element.
-    const OopStorage* _owner;
-    void* _memory;              // Unaligned storage containing block.
-    BlockEntry _active_entry;
-    BlockEntry _allocate_entry;
-
-    Block(const OopStorage* owner, void* memory);
-    ~Block();
-
-    void check_index(unsigned index) const;
-    unsigned get_index(const oop* ptr) const;
-
-    template<typename F, typename BlockPtr>
-    static bool iterate_impl(F f, BlockPtr b);
-
-    // Noncopyable.
-    Block(const Block&);
-    Block& operator=(const Block&);
-
-  public:
-    static const BlockEntry& get_active_entry(const Block& block);
-    static const BlockEntry& get_allocate_entry(const Block& block);
-
-    static size_t allocation_size();
-    static size_t allocation_alignment_shift();
-
-    oop* get_pointer(unsigned index);
-    const oop* get_pointer(unsigned index) const;
-
-    uintx bitmask_for_index(unsigned index) const;
-    uintx bitmask_for_entry(const oop* ptr) const;
-
-    // Allocation bitmask accessors are racy.
-    bool is_full() const;
-    bool is_empty() const;
-    uintx allocated_bitmask() const;
-    uintx cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value);
-
-    bool contains(const oop* ptr) const;
-
-    // Returns NULL if ptr is not in a block or not allocated in that block.
-    static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);
-
-    oop* allocate();
-    static Block* new_block(const OopStorage* owner);
-    static void delete_block(const Block& block);
-
-    template<typename F> bool iterate(F f);
-    template<typename F> bool iterate(F f) const;
-  }; // class Block
 
 private:
   const char* _name;
   BlockList _active_list;
   BlockList _allocate_list;
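The Block class removed above (now kept in oopStorage.inline.hpp) pairs BitsPerWord oop slots with a one-bit-per-slot allocation bitmask. Its allocate() is defined in oopStorage.cpp; the sketch below only illustrates the bookkeeping that the declarations imply. example_claim is a hypothetical helper, it ignores Block's access control, and it assumes the usual HotSpot convention that a cmpxchg accessor returns the previous value:

// Sketch only, not the actual implementation.
oop* example_claim(OopStorage::Block* block) {
  for (;;) {
    uintx old_mask = block->allocated_bitmask();         // racy snapshot
    if (old_mask == ~uintx(0)) {
      return NULL;                                       // all _data slots in use
    }
    unsigned index = count_trailing_zeros(~old_mask);    // lowest clear bit
    uintx new_mask = old_mask | block->bitmask_for_index(index);
    if (block->cmpxchg_allocated_bitmask(new_mask, old_mask) == old_mask) {
      return block->get_pointer(index);                  // entry claimed
    }
    // Lost the race; retry with a fresh bitmask.
  }
}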
@@ -332,411 +272,8 @@
   static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f);
 
   // Wrapper for iteration handler, automatically skipping NULL entries.
   template<typename F> class SkipNullFn;
   template<typename F> static SkipNullFn<F> skip_null_fn(F f);
-
-  // Wrapper for iteration handler; ignore handler result and return true.
-  template<typename F> class AlwaysTrueFn;
 };
 
-inline OopStorage::Block* OopStorage::BlockList::head() {
-  return const_cast<Block*>(_head);
-}
-
-inline const OopStorage::Block* OopStorage::BlockList::chead() const {
-  return _head;
-}
-
-inline const OopStorage::Block* OopStorage::BlockList::ctail() const {
-  return _tail;
-}
-
-inline OopStorage::Block* OopStorage::BlockList::prev(Block& block) {
-  return const_cast<Block*>(_get_entry(block)._prev);
-}
-
-inline OopStorage::Block* OopStorage::BlockList::next(Block& block) {
-  return const_cast<Block*>(_get_entry(block)._next);
-}
-
-inline const OopStorage::Block* OopStorage::BlockList::prev(const Block& block) const {
-  return _get_entry(block)._prev;
-}
-
-inline const OopStorage::Block* OopStorage::BlockList::next(const Block& block) const {
-  return _get_entry(block)._next;
-}
-
-template<typename Closure>
-class OopStorage::OopFn VALUE_OBJ_CLASS_SPEC {
-public:
-  explicit OopFn(Closure* cl) : _cl(cl) {}
-
-  template<typename OopPtr>     // [const] oop*
-  bool operator()(OopPtr ptr) const {
-    _cl->do_oop(ptr);
-    return true;
-  }
-
-private:
-  Closure* _cl;
-};
-
-template<typename Closure>
-inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
-  return OopFn<Closure>(cl);
-}
-
-template<typename IsAlive, typename F>
-class OopStorage::IfAliveFn VALUE_OBJ_CLASS_SPEC {
-public:
-  IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}
-
-  bool operator()(oop* ptr) const {
-    bool result = true;
-    oop v = *ptr;
-    if (v != NULL) {
-      if (_is_alive->do_object_b(v)) {
-        result = _f(ptr);
-      } else {
-        *ptr = NULL;            // Clear dead value.
-      }
-    }
-    return result;
-  }
-
-private:
-  IsAlive* _is_alive;
-  F _f;
-};
-
-template<typename IsAlive, typename F>
-inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
-  return IfAliveFn<IsAlive, F>(is_alive, f);
-}
-
-template<typename F>
-class OopStorage::SkipNullFn VALUE_OBJ_CLASS_SPEC {
-public:
-  SkipNullFn(F f) : _f(f) {}
-
-  template<typename OopPtr>     // [const] oop*
-  bool operator()(OopPtr ptr) const {
-    return (*ptr != NULL) ? _f(ptr) : true;
-  }
-
-private:
-  F _f;
-};
-
-template<typename F>
-inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
-  return SkipNullFn<F>(f);
-}
-
-template<typename F>
-class OopStorage::AlwaysTrueFn VALUE_OBJ_CLASS_SPEC {
-  F _f;
-
-public:
-  AlwaysTrueFn(F f) : _f(f) {}
-
-  template<typename OopPtr>     // [const] oop*
-  bool operator()(OopPtr ptr) const { _f(ptr); return true; }
-};
-
-// Inline Block accesses for use in iteration inner loop.
-
-inline void OopStorage::Block::check_index(unsigned index) const {
-  assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
-}
-
-inline oop* OopStorage::Block::get_pointer(unsigned index) {
-  check_index(index);
-  return &_data[index];
-}
-
-inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
-  check_index(index);
-  return &_data[index];
-}
-
-inline uintx OopStorage::Block::allocated_bitmask() const {
-  return _allocated_bitmask;
-}
-
-inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
-  check_index(index);
-  return uintx(1) << index;
-}
-
-// Provide const or non-const iteration, depending on whether BlockPtr
-// is const Block* or Block*, respectively.
-template<typename F, typename BlockPtr> // BlockPtr := [const] Block*
-inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) {
-  uintx bitmask = block->allocated_bitmask();
-  while (bitmask != 0) {
-    unsigned index = count_trailing_zeros(bitmask);
-    bitmask ^= block->bitmask_for_index(index);
-    if (!f(block->get_pointer(index))) {
-      return false;
-    }
-  }
-  return true;
-}
-
-template<typename F>
-inline bool OopStorage::Block::iterate(F f) {
-  return iterate_impl(f, this);
-}
-
-template<typename F>
-inline bool OopStorage::Block::iterate(F f) const {
-  return iterate_impl(f, this);
-}
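The iterate_impl loop above visits exactly the allocated indices, lowest first, by repeatedly extracting the lowest set bit of the bitmask. The same idiom applied to a literal mask, purely for illustration:

// With mask == 0x16 (binary 10110) the loop visits indices 1, 2 and 4.
void example_bit_scan() {
  uintx mask = uintx(0x16);             // stand-in for allocated_bitmask()
  while (mask != 0) {
    unsigned index = count_trailing_zeros(mask);
    mask ^= uintx(1) << index;          // clear the bit just visited
    // ... f(block->get_pointer(index)) would run here ...
  }
}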
       
-
-//////////////////////////////////////////////////////////////////////////////
-// Support for serial iteration, always at a safepoint.
-
-// Provide const or non-const iteration, depending on whether Storage is
-// const OopStorage* or OopStorage*, respectively.
-template<typename F, typename Storage> // Storage := [const] OopStorage
-inline bool OopStorage::iterate_impl(F f, Storage* storage) {
-  assert_at_safepoint();
-  // Propagate const/non-const iteration to the block layer, by using
-  // const or non-const blocks as corresponding to Storage.
-  typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr;
-  for (BlockPtr block = storage->_active_head;
-       block != NULL;
-       block = storage->_active_list.next(*block)) {
-    if (!block->iterate(f)) {
-      return false;
-    }
-  }
-  return true;
-}
-
-template<typename F>
-inline bool OopStorage::iterate_safepoint(F f) {
-  return iterate_impl(f, this);
-}
-
-template<typename F>
-inline bool OopStorage::iterate_safepoint(F f) const {
-  return iterate_impl(f, this);
-}
-
-template<typename Closure>
-inline void OopStorage::oops_do(Closure* cl) {
-  iterate_safepoint(oop_fn(cl));
-}
-
-template<typename Closure>
-inline void OopStorage::oops_do(Closure* cl) const {
-  iterate_safepoint(oop_fn(cl));
-}
-
-template<typename Closure>
-inline void OopStorage::weak_oops_do(Closure* cl) {
-  iterate_safepoint(skip_null_fn(oop_fn(cl)));
-}
-
-template<typename IsAliveClosure, typename Closure>
-inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
-  iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl)));
-}
-
-#if INCLUDE_ALL_GCS
-
-//////////////////////////////////////////////////////////////////////////////
-// Support for parallel and optionally concurrent state iteration.
-//
-// Parallel iteration is for the exclusive use of the GC.  Other iteration
-// clients must use serial iteration.
-//
-// Concurrent Iteration
-//
-// Iteration involves the _active_list, which contains all of the blocks owned
-// by a storage object.  This is a doubly-linked list, linked through
-// dedicated fields in the blocks.
-//
-// At most one concurrent ParState can exist at a time for a given storage
-// object.
-//
-// A concurrent ParState sets the associated storage's
-// _concurrent_iteration_active flag true when the state is constructed, and
-// sets it false when the state is destroyed.  These assignments are made with
-// _active_mutex locked.  Meanwhile, empty block deletion is not done while
-// _concurrent_iteration_active is true.  The flag check and the dependent
-// removal of a block from the _active_list are performed with _active_mutex
-// locked.  This prevents concurrent iteration and empty block deletion from
-// interfering with each other.
-//
-// Both allocate() and delete_empty_blocks_concurrent() lock the
-// _allocate_mutex while performing their respective list manipulations,
-// preventing them from interfering with each other.
-//
-// When allocate() creates a new block, it is added to the front of the
-// _active_list.  Then _active_head is set to the new block.  When concurrent
-// iteration is started (by a parallel worker thread calling the state's
-// iterate() function), the current _active_head is used as the initial block
-// for the iteration, with iteration proceeding down the list headed by that
-// block.
-//
-// As a result, the list over which concurrent iteration operates is stable.
-// However, once the iteration is started, later allocations may add blocks to
-// the front of the list that won't be examined by the iteration.  And while
-// the list is stable, concurrent allocate() and release() operations may
-// change the set of allocated entries in a block at any time during the
-// iteration.
-//
-// As a result, a concurrent iteration handler must accept that some
-// allocations and releases that occur after the iteration started will not be
-// seen by the iteration.  Further, some may overlap examination by the
-// iteration.  To help with this, allocate() and release() have an invariant
-// that an entry's value must be NULL when it is not in use.
-//
-// An in-progress delete_empty_blocks_concurrent() operation can contend with
-// the start of a concurrent iteration over the _active_mutex.  Since both are
-// under GC control, that potential contention can be eliminated by never
-// scheduling both operations to run at the same time.
-//
-// ParState<concurrent, is_const>
-//   concurrent must be true if iteration is concurrent with the
-//   mutator, false if iteration is at a safepoint.
-//
-//   is_const must be true if the iteration is over a constant storage
-//   object, false if the iteration may modify the storage object.
-//
-// ParState([const] OopStorage* storage)
-//   Construct an object for managing an iteration over storage.  For a
-//   concurrent ParState, empty block deletion for the associated storage
-//   is inhibited for the life of the ParState.  There can be no more
-//   than one live concurrent ParState at a time for a given storage object.
-//
-// template<typename F> void iterate(F f)
-//   Repeatedly claims a block from the associated storage that has
-//   not been processed by this iteration (possibly by other threads),
-//   and applies f to each entry in the claimed block. Assume p is of
-//   type const oop* or oop*, according to is_const. Then f(p) must be
-//   a valid expression whose value is ignored.  Concurrent uses must
-//   be prepared for an entry's value to change at any time, due to
-//   mutator activity.
-//
-// template<typename Closure> void oops_do(Closure* cl)
-//   Wrapper around iterate, providing an adaptation layer allowing
-//   the use of OopClosures and similar objects for iteration.  Assume
-//   p is of type const oop* or oop*, according to is_const.  Then
-//   cl->do_oop(p) must be a valid expression whose value is ignored.
-//   Concurrent uses must be prepared for the entry's value to change
-//   at any time, due to mutator activity.
-//
-// Optional operations, provided only if !concurrent && !is_const.
-// These are not provided when is_const, because the storage object
-// may be modified by the iteration infrastructure, even if the
-// provided closure doesn't modify the storage object.  These are not
-// provided when concurrent because any pre-filtering behavior by the
-// iteration infrastructure is inappropriate for concurrent iteration;
-// modifications of the storage by the mutator could result in the
-// pre-filtering being applied (successfully or not) to objects that
-// are unrelated to what the closure finds in the entry.
-//
-// template<typename Closure> void weak_oops_do(Closure* cl)
-// template<typename IsAliveClosure, typename Closure>
-// void weak_oops_do(IsAliveClosure* is_alive, Closure* cl)
-//   Wrappers around iterate, providing an adaptation layer allowing
-//   the use of is-alive closures and OopClosures for iteration.
-//   Assume p is of type oop*.  Then
-//
-//   - cl->do_oop(p) must be a valid expression whose value is ignored.
-//
-//   - is_alive->do_object_b(*p) must be a valid expression whose value
-//   is convertible to bool.
-//
-//   If *p == NULL then neither is_alive nor cl will be invoked for p.
-//   If is_alive->do_object_b(*p) is false, then cl will not be
-//   invoked on p.
-
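As documented above, a ParState is constructed once per parallel iteration and each worker thread then calls iterate() or oops_do() until every block has been claimed. A hedged sketch of safepoint (non-concurrent, non-const) use; ExampleParTask, MyOopClosure and the AbstractGangTask base are illustrative assumptions, and only OopStorage::ParState comes from this header:

// Sketch only.
class ExampleParTask : public AbstractGangTask {
  OopStorage::ParState<false, false> _par_state;  // safepoint, may modify storage
public:
  ExampleParTask(OopStorage* storage)
    : AbstractGangTask("Example OopStorage iteration"), _par_state(storage) {}

  virtual void work(uint worker_id) {
    MyOopClosure cl;              // hypothetical OopClosure subclass
    // Each worker repeatedly claims not-yet-processed blocks until none remain.
    _par_state.oops_do(&cl);
  }
};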
       
-class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC {
-public:
-  BasicParState(OopStorage* storage, bool concurrent);
-  ~BasicParState();
-
-  template<bool is_const, typename F> void iterate(F f) {
-    // Wrap f in ATF so we can use Block::iterate.
-    AlwaysTrueFn<F> atf_f(f);
-    ensure_iteration_started();
-    typename Conditional<is_const, const Block*, Block*>::type block;
-    while ((block = claim_next_block()) != NULL) {
-      block->iterate(atf_f);
-    }
-  }
-
-private:
-  OopStorage* _storage;
-  void* volatile _next_block;
-  bool _concurrent;
-
-  // Noncopyable.
-  BasicParState(const BasicParState&);
-  BasicParState& operator=(const BasicParState&);
-
-  void update_iteration_state(bool value);
-  void ensure_iteration_started();
-  Block* claim_next_block();
-};
-
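BasicParState's private update_iteration_state(), declared above, is where the protocol from the comment block is implemented (in oopStorage.cpp): the _concurrent_iteration_active flag is toggled under _active_mutex, and only for concurrent states. A sketch of what that could look like if it follows the comment literally; the MutexLockerEx guard and the no-safepoint-check flag are assumptions, and the member names are taken from the comment rather than from code shown in this diff:

// Sketch only; the real definition lives in oopStorage.cpp.
void OopStorage::BasicParState::update_iteration_state(bool value) {
  if (_concurrent) {
    MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
    _storage->_concurrent_iteration_active = value;
  }
}
// Per the comment block, the constructor would call update_iteration_state(true)
// and the destructor update_iteration_state(false).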
       
-template<bool concurrent, bool is_const>
-class OopStorage::ParState VALUE_OBJ_CLASS_SPEC {
-  BasicParState _basic_state;
-
-public:
-  ParState(const OopStorage* storage) :
-    // For simplicity, always recorded as non-const.
-    _basic_state(const_cast<OopStorage*>(storage), concurrent)
-  {}
-
-  template<typename F>
-  void iterate(F f) {
-    _basic_state.template iterate<is_const>(f);
-  }
-
-  template<typename Closure>
-  void oops_do(Closure* cl) {
-    this->iterate(oop_fn(cl));
-  }
-};
-
-template<>
-class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC {
-  BasicParState _basic_state;
-
-public:
-  ParState(OopStorage* storage) :
-    _basic_state(storage, false)
-  {}
-
-  template<typename F>
-  void iterate(F f) {
-    _basic_state.template iterate<false>(f);
-  }
-
-  template<typename Closure>
-  void oops_do(Closure* cl) {
-    this->iterate(oop_fn(cl));
-  }
-
-  template<typename Closure>
-  void weak_oops_do(Closure* cl) {
-    this->iterate(skip_null_fn(oop_fn(cl)));
-  }
-
-  template<typename IsAliveClosure, typename Closure>
-  void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
-    this->iterate(if_alive_fn(is_alive, oop_fn(cl)));
-  }
-};
-
-#endif // INCLUDE_ALL_GCS
-
 #endif // include guard