hotspot/src/share/vm/gc_interface/collectedHeap.hpp
changeset 4637 af4d405aacc1
parent 4636 90e004691873
child 5343 95a5c4b89273
comparison of 4636:90e004691873 (left line numbers) with 4637:af4d405aacc1 (right line numbers)
   49     49  #endif
   50     50
   51     51    // Used for filler objects (static, but initialized in ctor).
   52     52    static size_t _filler_array_max_size;
   53     53
          54    // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
          55    bool _defer_initial_card_mark;
          56
   54     57   protected:
   55     58    MemRegion _reserved;
   56     59    BarrierSet* _barrier_set;
   57     60    bool _is_gc_active;
   58     61    unsigned int _total_collections;          // ... started
   68     71    PerfStringVariable* _perf_gc_lastcause;
   69     72
   70     73    // Constructor
   71     74    CollectedHeap();
   72     75
          76    // Do common initializations that must follow instance construction,
          77    // for example, those needing virtual calls.
          78    // This code could perhaps be moved into initialize() but would
          79    // be slightly more awkward because we want the latter to be a
          80    // pure virtual.
          81    void pre_initialize();
          82
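The point of pre_initialize() is that it runs after the most-derived heap object is fully constructed, so it can make virtual calls that the CollectedHeap constructor cannot. A minimal sketch of how such a hook could set the new _defer_initial_card_mark field; the body is not part of this hunk, and the exact condition, the can_elide_tlab_store_barriers() call, and the use of card_mark_must_follow_store() (declared further down in this change) are assumptions:

    // Sketch only -- not the code behind the declaration above.
    void CollectedHeap::pre_initialize() {
    #ifdef COMPILER2
      // Assumption: deferring the initial card mark only pays off when the
      // compiler elides initializing store barriers (ReduceInitialCardMarks)
      // and the concrete heap needs the card mark to follow the stores.
      _defer_initial_card_mark = ReduceInitialCardMarks &&
                                 can_elide_tlab_store_barriers() &&
                                 card_mark_must_follow_store();
    #else
      _defer_initial_card_mark = false;
    #endif
    }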
   73     83    // Create a new tlab
   74     84    virtual HeapWord* allocate_new_tlab(size_t size);
   75
   76            // Fix up tlabs to make the heap well-formed again,
   77            // optionally retiring the tlabs.
   78            virtual void fill_all_tlabs(bool retire);
   79     85
   80     86    // Accumulate statistics on all tlabs.
   81     87    virtual void accumulate_statistics_all_tlabs();
   82     88
   83     89    // Reinitialize tlabs before resuming mutators.
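For a heap that supports TLABs, allocate_new_tlab() is the hook that hands a fresh buffer to a thread. A sketch of one plausible override, assuming a hypothetical SketchHeap with an eden() space (neither name comes from this file):

    // Sketch only: one way a concrete heap could satisfy this interface.
    // SketchHeap and eden() are hypothetical.
    HeapWord* SketchHeap::allocate_new_tlab(size_t size) {
      assert(UseTLAB, "should not reach here unless TLABs are enabled");
      // Carve the buffer out of eden concurrently with other threads;
      // a NULL result tells the caller to fall back to a shared slow path.
      return eden()->par_allocate(size);
    }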
  429    435    // there is probably a corresponding slow path which can produce
  430    436    // an object allocated anywhere.  The compiler's runtime support
  431    437    // promises to call this function on such a slow-path-allocated
  432    438    // object before performing initializations that have elided
  433    439    // store barriers. Returns new_obj, or maybe a safer copy thereof.
  434            virtual oop defer_store_barrier(JavaThread* thread, oop new_obj);
         440    virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
  435    441
  436    442    // Answers whether an initializing store to a new object currently
  437            // allocated at the given address doesn't need a (deferred) store
         443    // allocated at the given address doesn't need a store
  438    444    // barrier. Returns "true" if it doesn't need an initializing
  439    445    // store barrier; answers "false" if it does.
  440    446    virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
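Taken together, new_store_pre_barrier() and can_elide_initializing_store_barrier() describe a contract with the compiler's runtime. A sketch of the caller side under that contract; the slow-path helper names are hypothetical and do not come from this file:

    // Sketch of the caller-side contract only; slow_path_allocate() and
    // initialize_header_and_fields() are hypothetical stand-ins.
    oop runtime_new_instance_slow(JavaThread* thread, KlassHandle klass) {
      // Allocation missed the inline (TLAB) fast path, so the object may
      // live anywhere in the heap.
      oop new_obj = slow_path_allocate(thread, klass);
      // Inform the heap *before* performing initializing stores whose
      // barriers the compiler elided; the heap may defer the card mark
      // or hand back a safer copy of the object.
      new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj);
      initialize_header_and_fields(new_obj);   // elided-barrier stores
      return new_obj;
    }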
       
         447
         448    // If a compiler is eliding store barriers for TLAB-allocated objects,
         449    // we will be informed of a slow-path allocation by a call
         450    // to new_store_pre_barrier() above. Such a call precedes the
         451    // initialization of the object itself, and no post-store-barriers will
         452    // be issued. Some heap types require that the barrier strictly follows
         453    // the initializing stores. (This is currently implemented by deferring the
         454    // barrier until the next slow-path allocation or gc-related safepoint.)
         455    // This interface answers whether a particular heap type needs the card
         456    // mark to be thus strictly sequenced after the stores.
         457    virtual bool card_mark_must_follow_store() const = 0;
  441    458
  442    459    // If the CollectedHeap was asked to defer a store barrier above,
  443    460    // this informs it to flush such a deferred store barrier to the
  444    461    // remembered set.
  445    462    virtual void flush_deferred_store_barrier(JavaThread* thread);
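One way the deferral described above can fit together, sketched under the assumption that the pending region is stashed on the JavaThread (deferred_card_mark() accessors) and that the barrier set's write_region() dirties the corresponding cards; this is an illustration, not the code behind these declarations:

    // Sketch only: a possible shape for the defer/flush pair.
    oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
      // Any card mark still pending from an earlier allocation is issued first.
      flush_deferred_store_barrier(thread);
      if (!can_elide_initializing_store_barrier(new_obj)) {
        MemRegion mr((HeapWord*)new_obj, new_obj->size());
        if (_defer_initial_card_mark) {
          // Remember the object's footprint; the card mark happens at the
          // next slow-path allocation or gc-related safepoint.
          thread->set_deferred_card_mark(mr);
        } else {
          _barrier_set->write_region(mr);   // dirty the cards immediately
        }
      }
      return new_obj;
    }

    void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
      MemRegion deferred = thread->deferred_card_mark();
      if (!deferred.is_empty()) {
        _barrier_set->write_region(deferred);          // issue the pending card mark
        thread->set_deferred_card_mark(MemRegion());   // clear the deferral
      }
    }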