hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
changeset 22551 9bf46d16dcc6
parent 18996 05c86e558c94
child 22882 195c8f70d605
equal deleted inserted replaced
22550:820966182ab9 22551:9bf46d16dcc6
   169 
   169 
   170 // Represents a marking stack used by the CMS collector.
   170 // Represents a marking stack used by the CMS collector.
   171 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
   171 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
   172 class CMSMarkStack: public CHeapObj<mtGC>  {
   172 class CMSMarkStack: public CHeapObj<mtGC>  {
   173   //
   173   //
   174   friend class CMSCollector;   // to get at expasion stats further below
   174   friend class CMSCollector;   // To get at expansion stats further below.
   175   //
   175   //
   176 
   176 
   177   VirtualSpace _virtual_space;  // space for the stack
   177   VirtualSpace _virtual_space;  // Space for the stack
   178   oop*   _base;      // bottom of stack
   178   oop*   _base;      // Bottom of stack
   179   size_t _index;     // one more than last occupied index
   179   size_t _index;     // One more than last occupied index
   180   size_t _capacity;  // max #elements
   180   size_t _capacity;  // Max #elements
   181   Mutex  _par_lock;  // an advisory lock used in case of parallel access
   181   Mutex  _par_lock;  // An advisory lock used in case of parallel access
   182   NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run
   182   NOT_PRODUCT(size_t _max_depth;)  // Max depth plumbed during run
   183 
   183 
   184  protected:
   184  protected:
   185   size_t _hit_limit;      // we hit max stack size limit
   185   size_t _hit_limit;      // We hit max stack size limit
   186   size_t _failed_double;  // we failed expansion before hitting limit
   186   size_t _failed_double;  // We failed expansion before hitting limit
   187 
   187 
   188  public:
   188  public:
   189   CMSMarkStack():
   189   CMSMarkStack():
   190     _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
   190     _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
   191     _hit_limit(0),
   191     _hit_limit(0),
   236   // Forcibly reset the stack, losing all of its contents.
   236   // Forcibly reset the stack, losing all of its contents.
   237   void reset() {
   237   void reset() {
   238     _index = 0;
   238     _index = 0;
   239   }
   239   }
   240 
   240 
   241   // Expand the stack, typically in response to an overflow condition
   241   // Expand the stack, typically in response to an overflow condition.
   242   void expand();
   242   void expand();
   243 
   243 
   244   // Compute the least valued stack element.
   244   // Compute the least valued stack element.
   245   oop least_value(HeapWord* low) {
   245   oop least_value(HeapWord* low) {
   246      oop least = (oop)low;
   246      oop least = (oop)low;
   248        least = MIN2(least, _base[i]);
   248        least = MIN2(least, _base[i]);
   249      }
   249      }
   250      return least;
   250      return least;
   251   }
   251   }
   252 
   252 
   253   // Exposed here to allow stack expansion in || case
   253   // Exposed here to allow stack expansion in || case.
   254   Mutex* par_lock() { return &_par_lock; }
   254   Mutex* par_lock() { return &_par_lock; }
   255 };
   255 };
   256 
   256 
   257 class CardTableRS;
   257 class CardTableRS;
   258 class CMSParGCThreadState;
   258 class CMSParGCThreadState;
   555 
   555 
   556   // Overflow list of grey objects, threaded through mark-word
   556   // Overflow list of grey objects, threaded through mark-word
   557   // Manipulated with CAS in the parallel/multi-threaded case.
   557   // Manipulated with CAS in the parallel/multi-threaded case.
   558   oop _overflow_list;
   558   oop _overflow_list;
   559   // The following array-pair keeps track of mark words
   559   // The following array-pair keeps track of mark words
   560   // displaced for accomodating overflow list above.
   560   // displaced for accommodating overflow list above.
   561   // This code will likely be revisited under RFE#4922830.
   561   // This code will likely be revisited under RFE#4922830.
   562   Stack<oop, mtGC>     _preserved_oop_stack;
   562   Stack<oop, mtGC>     _preserved_oop_stack;
   563   Stack<markOop, mtGC> _preserved_mark_stack;
   563   Stack<markOop, mtGC> _preserved_mark_stack;
   564 
   564 
   565   int*             _hash_seed;
   565   int*             _hash_seed;
   597   // Verification support
   597   // Verification support
   598   CMSBitMap     _verification_mark_bm;
   598   CMSBitMap     _verification_mark_bm;
   599   void verify_after_remark_work_1();
   599   void verify_after_remark_work_1();
   600   void verify_after_remark_work_2();
   600   void verify_after_remark_work_2();
   601 
   601 
   602   // true if any verification flag is on.
   602   // True if any verification flag is on.
   603   bool _verifying;
   603   bool _verifying;
   604   bool verifying() const { return _verifying; }
   604   bool verifying() const { return _verifying; }
   605   void set_verifying(bool v) { _verifying = v; }
   605   void set_verifying(bool v) { _verifying = v; }
   606 
   606 
   607   // Collector policy
   607   // Collector policy
   609   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
   609   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
   610 
   610 
   611   void set_did_compact(bool v);
   611   void set_did_compact(bool v);
   612 
   612 
   613   // XXX Move these to CMSStats ??? FIX ME !!!
   613   // XXX Move these to CMSStats ??? FIX ME !!!
   614   elapsedTimer _inter_sweep_timer;   // time between sweeps
   614   elapsedTimer _inter_sweep_timer;   // Time between sweeps
   615   elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
   615   elapsedTimer _intra_sweep_timer;   // Time _in_ sweeps
   616   // padded decaying average estimates of the above
   616   // Padded decaying average estimates of the above
   617   AdaptivePaddedAverage _inter_sweep_estimate;
   617   AdaptivePaddedAverage _inter_sweep_estimate;
   618   AdaptivePaddedAverage _intra_sweep_estimate;
   618   AdaptivePaddedAverage _intra_sweep_estimate;
   619 
   619 
   620   CMSTracer* _gc_tracer_cm;
   620   CMSTracer* _gc_tracer_cm;
   621   ConcurrentGCTimer* _gc_timer_cm;
   621   ConcurrentGCTimer* _gc_timer_cm;
   630   void register_gc_end();
   630   void register_gc_end();
   631   void save_heap_summary();
   631   void save_heap_summary();
   632   void report_heap_summary(GCWhen::Type when);
   632   void report_heap_summary(GCWhen::Type when);
   633 
   633 
   634  protected:
   634  protected:
   635   ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
   635   ConcurrentMarkSweepGeneration* _cmsGen;  // Old gen (CMS)
   636   MemRegion                      _span;    // span covering above two
   636   MemRegion                      _span;    // Span covering above two
   637   CardTableRS*                   _ct;      // card table
   637   CardTableRS*                   _ct;      // Card table
   638 
   638 
   639   // CMS marking support structures
   639   // CMS marking support structures
   640   CMSBitMap     _markBitMap;
   640   CMSBitMap     _markBitMap;
   641   CMSBitMap     _modUnionTable;
   641   CMSBitMap     _modUnionTable;
   642   CMSMarkStack  _markStack;
   642   CMSMarkStack  _markStack;
   643 
   643 
   644   HeapWord*     _restart_addr; // in support of marking stack overflow
   644   HeapWord*     _restart_addr; // In support of marking stack overflow
   645   void          lower_restart_addr(HeapWord* low);
   645   void          lower_restart_addr(HeapWord* low);
   646 
   646 
   647   // Counters in support of marking stack / work queue overflow handling:
   647   // Counters in support of marking stack / work queue overflow handling:
   648   // a non-zero value indicates certain types of overflow events during
   648   // a non-zero value indicates certain types of overflow events during
   649   // the current CMS cycle and could lead to stack resizing efforts at
   649   // the current CMS cycle and could lead to stack resizing efforts at
   654   size_t        _ser_kac_preclean_ovflw;
   654   size_t        _ser_kac_preclean_ovflw;
   655   size_t        _ser_kac_ovflw;
   655   size_t        _ser_kac_ovflw;
   656   size_t        _par_kac_ovflw;
   656   size_t        _par_kac_ovflw;
   657   NOT_PRODUCT(ssize_t _num_par_pushes;)
   657   NOT_PRODUCT(ssize_t _num_par_pushes;)
   658 
   658 
   659   // ("Weak") Reference processing support
   659   // ("Weak") Reference processing support.
   660   ReferenceProcessor*            _ref_processor;
   660   ReferenceProcessor*            _ref_processor;
   661   CMSIsAliveClosure              _is_alive_closure;
   661   CMSIsAliveClosure              _is_alive_closure;
   662       // keep this textually after _markBitMap and _span; c'tor dependency
   662   // Keep this textually after _markBitMap and _span; c'tor dependency.
   663 
   663 
   664   ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
   664   ConcurrentMarkSweepThread*     _cmsThread;   // The thread doing the work
   665   ModUnionClosure    _modUnionClosure;
   665   ModUnionClosure    _modUnionClosure;
   666   ModUnionClosurePar _modUnionClosurePar;
   666   ModUnionClosurePar _modUnionClosurePar;
   667 
   667 
   668   // CMS abstract state machine
   668   // CMS abstract state machine
   669   // initial_state: Idling
   669   // initial_state: Idling
   695   static CollectorState _collectorState;
   695   static CollectorState _collectorState;
   696 
   696 
   697   // State related to prologue/epilogue invocation for my generations
   697   // State related to prologue/epilogue invocation for my generations
   698   bool _between_prologue_and_epilogue;
   698   bool _between_prologue_and_epilogue;
   699 
   699 
   700   // Signalling/State related to coordination between fore- and backgroud GC
   700   // Signaling/State related to coordination between fore- and background GC
   701   // Note: When the baton has been passed from background GC to foreground GC,
   701   // Note: When the baton has been passed from background GC to foreground GC,
   702   // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
   702   // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
   703   static bool _foregroundGCIsActive;    // true iff foreground collector is active or
   703   static bool _foregroundGCIsActive;    // true iff foreground collector is active or
   704                                  // wants to go active
   704                                  // wants to go active
   705   static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
   705   static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
   710   bool _start_sampling;
   710   bool _start_sampling;
   711 
   711 
   712   int    _numYields;
   712   int    _numYields;
   713   size_t _numDirtyCards;
   713   size_t _numDirtyCards;
   714   size_t _sweep_count;
   714   size_t _sweep_count;
   715   // number of full gc's since the last concurrent gc.
   715   // Number of full gc's since the last concurrent gc.
   716   uint   _full_gcs_since_conc_gc;
   716   uint   _full_gcs_since_conc_gc;
   717 
   717 
   718   // occupancy used for bootstrapping stats
   718   // Occupancy used for bootstrapping stats
   719   double _bootstrap_occupancy;
   719   double _bootstrap_occupancy;
   720 
   720 
   721   // timer
   721   // Timer
   722   elapsedTimer _timer;
   722   elapsedTimer _timer;
   723 
   723 
   724   // Timing, allocation and promotion statistics, used for scheduling.
   724   // Timing, allocation and promotion statistics, used for scheduling.
   725   CMSStats      _stats;
   725   CMSStats      _stats;
   726 
   726 
   768   bool par_take_from_overflow_list(size_t num,
   768   bool par_take_from_overflow_list(size_t num,
   769                                    OopTaskQueue* to_work_q,
   769                                    OopTaskQueue* to_work_q,
   770                                    int no_of_gc_threads);
   770                                    int no_of_gc_threads);
   771   void push_on_overflow_list(oop p);
   771   void push_on_overflow_list(oop p);
   772   void par_push_on_overflow_list(oop p);
   772   void par_push_on_overflow_list(oop p);
   773   // the following is, obviously, not, in general, "MT-stable"
   773   // The following is, obviously, not, in general, "MT-stable"
   774   bool overflow_list_is_empty() const;
   774   bool overflow_list_is_empty() const;
   775 
   775 
   776   void preserve_mark_if_necessary(oop p);
   776   void preserve_mark_if_necessary(oop p);
   777   void par_preserve_mark_if_necessary(oop p);
   777   void par_preserve_mark_if_necessary(oop p);
   778   void preserve_mark_work(oop p, markOop m);
   778   void preserve_mark_work(oop p, markOop m);
   779   void restore_preserved_marks_if_any();
   779   void restore_preserved_marks_if_any();
   780   NOT_PRODUCT(bool no_preserved_marks() const;)
   780   NOT_PRODUCT(bool no_preserved_marks() const;)
   781   // in support of testing overflow code
   781   // In support of testing overflow code
   782   NOT_PRODUCT(int _overflow_counter;)
   782   NOT_PRODUCT(int _overflow_counter;)
   783   NOT_PRODUCT(bool simulate_overflow();)       // sequential
   783   NOT_PRODUCT(bool simulate_overflow();)       // Sequential
   784   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
   784   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
   785 
   785 
   786   // CMS work methods
   786   // CMS work methods
   787   void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
   787   void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
   788 
   788 
   789   // a return value of false indicates failure due to stack overflow
   789   // A return value of false indicates failure due to stack overflow
   790   bool markFromRootsWork(bool asynch);  // concurrent marking work
   790   bool markFromRootsWork(bool asynch);  // Concurrent marking work
   791 
   791 
   792  public:   // FIX ME!!! only for testing
   792  public:   // FIX ME!!! only for testing
   793   bool do_marking_st(bool asynch);      // single-threaded marking
   793   bool do_marking_st(bool asynch);      // Single-threaded marking
   794   bool do_marking_mt(bool asynch);      // multi-threaded  marking
   794   bool do_marking_mt(bool asynch);      // Multi-threaded  marking
   795 
   795 
   796  private:
   796  private:
   797 
   797 
   798   // concurrent precleaning work
   798   // Concurrent precleaning work
   799   size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
   799   size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
   800                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
   800                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
   801   size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
   801   size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
   802                              ScanMarkedObjectsAgainCarefullyClosure* cl);
   802                              ScanMarkedObjectsAgainCarefullyClosure* cl);
   803   // Does precleaning work, returning a quantity indicative of
   803   // Does precleaning work, returning a quantity indicative of
   809   // Helper function for above; merge-sorts the per-thread plab samples
   809   // Helper function for above; merge-sorts the per-thread plab samples
   810   void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
   810   void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
   811   // Resets (i.e. clears) the per-thread plab sample vectors
   811   // Resets (i.e. clears) the per-thread plab sample vectors
   812   void reset_survivor_plab_arrays();
   812   void reset_survivor_plab_arrays();
   813 
   813 
   814   // final (second) checkpoint work
   814   // Final (second) checkpoint work
   815   void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
   815   void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
   816                                 bool init_mark_was_synchronous);
   816                                 bool init_mark_was_synchronous);
   817   // work routine for parallel version of remark
   817   // Work routine for parallel version of remark
   818   void do_remark_parallel();
   818   void do_remark_parallel();
   819   // work routine for non-parallel version of remark
   819   // Work routine for non-parallel version of remark
   820   void do_remark_non_parallel();
   820   void do_remark_non_parallel();
   821   // reference processing work routine (during second checkpoint)
   821   // Reference processing work routine (during second checkpoint)
   822   void refProcessingWork(bool asynch, bool clear_all_soft_refs);
   822   void refProcessingWork(bool asynch, bool clear_all_soft_refs);
   823 
   823 
   824   // concurrent sweeping work
   824   // Concurrent sweeping work
   825   void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
   825   void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
   826 
   826 
   827   // (concurrent) resetting of support data structures
   827   // (Concurrent) resetting of support data structures
   828   void reset(bool asynch);
   828   void reset(bool asynch);
   829 
   829 
   830   // Clear _expansion_cause fields of constituent generations
   830   // Clear _expansion_cause fields of constituent generations
   831   void clear_expansion_cause();
   831   void clear_expansion_cause();
   832 
   832 
   833   // An auxilliary method used to record the ends of
   833   // An auxiliary method used to record the ends of
   834   // used regions of each generation to limit the extent of sweep
   834   // used regions of each generation to limit the extent of sweep
   835   void save_sweep_limits();
   835   void save_sweep_limits();
   836 
   836 
   837   // A work method used by foreground collection to determine
   837   // A work method used by foreground collection to determine
   838   // what type of collection (compacting or not, continuing or fresh)
   838   // what type of collection (compacting or not, continuing or fresh)
   852 
   852 
   853   // Work methods for reporting concurrent mode interruption or failure
   853   // Work methods for reporting concurrent mode interruption or failure
   854   bool is_external_interruption();
   854   bool is_external_interruption();
   855   void report_concurrent_mode_interruption();
   855   void report_concurrent_mode_interruption();
   856 
   856 
   857   // If the backgrould GC is active, acquire control from the background
   857   // If the background GC is active, acquire control from the background
   858   // GC and do the collection.
   858   // GC and do the collection.
   859   void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
   859   void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
   860 
   860 
   861   // For synchronizing passing of control from background to foreground
   861   // For synchronizing passing of control from background to foreground
   862   // GC.  waitForForegroundGC() is called by the background
   862   // GC.  waitForForegroundGC() is called by the background
   891   size_t get_eden_used() const;
   891   size_t get_eden_used() const;
   892   size_t get_eden_capacity() const;
   892   size_t get_eden_capacity() const;
   893 
   893 
   894   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
   894   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
   895 
   895 
   896   // locking checks
   896   // Locking checks
   897   NOT_PRODUCT(static bool have_cms_token();)
   897   NOT_PRODUCT(static bool have_cms_token();)
   898 
   898 
   899   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
   899   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
   900   bool shouldConcurrentCollect();
   900   bool shouldConcurrentCollect();
   901 
   901 
   956   void sample_eden_chunk();
   956   void sample_eden_chunk();
   957 
   957 
   958   CMSBitMap* markBitMap()  { return &_markBitMap; }
   958   CMSBitMap* markBitMap()  { return &_markBitMap; }
   959   void directAllocated(HeapWord* start, size_t size);
   959   void directAllocated(HeapWord* start, size_t size);
   960 
   960 
   961   // main CMS steps and related support
   961   // Main CMS steps and related support
   962   void checkpointRootsInitial(bool asynch);
   962   void checkpointRootsInitial(bool asynch);
   963   bool markFromRoots(bool asynch);  // a return value of false indicates failure
   963   bool markFromRoots(bool asynch);  // A return value of false indicates failure
   964                                     // due to stack overflow
   964                                     // due to stack overflow
   965   void preclean();
   965   void preclean();
   966   void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
   966   void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
   975   bool is_cms_reachable(HeapWord* addr);
   975   bool is_cms_reachable(HeapWord* addr);
   976 
   976 
   977   // Performance Counter Support
   977   // Performance Counter Support
   978   CollectorCounters* counters()    { return _gc_counters; }
   978   CollectorCounters* counters()    { return _gc_counters; }
   979 
   979 
   980   // timer stuff
   980   // Timer stuff
   981   void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
   981   void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
   982   void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
   982   void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
   983   void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
   983   void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
   984   double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
   984   double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
   985 
   985 
  1012   CMSAdaptiveSizePolicy* size_policy();
  1012   CMSAdaptiveSizePolicy* size_policy();
  1013   CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
  1013   CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
  1014 
  1014 
  1015   static void print_on_error(outputStream* st);
  1015   static void print_on_error(outputStream* st);
  1016 
  1016 
  1017   // debugging
  1017   // Debugging
  1018   void verify();
  1018   void verify();
  1019   bool verify_after_remark(bool silent = VerifySilently);
  1019   bool verify_after_remark(bool silent = VerifySilently);
  1020   void verify_ok_to_terminate() const PRODUCT_RETURN;
  1020   void verify_ok_to_terminate() const PRODUCT_RETURN;
  1021   void verify_work_stacks_empty() const PRODUCT_RETURN;
  1021   void verify_work_stacks_empty() const PRODUCT_RETURN;
  1022   void verify_overflow_empty() const PRODUCT_RETURN;
  1022   void verify_overflow_empty() const PRODUCT_RETURN;
  1023 
  1023 
  1024   // convenience methods in support of debugging
  1024   // Convenience methods in support of debugging
  1025   static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  1025   static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  1026   HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
  1026   HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
  1027 
  1027 
  1028   // accessors
  1028   // Accessors
  1029   CMSMarkStack* verification_mark_stack() { return &_markStack; }
  1029   CMSMarkStack* verification_mark_stack() { return &_markStack; }
  1030   CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }
  1030   CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }
  1031 
  1031 
  1032   // Initialization errors
  1032   // Initialization errors
  1033   bool completed_initialization() { return _completed_initialization; }
  1033   bool completed_initialization() { return _completed_initialization; }
  1107     Unknown_collection_type             = 3
  1107     Unknown_collection_type             = 3
  1108   };
  1108   };
  1109 
  1109 
  1110   CollectionTypes _debug_collection_type;
  1110   CollectionTypes _debug_collection_type;
  1111 
  1111 
  1112   // True if a compactiing collection was done.
  1112   // True if a compacting collection was done.
  1113   bool _did_compact;
  1113   bool _did_compact;
  1114   bool did_compact() { return _did_compact; }
  1114   bool did_compact() { return _did_compact; }
  1115 
  1115 
  1116   // Fraction of current occupancy at which to start a CMS collection which
  1116   // Fraction of current occupancy at which to start a CMS collection which
  1117   // will collect this generation (at least).
  1117   // will collect this generation (at least).
  1201 
  1201 
  1202   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  1202   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  1203 
  1203 
  1204   // Support for compaction
  1204   // Support for compaction
  1205   CompactibleSpace* first_compaction_space() const;
  1205   CompactibleSpace* first_compaction_space() const;
  1206   // Adjust quantites in the generation affected by
  1206   // Adjust quantities in the generation affected by
  1207   // the compaction.
  1207   // the compaction.
  1208   void reset_after_compaction();
  1208   void reset_after_compaction();
  1209 
  1209 
  1210   // Allocation support
  1210   // Allocation support
  1211   HeapWord* allocate(size_t size, bool tlab);
  1211   HeapWord* allocate(size_t size, bool tlab);
  1299 
  1299 
  1300   // Smart allocation  XXX -- move to CFLSpace?
  1300   // Smart allocation  XXX -- move to CFLSpace?
  1301   void setNearLargestChunk();
  1301   void setNearLargestChunk();
  1302   bool isNearLargestChunk(HeapWord* addr);
  1302   bool isNearLargestChunk(HeapWord* addr);
  1303 
  1303 
  1304   // Get the chunk at the end of the space.  Delagates to
  1304   // Get the chunk at the end of the space.  Delegates to
  1305   // the space.
  1305   // the space.
  1306   FreeChunk* find_chunk_at_end();
  1306   FreeChunk* find_chunk_at_end();
  1307 
  1307 
  1308   void post_compact();
  1308   void post_compact();
  1309 
  1309 
  1420 
  1420 
  1421 // This closure is used to do concurrent multi-threaded
  1421 // This closure is used to do concurrent multi-threaded
  1422 // marking from the roots following the first checkpoint.
  1422 // marking from the roots following the first checkpoint.
  1423 // XXX This should really be a subclass of The serial version
  1423 // XXX This should really be a subclass of The serial version
  1424 // above, but i have not had the time to refactor things cleanly.
   1424 // above, but I have not had the time to refactor things cleanly.
  1425 // That willbe done for Dolphin.
       
  1426 class Par_MarkFromRootsClosure: public BitMapClosure {
  1425 class Par_MarkFromRootsClosure: public BitMapClosure {
  1427   CMSCollector*  _collector;
  1426   CMSCollector*  _collector;
  1428   MemRegion      _whole_span;
  1427   MemRegion      _whole_span;
  1429   MemRegion      _span;
  1428   MemRegion      _span;
  1430   CMSBitMap*     _bit_map;
  1429   CMSBitMap*     _bit_map;
  1778   void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
  1777   void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
  1779   // Process a free chunk during sweeping.
  1778   // Process a free chunk during sweeping.
  1780   void do_already_free_chunk(FreeChunk *fc);
  1779   void do_already_free_chunk(FreeChunk *fc);
  1781   // Work method called when processing an already free or a
  1780   // Work method called when processing an already free or a
  1782   // freshly garbage chunk to do a lookahead and possibly a
  1781   // freshly garbage chunk to do a lookahead and possibly a
  1783   // premptive flush if crossing over _limit.
  1782   // preemptive flush if crossing over _limit.
  1784   void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  1783   void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  1785   // Process a garbage chunk during sweeping.
  1784   // Process a garbage chunk during sweeping.
  1786   size_t do_garbage_chunk(FreeChunk *fc);
  1785   size_t do_garbage_chunk(FreeChunk *fc);
  1787   // Process a live chunk during sweeping.
  1786   // Process a live chunk during sweeping.
  1788   size_t do_live_chunk(FreeChunk* fc);
  1787   size_t do_live_chunk(FreeChunk* fc);
  1877   void trim_queue(uint max);
  1876   void trim_queue(uint max);
  1878   void do_void();
  1877   void do_void();
  1879 };
  1878 };
  1880 
  1879 
  1881 // Allow yielding or short-circuiting of reference list
  1880 // Allow yielding or short-circuiting of reference list
  1882 // prelceaning work.
  1881 // precleaning work.
  1883 class CMSPrecleanRefsYieldClosure: public YieldClosure {
  1882 class CMSPrecleanRefsYieldClosure: public YieldClosure {
  1884   CMSCollector* _collector;
  1883   CMSCollector* _collector;
  1885   void do_yield_work();
  1884   void do_yield_work();
  1886  public:
  1885  public:
  1887   CMSPrecleanRefsYieldClosure(CMSCollector* collector):
  1886   CMSPrecleanRefsYieldClosure(CMSCollector* collector):