src/hotspot/share/gc/shared/genCollectedHeap.hpp
changeset 47622 817f2a7019e4
parent 47216 71c04702a3d5
child 47624 b055cb5170f5
comparing 47621:f5f2a2d13775 with 47622:817f2a7019e4
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -76,25 +76,38 @@
   bool _incremental_collection_failed;
 
   // In support of ExplicitGCInvokesConcurrent functionality
   unsigned int _full_collections_completed;
 
-  // Data structure for claiming the (potentially) parallel tasks in
-  // (gen-specific) roots processing.
-  SubTasksDone* _process_strong_tasks;
-
   // Collects the given generation.
   void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                           bool run_verification, bool clear_soft_refs,
                           bool restore_marks_for_biased_locking);
 
-  // In block contents verification, the number of header words to skip
-  NOT_PRODUCT(static size_t _skip_header_HeapWords;)
-
-  WorkGang* _workers;
-
 protected:
+
+  // The set of potentially parallel tasks in root scanning.
+  enum GCH_strong_roots_tasks {
+    GCH_PS_Universe_oops_do,
+    GCH_PS_JNIHandles_oops_do,
+    GCH_PS_ObjectSynchronizer_oops_do,
+    GCH_PS_FlatProfiler_oops_do,
+    GCH_PS_Management_oops_do,
+    GCH_PS_SystemDictionary_oops_do,
+    GCH_PS_ClassLoaderDataGraph_oops_do,
+    GCH_PS_jvmti_oops_do,
+    GCH_PS_CodeCache_oops_do,
+    GCH_PS_aot_oops_do,
+    GCH_PS_younger_gens,
+    // Leave this one last.
+    GCH_PS_NumElements
+  };
+
+  // Data structure for claiming the (potentially) parallel tasks in
+  // (gen-specific) roots processing.
+  SubTasksDone* _process_strong_tasks;
+
   // Helper functions for allocation
   HeapWord* attempt_allocation(size_t size,
                                bool   is_tlab,
                                bool   first_only);
 
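
GCH_PS_NumElements exists only to size the SubTasksDone instance declared right after it; during root scanning each worker claims an id before touching the corresponding root set, so every root group is scanned exactly once. A minimal sketch of that claiming pattern, assuming the JDK 10 era SubTasksDone API (is_task_claimed) and an arbitrary OopClosure* strong_roots:

  // Sketch: claim-before-scan, as process_roots() uses these ids.
  // Each id is claimed by exactly one worker; losers skip that root set.
  SubTasksDone* tasks = new SubTasksDone(GCH_PS_NumElements);
  if (!tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);    // only the claiming worker scans these
  }
  if (!tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
    JNIHandles::oops_do(strong_roots);
  }
  // ...one claim per remaining GCH_PS_* id...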
@@ -122,34 +135,30 @@
   bool must_clear_all_soft_refs();
 
 public:
   GenCollectedHeap(GenCollectorPolicy *policy);
 
-  WorkGang* workers() const { return _workers; }
-
   // Returns JNI_OK on success
   virtual jint initialize();
 
   // Reserve aligned space for the heap as needed by the contained generations.
   char* allocate(size_t alignment, ReservedSpace* heap_rs);
 
   // Does operations required after initialization has been done.
   void post_initialize();
 
+  virtual void check_gen_kinds();
+
   // Initialize ("weak") refs processing support
   virtual void ref_processing_init();
 
   virtual Name kind() const {
     return CollectedHeap::GenCollectedHeap;
   }
 
   virtual const char* name() const {
-    if (UseConcMarkSweepGC) {
-      return "Concurrent Mark Sweep";
-    } else {
-      return "Serial";
-    }
+    return "Serial";
   }
 
   Generation* young_gen() const { return _young_gen; }
   Generation* old_gen()   const { return _old_gen; }
 
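
The UseConcMarkSweepGC branch deleted from name() does not disappear from the product: this changeset factors CMS behavior into a heap subclass, and the new check_gen_kinds() hook points the same way. A hedged sketch of the assumed overrides; the class name CMSHeap and the bodies are inferred, not shown in this hunk:

  // Sketch: the CMS subclass restores the removed answers via overrides.
  class CMSHeap : public GenCollectedHeap {
  public:
    virtual const char* name() const { return "Concurrent Mark Sweep"; }
    virtual void check_gen_kinds();  // e.g. assert ParNew young + CMS old gen
  };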
@@ -188,11 +197,11 @@
   HeapWord** end_addr() const;
 
   // Perform a full collection of the heap; intended for use in implementing
   // "System.gc". This implies as full a collection as the CollectedHeap
   // supports. Caller does not hold the Heap_lock on entry.
-  void collect(GCCause::Cause cause);
+  virtual void collect(GCCause::Cause cause);
 
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
 
   // Perform a full collection of generations up to and including max_generation.
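
collect() becomes virtual so the subclass can intercept explicit GC requests. The helpers should_do_concurrent_full_gc() and collect_mostly_concurrent(), deleted from the private section in the last hunk of this file, presumably move with it. A sketch under that assumption:

  // Sketch: ExplicitGCInvokesConcurrent dispatch in the assumed subclass.
  void CMSHeap::collect(GCCause::Cause cause) {
    if (should_do_concurrent_full_gc(cause)) {
      collect_mostly_concurrent(cause);  // start a concurrent cycle instead
    } else {
      GenCollectedHeap::collect(cause);  // ordinary stop-the-world full GC
    }
  }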
@@ -205,16 +214,12 @@
   // their inadvertent use in product jvm's, we restrict their use to
   // assertion checking or verification only.
   bool is_in(const void* p) const;
 
   // override
-  bool is_in_closed_subset(const void* p) const {
-    if (UseConcMarkSweepGC) {
-      return is_in_reserved(p);
-    } else {
-      return is_in(p);
-    }
-  }
+  virtual bool is_in_closed_subset(const void* p) const {
+    return is_in(p);
+  }
 
   // Returns true if the reference is to an object in the reserved space
   // for the young generation.
   // Assumes the the young gen address range is less than that of the old gen.
@@ -276,11 +281,11 @@
   virtual bool can_elide_tlab_store_barriers() const {
     return true;
   }
 
   virtual bool card_mark_must_follow_store() const {
-    return UseConcMarkSweepGC;
+    return false;
   }
 
   // We don't need barriers for stores to objects in the
   // young gen and, a fortiori, for initializing stores to
   // objects therein. This applies to DefNew+Tenured and ParNew+CMS
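
This hunk and the previous one replace UseConcMarkSweepGC tests with the serial-only answers; both methods are virtual, so the CMS answers can live in the subclass. A sketch, mirroring the branches deleted above (the subclass placement is an assumption):

  // Sketch: assumed CMSHeap overrides, reinstating the deleted branches.
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);  // CMS: the closed subset is the reserved range
  }
  virtual bool card_mark_must_follow_store() const {
    return true;               // CMS needs the card mark ordered after the store
  }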
@@ -342,11 +347,10 @@
   // Override.
   virtual void print_on(outputStream* st) const;
   virtual void print_gc_threads_on(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
   virtual void print_tracing_info() const;
-  virtual void print_on_error(outputStream* st) const;
 
   void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
 
   // The functions below are helper functions that a subclass of
   // "CollectedHeap" can use in the implementation of its virtual
@@ -381,39 +385,35 @@
     SO_None                =  0x0,
     SO_AllCodeCache        =  0x8,
     SO_ScavengeCodeCache   = 0x10
   };
 
- private:
+ protected:
   void process_roots(StrongRootsScope* scope,
                      ScanningOption so,
                      OopClosure* strong_roots,
                      OopClosure* weak_roots,
                      CLDClosure* strong_cld_closure,
                      CLDClosure* weak_cld_closure,
                      CodeBlobToOopClosure* code_roots);
 
   void process_string_table_roots(StrongRootsScope* scope,
                                   OopClosure* root_closure);
 
+  // Accessor for memory state verification support
+  NOT_PRODUCT(
+    virtual size_t skip_header_HeapWords() { return 0; }
+  )
+
+  virtual void gc_prologue(bool full);
+  virtual void gc_epilogue(bool full);
+
  public:
   void young_process_roots(StrongRootsScope* scope,
                            OopsInGenClosure* root_closure,
                            OopsInGenClosure* old_gen_closure,
                            CLDClosure* cld_closure);
-
-  // If "young_gen_as_roots" is false, younger generations are
-  // not scanned as roots; in this case, the caller must be arranging to
-  // scan the younger generations itself.  (For example, a generation might
-  // explicitly mark reachable objects in younger generations, to avoid
-  // excess storage retention.)
-  void cms_process_roots(StrongRootsScope* scope,
-                         bool young_gen_as_roots,
-                         ScanningOption so,
-                         bool only_strong_roots,
-                         OopsInGenClosure* root_closure,
-                         CLDClosure* cld_closure);
 
   void full_process_roots(StrongRootsScope* scope,
                           bool is_adjust_phase,
                           ScanningOption so,
                           bool only_strong_roots,
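
Three protected virtual hooks appear here: skip_header_HeapWords() replaces the static NOT_PRODUCT field deleted in the first hunk, gc_prologue()/gc_epilogue() replace the non-virtual pair deleted at the end of the file, and cms_process_roots() leaves for the subclass. A sketch of the debug-only override point; the CMS return value shown is an assumption:

  // Sketch: a subclass widens the skipped header words in verification code.
  // NOT_PRODUCT(...) compiles to nothing in product builds.
  NOT_PRODUCT(
    virtual size_t skip_header_HeapWords() {
      return CMSCollector::skip_header_HeapWords();  // assumed CMS value
    }
  )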
@@ -477,16 +477,12 @@
   // generation; return the new location of obj if successful.  Otherwise, return NULL.
   oop handle_failed_promotion(Generation* old_gen,
                               oop obj,
                               size_t obj_size);
 
+
 private:
-  // Accessor for memory state verification support
-  NOT_PRODUCT(
-    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
-  )
-
   // Override
   void check_for_non_bad_heap_word_value(HeapWord* addr,
     size_t size) PRODUCT_RETURN;
 
   // For use by mark-sweep.  As implemented, mark-sweep-compact is global
@@ -497,24 +493,10 @@
   // Perform a full collection of the generations up to and including max_generation.
   // This is the low level interface used by the public versions of
   // collect() and collect_locked(). Caller holds the Heap_lock on entry.
   void collect_locked(GCCause::Cause cause, GenerationType max_generation);
 
-  // Returns success or failure.
-  bool create_cms_collector();
-
-  // In support of ExplicitGCInvokesConcurrent functionality
-  bool should_do_concurrent_full_gc(GCCause::Cause cause);
-  void collect_mostly_concurrent(GCCause::Cause cause);
-
   // Save the tops of the spaces in all generations
   void record_gen_tops_before_GC() PRODUCT_RETURN;
-
-protected:
-  void gc_prologue(bool full);
-  void gc_epilogue(bool full);
-
-public:
-  void stop();
 };
 
 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
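
Net effect of the last hunk: every remaining CMS-only member (create_cms_collector(), the concurrent-full-GC pair, stop()) leaves this header, matching the earlier removal of _workers and workers(). Completing the subclass sketch from above, with the same caveat that the exact declarations are inferred rather than shown in this changeset view:

  // Sketch: assumed new home of the members deleted from this header.
  class CMSHeap : public GenCollectedHeap {
  private:
    WorkGang* _workers;
    bool create_cms_collector();   // returns success or failure
    bool should_do_concurrent_full_gc(GCCause::Cause cause);
    void collect_mostly_concurrent(GCCause::Cause cause);
  public:
    WorkGang* workers() const { return _workers; }
    virtual void stop();           // stop CMS concurrent threads
  };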