src/hotspot/share/gc/shared/collectedHeap.hpp
changeset 48961 120b61d50f85
parent 48168 cb5d2d4453d0
child 49014 407a8495d4b3
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -99,14 +99,10 @@
   static int       _fire_out_of_memory_count;
 #endif
 
   GCHeapLog* _gc_heap_log;
 
-  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
-  // or INCLUDE_JVMCI is being used
-  bool _defer_initial_card_mark;
-
   MemRegion _reserved;
 
  protected:
   BarrierSet* _barrier_set;
   bool _is_gc_active;
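
For context on what the removed _defer_initial_card_mark flag supported: with a card-table barrier set, every reference store is normally followed by a post-write barrier that dirties the card covering the stored-into word, and ReduceInitialCardMarks lets the compiler elide that barrier for a new object's initializing stores. A minimal sketch of the barrier being elided, assuming illustrative names (card_shift, dirty_card, byte_map_base follow HotSpot's card-table conventions but are not the exact fields):

#include <cstdint>

static const int          card_shift = 9;   // 512-byte cards, as in HotSpot's card table
static const uint8_t      dirty_card = 0;   // HotSpot marks dirty cards with 0
static volatile uint8_t*  byte_map_base;    // card byte covering address 0 (assumed set up elsewhere)

inline void post_write_barrier(void* field) {
  // After a reference store into 'field', dirty the card covering it so a
  // later scan of dirty cards can find potential old->young pointers.
  byte_map_base[(uintptr_t)field >> card_shift] = dirty_card;
}
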
@@ -126,17 +122,10 @@
   PerfStringVariable* _perf_gc_cause;
   PerfStringVariable* _perf_gc_lastcause;
 
   // Constructor
   CollectedHeap();
-
-  // Do common initializations that must follow instance construction,
-  // for example, those needing virtual calls.
-  // This code could perhaps be moved into initialize() but would
-  // be slightly more awkward because we want the latter to be a
-  // pure virtual.
-  void pre_initialize();
 
   // Create a new tlab. All TLAB allocations must go through this.
   virtual HeapWord* allocate_new_tlab(size_t size);
 
   // Accumulate statistics on all tlabs.
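
The "all TLAB allocations must go through this" comment is the invariant the shared slow path relies on: when a request does not fit in the thread's current TLAB, the shared code retires that TLAB and asks the concrete heap for a fresh one through this virtual hook. A condensed sketch of that flow, simplified from the shape of CollectedHeap::allocate_from_tlab_slow (refill-waste heuristics, sampling, and zapping omitted):

// Condensed sketch, not the exact HotSpot implementation.
HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size) {
  ThreadLocalAllocBuffer& tlab = thread->tlab();

  // Size the replacement buffer; zero means "do not refill now".
  size_t new_tlab_size = tlab.compute_size(size);
  if (new_tlab_size == 0) {
    return NULL;   // caller falls back to a shared, non-TLAB allocation
  }

  // Retire the current buffer: fill any leftover space with a dummy
  // object so the heap stays parseable, then discard it.
  tlab.clear_before_allocation();

  // Ask the concrete heap for a new TLAB via the virtual hook above.
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  // Install the new buffer and carve the requested object out of it.
  tlab.fill(obj, obj + size, new_tlab_size);
  return obj;
}
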
@@ -406,49 +395,10 @@
   virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
     guarantee(false, "thread-local allocation buffers not supported");
     return 0;
   }
 
-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint. If such permission
-  // is granted for this heap type, the compiler promises to call
-  // defer_store_barrier() below on any slow path allocation of
-  // a new object for which such initializing store barriers will
-  // have been elided.
-  virtual bool can_elide_tlab_store_barriers() const = 0;
-
-  // If a compiler is eliding store barriers for TLAB-allocated objects,
-  // there is probably a corresponding slow path which can produce
-  // an object allocated anywhere.  The compiler's runtime support
-  // promises to call this function on such a slow-path-allocated
-  // object before performing initializations that have elided
-  // store barriers. Returns new_obj, or maybe a safer copy thereof.
-  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
-
-  // Answers whether an initializing store to a new object currently
-  // allocated at the given address doesn't need a store
-  // barrier. Returns "true" if it doesn't need an initializing
-  // store barrier; answers "false" if it does.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
-
-  // If a compiler is eliding store barriers for TLAB-allocated objects,
-  // we will be informed of a slow-path allocation by a call
-  // to new_store_pre_barrier() above. Such a call precedes the
-  // initialization of the object itself, and no post-store-barriers will
-  // be issued. Some heap types require that the barrier strictly follows
-  // the initializing stores. (This is currently implemented by deferring the
-  // barrier until the next slow-path allocation or gc-related safepoint.)
-  // This interface answers whether a particular heap type needs the card
-  // mark to be thus strictly sequenced after the stores.
-  virtual bool card_mark_must_follow_store() const = 0;
-
-  // If the CollectedHeap was asked to defer a store barrier above,
-  // this informs it to flush such a deferred store barrier to the
-  // remembered set.
-  virtual void flush_deferred_store_barrier(JavaThread* thread);
-
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc".  This probably implies as full a collection as the
   // "CollectedHeap" supports.
   virtual void collect(GCCause::Cause cause) = 0;
 
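
Taken together, the hooks deleted above formed a small contract between compiled code and the heap: post-barriers for initializing stores could be elided from TLAB allocation up to the next safepoint, and slow-path allocations had to be reported so a card-marking heap could defer the card mark (via the _defer_initial_card_mark flag removed earlier) and flush it later. A rough outline of the call order, reconstructed from the removed comments; this is a hypothetical runtime stub, not JDK code, and slow_path_allocate is an illustrative helper:

// Hypothetical compiler-runtime stub driving the removed hooks.
oop runtime_slow_path_new(JavaThread* thread, size_t size) {
  CollectedHeap* heap = Universe::heap();
  oop new_obj = slow_path_allocate(thread, size);   // illustrative; may land outside any TLAB

  if (heap->can_elide_tlab_store_barriers()) {
    // Compiled code elided the post-barriers for this object's initializing
    // stores, so the heap must be told before those stores happen. Per the
    // removed comments, a card-marking heap either did nothing (object still
    // satisfies can_elide_initializing_store_barrier()) or recorded a
    // deferred card mark, flushed at the next slow-path allocation or
    // GC safepoint via flush_deferred_store_barrier().
    new_obj = heap->new_store_pre_barrier(thread, new_obj);
  }
  return new_obj;
}
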