hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp
changeset 12646 fa5227d43363
parent 12645 e0d32945f6ab
parent 12579 4cc5610a6dd6
child 12647 f9991bc4fdde
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP

#include "gc_implementation/shared/allocationStats.hpp"

class CompactibleFreeListSpace;

// A class for maintaining a free list of FreeChunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of FreeList; they reside in the FreeChunk's themselves.
// The statistics are used to make decisions about coalescing FreeChunk's
// when they are swept during collection.
//
// See the corresponding .cpp file for a description of the specifics
// for that implementation.
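//
// As a rough, illustrative sketch only (none of the code below is taken from
// this file), a client such as CompactibleFreeListSpace keeps one FreeList
// per chunk size and drives it through the public interface declared below.
// The identifiers "word_size" and "swept_chunk" are placeholders.
//
//   FreeList fl;                         // empty list (default constructor)
//   fl.set_size(word_size);              // every entry must have this size
//   fl.returnChunkAtHead(swept_chunk);   // donate a chunk found by the sweeper
//   if (fl.count() > 0) {
//     FreeChunk* fc = fl.getChunkAtHead();  // unlink the head to satisfy an allocation
//   }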
       
class Mutex;
class TreeList;

class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  friend class VMStructs;
  friend class PrintTreeCensusClosure;

 protected:
  TreeList* _parent;
  TreeList* _left;
  TreeList* _right;

 private:
  FreeChunk*    _head;          // Head of list of free chunks
  FreeChunk*    _tail;          // Tail of list of free chunks
  size_t        _size;          // Size in Heap words of each chunk
  ssize_t       _count;         // Number of entries in list
  size_t        _hint;          // next larger size list with a positive surplus

  AllocationStats _allocation_stats; // allocation-related statistics

#ifdef ASSERT
  Mutex*        _protecting_lock;
#endif

  // Asserts false if the protecting lock (if any) is not held.
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }

  // Initialize the allocation statistics.
 protected:
  void init_statistics(bool split_birth = false);
  void set_count(ssize_t v) { _count = v; }
  void increment_count()    {
    _count++;
  }

  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative");
  }

 public:
  // Constructors
  // Construct a list without any entries.
  FreeList();
  // Construct a list with "fc" as the first (and lone) entry in the list.
  FreeList(FreeChunk* fc);
  // Construct a list which will have a FreeChunk at address "addr" and
  // of size "size" as the first (and lone) entry in the list.
  FreeList(HeapWord* addr, size_t size);

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Declare the current free list to be protected by the given lock.
#ifdef ASSERT
  void set_protecting_lock(Mutex* protecting_lock) {
    _protecting_lock = protecting_lock;
  }
#endif

  // Accessors.
  FreeChunk* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(FreeChunk* v) {
    assert_proper_lock_protection();
    _head = v;
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  // Set the head of the list and set the prev field of non-null
  // values to NULL.
  void link_head(FreeChunk* v) {
    assert_proper_lock_protection();
    set_head(v);
    // If this method is not used (just set the head instead),
    // this check can be avoided.
    if (v != NULL) {
      v->linkPrev(NULL);
    }
  }

  FreeChunk* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    _tail = v;
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  // Set the tail of the list and set the next field of non-null
  // values to NULL.
  void link_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clearNext();
    }
  }

  // No locking checks in the read-accessors below: lock-free reads (and only
  // reads) are benign.  Readers are expected to have the lock if they are
  // doing work that requires atomicity guarantees in sections of code.
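  //
  // A hypothetical illustration of that discipline (the code in this comment
  // is not taken from this file): a sweeper mutates the list only while
  // holding the protecting lock, whereas a sizing heuristic may read the
  // count without the lock and tolerate a momentarily stale value.
  //
  //   // with the protecting lock held:
  //   fl->removeChunk(fc);             // mutation requires the lock
  //   ...
  //   // elsewhere, without the lock:
  //   ssize_t approx = fl->count();    // benign lock-free read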
       
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const {
    return _count;
  }
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    assert(v == 0 || _size < v, "Bad hint"); _hint = v;
  }
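  // A hypothetical illustration of how the hint is meant to be consulted (the
  // code in this comment is not taken from this file): an allocator that finds
  // this list empty can follow hint() to the next larger size believed to have
  // a surplus, assuming some size-indexed lookup "list_for_size".
  //
  //   if (fl->count() == 0 && fl->hint() != 0) {
  //     FreeList* larger = list_for_size(fl->hint());
  //     FreeChunk* fc = larger->getChunkAtHead();   // caller may split the chunk
  //   }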
       

  // Accessors for statistics
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(_count,
                                      inter_sweep_current,
                                      inter_sweep_estimate,
                                      intra_sweep_estimate);
  }
  ssize_t coalDesired() const {
    return _allocation_stats.coalDesired();
  }
  void set_coalDesired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDesired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfrSurp() const {
    return _allocation_stats.bfrSurp();
  }
  void set_bfrSurp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfrSurp(v);
  }
  ssize_t prevSweep() const {
    return _allocation_stats.prevSweep();
  }
  void set_prevSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prevSweep(v);
  }
  ssize_t beforeSweep() const {
    return _allocation_stats.beforeSweep();
  }
  void set_beforeSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_beforeSweep(v);
  }

  ssize_t coalBirths() const {
    return _allocation_stats.coalBirths();
  }
  void set_coalBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalBirths(v);
  }
  void increment_coalBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalBirths();
  }

  ssize_t coalDeaths() const {
    return _allocation_stats.coalDeaths();
  }
  void set_coalDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDeaths(v);
  }
  void increment_coalDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalDeaths();
  }

  ssize_t splitBirths() const {
    return _allocation_stats.splitBirths();
  }
  void set_splitBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitBirths(v);
  }
  void increment_splitBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitBirths();
  }

  ssize_t splitDeaths() const {
    return _allocation_stats.splitDeaths();
  }
  void set_splitDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitDeaths(v);
  }
  void increment_splitDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitDeaths();
  }

  NOT_PRODUCT(
    // For debugging.  The "_returnedBytes" in all the lists are summed
    // and compared with the total number of bytes swept during a
    // collection.
    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
    void increment_returnedBytes_by(size_t v) {
      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
    }
  )

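  // A hypothetical sketch of that debug-only cross-check (not code from this
  // file); it assumes the owner of a table of per-size lists, "list_table" of
  // length "num_lists", also tracked "bytes_swept_to_free_lists":
  //
  //   size_t sum = 0;
  //   for (size_t i = 0; i < num_lists; i++) {
  //     sum += list_table[i].returnedBytes();
  //   }
  //   assert(sum == bytes_swept_to_free_lists, "inconsistent sweep accounting");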
       
  // Unlink head of list and return it.  Returns NULL if
  // the list is empty.
  FreeChunk* getChunkAtHead();

  // Remove the first "n" chunks from the list (or all "count" chunks, if the
  // list holds fewer than "n"), setting "fl", which is required to be empty,
  // to point to them.
  void getFirstNChunksFromList(size_t n, FreeList* fl);

  // Unlink this chunk from its free list.
  void removeChunk(FreeChunk* fc);

  // Add this chunk to this free list.
  void returnChunkAtHead(FreeChunk* fc);
  void returnChunkAtTail(FreeChunk* fc);

  // Similar to returnChunk* but also records some diagnostic
  // information.
  void returnChunkAtHead(FreeChunk* fc, bool record_return);
  void returnChunkAtTail(FreeChunk* fc, bool record_return);

  // Prepend "fl" (whose size is required to be the same as that of "this")
  // to the front of "this" list.
  void prepend(FreeList* fl);

  // Verify that the chunk is in the list.
  // Returns true if "fc" is found, and false otherwise.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;

  // Stats verification
  void verify_stats() const PRODUCT_RETURN;

  // Printing support
  static void print_labels_on(outputStream* st, const char* c);
  void print_on(outputStream* st, const char* c = NULL) const;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP