hotspot/src/share/vm/memory/metaspace.cpp
changeset 13728 882756847a04
child 13729 3826a6124401
13727:caf5eb7dd4a7 13728:882756847a04
       
     1 /*
       
     2  * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
       
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
       
     4  *
       
     5  * This code is free software; you can redistribute it and/or modify it
       
     6  * under the terms of the GNU General Public License version 2 only, as
       
     7  * published by the Free Software Foundation.
       
     8  *
       
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
       
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
       
    12  * version 2 for more details (a copy is included in the LICENSE file that
       
    13  * accompanied this code).
       
    14  *
       
    15  * You should have received a copy of the GNU General Public License version
       
    16  * 2 along with this work; if not, write to the Free Software Foundation,
       
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
       
    18  *
       
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
       
    20  * or visit www.oracle.com if you need additional information or have any
       
    21  * questions.
       
    22  *
       
    23  */
       
    24 #include "precompiled.hpp"
       
    25 #include "gc_interface/collectedHeap.hpp"
       
    26 #include "memory/binaryTreeDictionary.hpp"
       
    27 #include "memory/collectorPolicy.hpp"
       
    28 #include "memory/filemap.hpp"
       
    29 #include "memory/freeList.hpp"
       
    30 #include "memory/metaspace.hpp"
       
    31 #include "memory/metaspaceShared.hpp"
       
    32 #include "memory/resourceArea.hpp"
       
    33 #include "memory/universe.hpp"
       
    34 #include "runtime/globals.hpp"
       
    35 #include "runtime/mutex.hpp"
       
    36 #include "services/memTracker.hpp"
       
    37 #include "utilities/copy.hpp"
       
    38 #include "utilities/debug.hpp"
       
    39 
       
    40 // Define this macro to deallocate Metablock.  If not defined,
       
    41 // blocks are not yet deallocated and are only mangled.
       
    42 #undef DEALLOCATE_BLOCKS
       
    43 
       
    44 // Easily recognizable patterns
       
    45 // These patterns can be the same in 32bit or 64bit since
       
    46 // they only have to be easily recognizable.
       
    47 const void* metaspace_allocation_leader = (void*) 0X11111111;
       
    48 const void* metaspace_allocation_trailer = (void*) 0X77777777;
       
    49 
       
    50 // Parameters for stress mode testing
       
    51 const uint metadata_deallocate_a_lot_block = 10;
       
    52 const uint metadata_deallocate_a_lock_chunk = 3;
       
    53 size_t const allocation_from_dictionary_limit = 64 * K;
       
    54 const size_t metadata_chunk_initialize = 0xf7f7f7f7;
       
    55 const size_t metadata_deallocate = 0xf5f5f5f5;
       
    56 const size_t metadata_space_manager_allocate = 0xf3f3f3f3;
       
    57 
       
    58 MetaWord* last_allocated = 0;
       
    59 
       
    60 // Used in declarations in SpaceManager and ChunkManager
       
    61 enum ChunkIndex {
       
    62   SmallIndex = 0,
       
    63   MediumIndex = 1,
       
    64   HumongousIndex = 2,
       
    65   NumberOfFreeLists = 3
       
    66 };
       
    67 
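       // next_chunk_index() steps through the indices in order; for example,
       // all of the lists can be walked with
       //   for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { ... }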
       
    68 static ChunkIndex next_chunk_index(ChunkIndex i) {
       
    69   assert(i < NumberOfFreeLists, "Out of bounds");
       
    70   return (ChunkIndex) (i+1);
       
    71 }
       
    72 
       
    73 // Originally _capacity_until_GC was set to MetaspaceSize here but
       
    74 // the default MetaspaceSize before argument processing was being
       
    75 // used which was not the desired value.  See the code
       
    76 // in should_expand() to see how the initialization is handled
       
    77 // now.
       
    78 size_t MetaspaceGC::_capacity_until_GC = 0;
       
    79 bool MetaspaceGC::_expand_after_GC = false;
       
    80 uint MetaspaceGC::_shrink_factor = 0;
       
    81 bool MetaspaceGC::_should_concurrent_collect = false;
       
    82 
       
    83 // Blocks of space for metadata are allocated out of Metachunks.
       
    84 //
       
    85 // Metachunks are allocated out of MetadataVirtualspaces and once
       
    86 // allocated there is no explicit link between a Metachunk and
       
    87 // the MetadataVirtualspace from which it was allocated.
       
    88 //
       
    89 // Each SpaceManager maintains a
       
    90 // list of the chunks it is using and the current chunk.  The current
       
    91 // chunk is the chunk from which allocations are done.  Space freed in
       
    92 // a chunk is placed on the free list of blocks (BlockFreelist) and
       
    93 // reused from there.
       
    94 //
       
    95 // Future modification
       
    96 //
       
    97 // The Metachunk can conceivably be replaced by the Chunk in
       
    98 // allocation.hpp.  Note that the latter Chunk is the space for
       
    99 // allocation (allocations from the chunk are out of the space in
       
   100 // the Chunk after the header for the Chunk) whereas Metachunks
       
   101 // point to space in a VirtualSpace.  To replace Metachunks with
       
   102 // Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
       
   103 //
       
   104 
       
   105 // A Metablock is the unit of allocation from a Chunk.  It contains
       
   106 // the size of the requested allocation in a debug build.
       
   107 // Also in a debug build it has a marker before and after the
       
   108 // body of the block. The address of the body is the address returned
       
   109 // by the allocation.
       
   110 //
       
   111 // Layout in a debug build.  In a product build only the body is present.
       
   112 //
       
   113 //     +-----------+-----------+------------+     +-----------+
       
   114 //     | word size | leader    | body       | ... | trailer   |
       
   115 //     +-----------+-----------+------------+     +-----------+
       
   116 //
       
   117 // A Metablock may be reused by its SpaceManager but is never moved between
       
   118 // SpaceManagers.  There is no explicit link to the Metachunk
       
   119 // from which it was allocated.  Metablocks are not deallocated; rather
       
   120 // the Metachunk it is a part of will be deallocated when its
       
   121 // associated class loader is collected.
       
   122 //
       
   123 // When the word size of a block is passed in to the deallocation
       
   124 // call, the word size no longer needs to be part of a Metablock.
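       //
       // Example (debug build, 64-bit): a Metablock whose word size is 8 has a
       // 2 word header (word size and leader), words 2..6 as the body returned
       // by data(), and word 7 (the last word) holding the trailer.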
       
   125 
       
   126 class Metablock {
       
   127   friend class VMStructs;
       
   128  private:
       
   129   // Used to align the allocation (see below) and for debugging.
       
   130 #ifdef ASSERT
       
   131   struct {
       
   132     size_t _word_size;
       
   133     void*  _leader;
       
   134   } _header;
       
   135   void* _data[1];
       
   136 #endif
       
   137   static size_t _overhead;
       
   138 
       
   139 #ifdef ASSERT
       
   140   void set_word_size(size_t v) { _header._word_size = v; }
       
   141   void* leader() { return _header._leader; }
       
   142   void* trailer() {
       
   143     jlong index = (jlong) _header._word_size - sizeof(_header)/BytesPerWord - 1;
       
   144     assert(index > 0, err_msg("Bad indexing of trailer " JLONG_FORMAT, index));
       
   145     void** ptr = &_data[index];
       
   146     return *ptr;
       
   147   }
       
   148   void set_leader(void* v) { _header._leader = v; }
       
   149   void set_trailer(void* v) {
       
   150     void** ptr = &_data[_header._word_size - sizeof(_header)/BytesPerWord - 1];
       
   151     *ptr = v;
       
   152   }
       
   153  public:
       
   154   size_t word_size() { return _header._word_size; }
       
   155 #endif
       
   156  public:
       
   157 
       
   158   static Metablock* initialize(MetaWord* p, size_t word_size);
       
   159 
       
   160   // This places the body of the block at a 2 word boundary
       
   161   // because every block starts on a 2 word boundary.  Work out
       
   162   // how to make the body on a 2 word boundary if the block
       
   163   // starts on an arbitrary boundary.  JJJ
       
   164 
       
   165 #ifdef ASSERT
       
   166   MetaWord* data() { return (MetaWord*) &_data[0]; }
       
   167 #else
       
   168   MetaWord* data() { return (MetaWord*) this; }
       
   169 #endif
       
   170   static Metablock* metablock_from_data(MetaWord* p) {
       
   171 #ifdef ASSERT
       
   172     size_t word_offset = offset_of(Metablock, _data)/BytesPerWord;
       
   173     Metablock* result = (Metablock*) (p - word_offset);
       
   174     return result;
       
   175 #else
       
   176     return (Metablock*) p;
       
   177 #endif
       
   178   }
       
   179 
       
   180   static size_t overhead() { return _overhead; }
       
   181   void verify();
       
   182 };
       
   183 
       
   184 //  Metachunk - Quantum of allocation from a Virtualspace
       
   185 //    Metachunks are reused (when freed are put on a global freelist) and
       
   186 //    have no permanent association to a SpaceManager.
       
   187 
       
   188 //            +--------------+ <- end
       
   189 //            |              |          --+       ---+
       
   190 //            |              |            | free     |
       
   191 //            |              |            |          |
       
   192 //            |              |            |          | capacity
       
   193 //            |              |            |          |
       
   194 //            |              | <- top   --+          |
       
   195 //            |              |           ---+        |
       
   196 //            |              |              | used   |
       
   197 //            |              |              |        |
       
   198 //            |              |              |        |
       
   199 //            +--------------+ <- bottom ---+     ---+
       
   200 
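       // In terms of the accessors below: used_word_size() == top - bottom (so
       // it includes the Metachunk overhead), free_word_size() == end - top and
       // capacity_word_size() == end - bottom, so used + free == capacity.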
       
   201 class Metachunk VALUE_OBJ_CLASS_SPEC {
       
   202   // link to support lists of chunks
       
   203   Metachunk* _next;
       
   204 
       
   205   MetaWord* _bottom;
       
   206   MetaWord* _end;
       
   207   MetaWord* _top;
       
   208   size_t _word_size;
       
   209 
       
   210   // Metachunks are allocated out of a MetadataVirtualSpace and
       
   211   // use some of that space to describe themselves (plus alignment
       
   212   // considerations).  Metadata is allocated in the rest of the chunk.
       
   213   // This size is the overhead of maintaining the Metachunk within
       
   214   // the space.
       
   215   static size_t _overhead;
       
   216 
       
   217   void set_bottom(MetaWord* v) { _bottom = v; }
       
   218   void set_end(MetaWord* v) { _end = v; }
       
   219   void set_top(MetaWord* v) { _top = v; }
       
   220   void set_word_size(size_t v) { _word_size = v; }
       
   221  public:
       
   222 
       
   223   // Used to add a Metachunk to a list of Metachunks
       
   224   void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
       
   225 
       
   226   Metablock* allocate(size_t word_size);
       
   227   static Metachunk* initialize(MetaWord* ptr, size_t word_size);
       
   228 
       
   229   // Accessors
       
   230   Metachunk* next() const { return _next; }
       
   231   MetaWord* bottom() const { return _bottom; }
       
   232   MetaWord* end() const { return _end; }
       
   233   MetaWord* top() const { return _top; }
       
   234   size_t word_size() const { return _word_size; }
       
   235   static size_t overhead() { return _overhead; }
       
   236 
       
   237   // Reset top to bottom so chunk can be reused.
       
   238   void reset_empty() { _top = (_bottom + _overhead); }
       
   239   bool is_empty() { return _top == (_bottom + _overhead); }
       
   240 
       
   241   // used (has been allocated)
       
   242   // free (available for future allocations)
       
   243   // capacity (total size of chunk)
       
   244   size_t used_word_size();
       
   245   size_t free_word_size();
       
   246   size_t capacity_word_size();
       
   247 
       
   248 #ifdef ASSERT
       
   249   void mangle() {
       
   250     // Mangle the payload of the chunk and not the links that
       
   251     // maintain list of chunks.
       
   252     HeapWord* start = (HeapWord*)(bottom() + overhead());
       
   253     size_t word_size = capacity_word_size() - overhead();
       
   254     Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
       
   255   }
       
   256 #endif // ASSERT
       
   257 
       
   258   void print_on(outputStream* st) const;
       
   259   void verify();
       
   260 };
       
   261 
       
   262 
       
   263 // Pointer to list of Metachunks.
       
   264 class ChunkList VALUE_OBJ_CLASS_SPEC {
       
   265   // List of free chunks
       
   266   Metachunk* _head;
       
   267 
       
   268  public:
       
   269   // Constructor
       
   270   ChunkList() : _head(NULL) {}
       
   271 
       
   272   // Accessors
       
   273   Metachunk* head() { return _head; }
       
   274   void set_head(Metachunk* v) { _head = v; }
       
   275 
       
   276   // Link at head of the list
       
   277   void add_at_head(Metachunk* head, Metachunk* tail);
       
   278   void add_at_head(Metachunk* head);
       
   279 
       
   280   size_t sum_list_size();
       
   281   size_t sum_list_count();
       
   282   size_t sum_list_capacity();
       
   283 };
       
   284 
       
   285 // Manages the global free lists of chunks.
       
   286 // Has three lists of free chunks, and a total size and
       
   287 // count that includes all three
       
   288 
       
   289 class ChunkManager VALUE_OBJ_CLASS_SPEC {
       
   290 
       
   291   // Free list of chunks of different sizes.
       
   292   //   SmallChunk
       
   293   //   MediumChunk
       
   294   //   HumongousChunk
       
   295   ChunkList _free_chunks[NumberOfFreeLists];
       
   296 
       
   297   // Total size (in words) and number of free chunks in all lists
       
   298   size_t _free_chunks_total;
       
   299   size_t _free_chunks_count;
       
   300 
       
   301   void dec_free_chunks_total(size_t v) {
       
   302     assert(_free_chunks_count > 0 &&
       
   303              _free_chunks_total > 0,
       
   304              "About to go negative");
       
   305     Atomic::add_ptr(-1, &_free_chunks_count);
       
   306     jlong minus_v = (jlong) - (jlong) v;
       
   307     Atomic::add_ptr(minus_v, &_free_chunks_total);
       
   308   }
       
   309 
       
   310   // Debug support
       
   311 
       
   312   size_t sum_free_chunks();
       
   313   size_t sum_free_chunks_count();
       
   314 
       
   315   void locked_verify_free_chunks_total();
       
   316   void locked_verify_free_chunks_count();
       
   317   void verify_free_chunks_count();
       
   318 
       
   319  public:
       
   320 
       
   321   ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
       
   322 
       
   323   // Take a chunk from, or return a chunk to, the global freelist.
       
   324   Metachunk* chunk_freelist_allocate(size_t word_size);
       
   325   void chunk_freelist_deallocate(Metachunk* chunk);
       
   326 
       
   327   // Total of the space in the free chunks list
       
   328   size_t free_chunks_total();
       
   329   size_t free_chunks_total_in_bytes();
       
   330 
       
   331   // Number of chunks in the free chunks list
       
   332   size_t free_chunks_count();
       
   333 
       
   334   void inc_free_chunks_total(size_t v, size_t count = 1) {
       
   335     Atomic::add_ptr(count, &_free_chunks_count);
       
   336     Atomic::add_ptr(v, &_free_chunks_total);
       
   337   }
       
   338   ChunkList* free_medium_chunks() { return &_free_chunks[MediumIndex]; }
       
   339   ChunkList* free_small_chunks() { return &_free_chunks[SmallIndex]; }
       
   340   ChunkList* free_humongous_chunks() { return &_free_chunks[HumongousIndex]; }
       
   341 
       
   342   ChunkList* free_chunks(ChunkIndex index);
       
   343 
       
   344   // Returns the list for the given chunk word size.
       
   345   ChunkList* find_free_chunks_list(size_t word_size);
       
   346 
       
   347   // Add and remove from a list by size.  Selects
       
   348   // list based on size of chunk.
       
   349   void free_chunks_put(Metachunk* chunk);
       
   350   Metachunk* free_chunks_get(size_t chunk_word_size);
       
   351 
       
   352   // Debug support
       
   353   void verify();
       
   354   void locked_verify();
       
   355   void verify_free_chunks_total();
       
   356 
       
   357   void locked_print_free_chunks(outputStream* st);
       
   358   void locked_print_sum_free_chunks(outputStream* st);
       
   359 };
       
   360 
       
   361 
       
   362 // Used to manage the free list of Metablocks (a block corresponds
       
   363 // to the allocation of a quantum of metadata).
       
   364 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
       
   365 #ifdef DEALLOCATE_BLOCKS
       
   366   BinaryTreeDictionary<Metablock>* _dictionary;
       
   367 #endif
       
   368   static Metablock* initialize_free_chunk(Metablock* block, size_t word_size);
       
   369 
       
   370 #ifdef DEALLOCATE_BLOCKS
       
   371   // Accessors
       
   372   BinaryTreeDictionary<Metablock>* dictionary() const { return _dictionary; }
       
   373 #endif
       
   374 
       
   375  public:
       
   376   BlockFreelist();
       
   377   ~BlockFreelist();
       
   378 
       
   379   // Get and return a block to the free list
       
   380   Metablock* get_block(size_t word_size);
       
   381   void return_block(Metablock* block, size_t word_size);
       
   382 
       
   383   size_t totalSize() {
       
   384 #ifdef DEALLOCATE_BLOCKS
       
   385     if (dictionary() == NULL) {
       
   386       return 0;
       
   387     } else {
       
   388       return dictionary()->totalSize();
       
   389     }
       
   390 #else
       
   391     return 0;
       
   392 #endif
       
   393   }
       
   394 
       
   395   void print_on(outputStream* st) const;
       
   396 };
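
       // Note: with DEALLOCATE_BLOCKS undefined (the default above), no
       // dictionary is kept: get_block() always returns NULL and freed blocks
       // are only mangled, not reused.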
       
   397 
       
   398 class VirtualSpaceNode : public CHeapObj<mtClass> {
       
   399   friend class VirtualSpaceList;
       
   400 
       
   401   // Link to next VirtualSpaceNode
       
   402   VirtualSpaceNode* _next;
       
   403 
       
   404   // total in the VirtualSpace
       
   405   MemRegion _reserved;
       
   406   ReservedSpace _rs;
       
   407   VirtualSpace _virtual_space;
       
   408   MetaWord* _top;
       
   409 
       
   410   // Convenience functions for logical bottom and end
       
   411   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
       
   412   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
       
   413 
       
   414   // Convenience functions to access the _virtual_space
       
   415   char* low()  const { return virtual_space()->low(); }
       
   416   char* high() const { return virtual_space()->high(); }
       
   417 
       
   418  public:
       
   419 
       
   420   VirtualSpaceNode(size_t byte_size);
       
   421   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {}
       
   422   ~VirtualSpaceNode();
       
   423 
       
   424   // address of next available space in _virtual_space;
       
   425   // Accessors
       
   426   VirtualSpaceNode* next() { return _next; }
       
   427   void set_next(VirtualSpaceNode* v) { _next = v; }
       
   428 
       
   429   void set_reserved(MemRegion const v) { _reserved = v; }
       
   430   void set_top(MetaWord* v) { _top = v; }
       
   431 
       
   432   // Accessors
       
   433   MemRegion* reserved() { return &_reserved; }
       
   434   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
       
   435 
       
   436   // Returns true if "word_size" is available in the virtual space
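         //   (e.g. with 100 words left between top() and end(), is_available(64)
         //   is true while is_available(128) is false)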
       
   437   bool is_available(size_t word_size) { return _top + word_size <= end(); }
       
   438 
       
   439   MetaWord* top() const { return _top; }
       
   440   void inc_top(size_t word_size) { _top += word_size; }
       
   441 
       
   442   // used and capacity in this single entry in the list
       
   443   size_t used_words_in_vs() const;
       
   444   size_t capacity_words_in_vs() const;
       
   445 
       
   446   bool initialize();
       
   447 
       
   448   // get space from the virtual space
       
   449   Metachunk* take_from_committed(size_t chunk_word_size);
       
   450 
       
   451   // Allocate a chunk from the virtual space and return it.
       
   452   Metachunk* get_chunk_vs(size_t chunk_word_size);
       
   453   Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
       
   454 
       
   455   // Expands/shrinks the committed space in a virtual space.  Delegates
       
   456   // to VirtualSpace
       
   457   bool expand_by(size_t words, bool pre_touch = false);
       
   458   bool shrink_by(size_t words);
       
   459 
       
   460   // Debug support
       
   461   static void verify_virtual_space_total();
       
   462   static void verify_virtual_space_count();
       
   463   void mangle();
       
   464 
       
   465   void print_on(outputStream* st) const;
       
   466 };
       
   467 
       
   468   // byte_size is the size of the associated virtualspace.
       
   469 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) {
       
   470   // This allocates memory with mmap.  For DumpSharedSpaces, allocate the
       
   471   // space at low memory so that other shared images don't conflict.
       
   472   // This is the same address as memory needed for UseCompressedOops but
       
   473   // compressed oops don't work with CDS (offsets in metadata are wrong), so
       
   474   // borrow the same address.
       
   475   if (DumpSharedSpaces) {
       
   476     char* shared_base = (char*)HeapBaseMinAddress;
       
   477     _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
       
   478     if (_rs.is_reserved()) {
       
   479       assert(_rs.base() == shared_base, "should match");
       
   480     } else {
       
   481       // If we are dumping the heap, then allocate a wasted block of address
       
   482       // space in order to push the heap to a lower address.  This extra
       
   483       // address range allows for other (or larger) libraries to be loaded
       
   484       // without them occupying the space required for the shared spaces.
       
   485       uintx reserved = 0;
       
   486       uintx block_size = 64*1024*1024;
       
   487       while (reserved < SharedDummyBlockSize) {
       
   488         char* dummy = os::reserve_memory(block_size);
       
   489         reserved += block_size;
       
   490       }
       
   491       _rs = ReservedSpace(byte_size);
       
   492     }
       
   493     MetaspaceShared::set_shared_rs(&_rs);
       
   494   } else {
       
   495     _rs = ReservedSpace(byte_size);
       
   496   }
       
   497 
       
   498   MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
       
   499 }
       
   500 
       
   501 // List of VirtualSpaces for metadata allocation.
       
   502 // It has a _next link for a singly linked list and a MemRegion
       
   503 // for total space in the VirtualSpace.
       
   504 class VirtualSpaceList : public CHeapObj<mtClass> {
       
   505   friend class VirtualSpaceNode;
       
   506 
       
   507   enum VirtualSpaceSizes {
       
   508     VirtualSpaceSize = 256 * K
       
   509   };
       
   510 
       
   511   // Global list of virtual spaces
       
   512   // Head of the list
       
   513   VirtualSpaceNode* _virtual_space_list;
       
   514   // virtual space currently being used for allocations
       
   515   VirtualSpaceNode* _current_virtual_space;
       
   516   // Free chunk list for all other metadata
       
   517   ChunkManager      _chunk_manager;
       
   518 
       
   519   // Can this virtual space list allocate more than one space?  Also used to determine
       
   520   // whether to allocate unlimited small chunks in this virtual space
       
   521   bool _is_class;
       
   522   bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
       
   523 
       
   524   // Sum of space in all virtual spaces and number of virtual spaces
       
   525   size_t _virtual_space_total;
       
   526   size_t _virtual_space_count;
       
   527 
       
   528   ~VirtualSpaceList();
       
   529 
       
   530   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
       
   531 
       
   532   void set_virtual_space_list(VirtualSpaceNode* v) {
       
   533     _virtual_space_list = v;
       
   534   }
       
   535   void set_current_virtual_space(VirtualSpaceNode* v) {
       
   536     _current_virtual_space = v;
       
   537   }
       
   538 
       
   539   void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
       
   540 
       
   541   // Get another virtual space and add it to the list.  This
       
   542   // is typically prompted by a failed attempt to allocate a chunk
       
   543   // and is typically followed by the allocation of a chunk.
       
   544   bool grow_vs(size_t vs_word_size);
       
   545 
       
   546  public:
       
   547   VirtualSpaceList(size_t word_size);
       
   548   VirtualSpaceList(ReservedSpace rs);
       
   549 
       
   550   Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
       
   551 
       
   552   VirtualSpaceNode* current_virtual_space() {
       
   553     return _current_virtual_space;
       
   554   }
       
   555 
       
   556   ChunkManager* chunk_manager() { return &_chunk_manager; }
       
   557   bool is_class() const { return _is_class; }
       
   558 
       
   559   // Allocate the first virtualspace.
       
   560   void initialize(size_t word_size);
       
   561 
       
   562   size_t virtual_space_total() { return _virtual_space_total; }
       
   563   void inc_virtual_space_total(size_t v) {
       
   564     Atomic::add_ptr(v, &_virtual_space_total);
       
   565   }
       
   566 
       
   567   size_t virtual_space_count() { return _virtual_space_count; }
       
   568   void inc_virtual_space_count() {
       
   569     Atomic::inc_ptr(&_virtual_space_count);
       
   570   }
       
   571 
       
   572   // Used and capacity in the entire list of virtual spaces.
       
   573   // These are global values shared by all Metaspaces
       
   574   size_t capacity_words_sum();
       
   575   size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
       
   576   size_t used_words_sum();
       
   577   size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
       
   578 
       
   579   bool contains(const void *ptr);
       
   580 
       
   581   void print_on(outputStream* st) const;
       
   582 
       
   583   class VirtualSpaceListIterator : public StackObj {
       
   584     VirtualSpaceNode* _virtual_spaces;
       
   585    public:
       
   586     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
       
   587       _virtual_spaces(virtual_spaces) {}
       
   588 
       
   589     bool repeat() {
       
   590       return _virtual_spaces != NULL;
       
   591     }
       
   592 
       
   593     VirtualSpaceNode* get_next() {
       
   594       VirtualSpaceNode* result = _virtual_spaces;
       
   595       if (_virtual_spaces != NULL) {
       
   596         _virtual_spaces = _virtual_spaces->next();
       
   597       }
       
   598       return result;
       
   599     }
       
   600   };
       
   601 };
       
   602 
       
   603 
       
   604 class Metadebug : AllStatic {
       
   605   // Debugging support for Metaspaces
       
   606   static int _deallocate_block_a_lot_count;
       
   607   static int _deallocate_chunk_a_lot_count;
       
   608   static int _allocation_fail_alot_count;
       
   609 
       
   610  public:
       
   611   static int deallocate_block_a_lot_count() {
       
   612     return _deallocate_block_a_lot_count;
       
   613   }
       
   614   static void set_deallocate_block_a_lot_count(int v) {
       
   615     _deallocate_block_a_lot_count = v;
       
   616   }
       
   617   static void inc_deallocate_block_a_lot_count() {
       
   618     _deallocate_block_a_lot_count++;
       
   619   }
       
   620   static int deallocate_chunk_a_lot_count() {
       
   621     return _deallocate_chunk_a_lot_count;
       
   622   }
       
   623   static void reset_deallocate_chunk_a_lot_count() {
       
   624     _deallocate_chunk_a_lot_count = 1;
       
   625   }
       
   626   static void inc_deallocate_chunk_a_lot_count() {
       
   627     _deallocate_chunk_a_lot_count++;
       
   628   }
       
   629 
       
   630   static void init_allocation_fail_alot_count();
       
   631 #ifdef ASSERT
       
   632   static bool test_metadata_failure();
       
   633 #endif
       
   634 
       
   635   static void deallocate_chunk_a_lot(SpaceManager* sm,
       
   636                                      size_t chunk_word_size);
       
   637   static void deallocate_block_a_lot(SpaceManager* sm,
       
   638                                      size_t chunk_word_size);
       
   639 
       
   640 };
       
   641 
       
   642 int Metadebug::_deallocate_block_a_lot_count = 0;
       
   643 int Metadebug::_deallocate_chunk_a_lot_count = 0;
       
   644 int Metadebug::_allocation_fail_alot_count = 0;
       
   645 
       
   646 //  SpaceManager - used by Metaspace to handle allocations
       
   647 class SpaceManager : public CHeapObj<mtClass> {
       
   648   friend class Metaspace;
       
   649   friend class Metadebug;
       
   650 
       
   651  private:
       
   652   // protects allocations and contains.
       
   653   Mutex* const _lock;
       
   654 
       
   655   // List of chunks in use by this SpaceManager.  Allocations
       
   656   // are done from the current chunk.  The list is used for deallocating
       
   657   // chunks when the SpaceManager is freed.
       
   658   Metachunk* _chunks_in_use[NumberOfFreeLists];
       
   659   Metachunk* _current_chunk;
       
   660 
       
   661   // Virtual space where allocation comes from.
       
   662   VirtualSpaceList* _vs_list;
       
   663 
       
   664   // Number of small chunks to allocate to a manager
       
   665   // If class space manager, small chunks are unlimited
       
   666   static uint const _small_chunk_limit;
       
   667   bool has_small_chunk_limit() { return !vs_list()->is_class(); }
       
   668 
       
   669   // Sum of all space in allocated chunks
       
   670   size_t _allocation_total;
       
   671 
       
   672   // Free lists of blocks are per SpaceManager since they
       
   673   // are assumed to be in chunks in use by the SpaceManager
       
   674   // and all chunks in use by a SpaceManager are freed when
       
   675   // the class loader using the SpaceManager is collected.
       
   676   BlockFreelist _block_freelists;
       
   677 
       
   678   // protects virtualspace and chunk expansions
       
   679   static const char*  _expand_lock_name;
       
   680   static const int    _expand_lock_rank;
       
   681   static Mutex* const _expand_lock;
       
   682 
       
   683   // Accessors
       
   684   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
       
   685   void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
       
   686 
       
   687   BlockFreelist* block_freelists() const {
       
   688     return (BlockFreelist*) &_block_freelists;
       
   689   }
       
   690 
       
   691   VirtualSpaceList* vs_list() const    { return _vs_list; }
       
   692 
       
   693   Metachunk* current_chunk() const { return _current_chunk; }
       
   694   void set_current_chunk(Metachunk* v) {
       
   695     _current_chunk = v;
       
   696   }
       
   697 
       
   698   Metachunk* find_current_chunk(size_t word_size);
       
   699 
       
   700   // Add chunk to the list of chunks in use
       
   701   void add_chunk(Metachunk* v, bool make_current);
       
   702 
       
   703   // Debugging support
       
   704   void verify_chunks_in_use_index(ChunkIndex index, Metachunk* v) {
       
   705     switch (index) {
       
   706     case SmallIndex:
       
   707       assert(v->word_size() == SmallChunk, "Not a SmallChunk");
       
   708       break;
       
   709     case MediumIndex:
       
   710       assert(v->word_size() == MediumChunk, "Not a MediumChunk");
       
   711       break;
       
   712     case HumongousIndex:
       
   713       assert(v->word_size() > MediumChunk, "Not a HumongousChunk");
       
   714       break;
       
   715     default:
       
   716       assert(false, "Wrong list.");
       
   717     }
       
   718   }
       
   719 
       
   720  protected:
       
   721   Mutex* lock() const { return _lock; }
       
   722 
       
   723  public:
       
   724   SpaceManager(Mutex* lock, VirtualSpaceList* vs_list);
       
   725   ~SpaceManager();
       
   726 
       
   727   enum ChunkSizes {    // in words.
       
   728     SmallChunk = 512,
       
   729     MediumChunk = 8 * K,
       
   730     MediumChunkBunch = 4 * MediumChunk
       
   731   };
       
   732 
       
   733   // Accessors
       
   734   size_t allocation_total() const { return _allocation_total; }
       
   735   void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); }
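         // Requests larger than a MediumChunk are treated as humongous and are
         // kept on the humongous chunk list (HumongousIndex).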
       
   736   static bool is_humongous(size_t word_size) { return word_size > MediumChunk; }
       
   737 
       
   738   static Mutex* expand_lock() { return _expand_lock; }
       
   739 
       
   740   size_t sum_capacity_in_chunks_in_use() const;
       
   741   size_t sum_used_in_chunks_in_use() const;
       
   742   size_t sum_free_in_chunks_in_use() const;
       
   743   size_t sum_waste_in_chunks_in_use() const;
       
   744   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
       
   745 
       
   746   size_t sum_count_in_chunks_in_use();
       
   747   size_t sum_count_in_chunks_in_use(ChunkIndex i);
       
   748 
       
   749   // Block allocation and deallocation.
       
   750   // Allocates a block from the current chunk
       
   751   MetaWord* allocate(size_t word_size);
       
   752 
       
   753   // Helper for allocations
       
   754   Metablock* allocate_work(size_t word_size);
       
   755 
       
   756   // Returns a block to the per manager freelist
       
   757   void deallocate(MetaWord* p);
       
   758 
       
   759   // Based on the allocation size and a minimum chunk size,
       
   760   // compute the chunk size to return (for expanding space for chunk allocation).
       
   761   size_t calc_chunk_size(size_t allocation_word_size);
       
   762 
       
   763   // Called when an allocation from the current chunk fails.
       
   764   // Gets a new chunk (may require getting a new virtual space),
       
   765   // and allocates from that chunk.
       
   766   Metablock* grow_and_allocate(size_t word_size);
       
   767 
       
   768   // debugging support.
       
   769 
       
   770   void dump(outputStream* const out) const;
       
   771   void print_on(outputStream* st) const;
       
   772   void locked_print_chunks_in_use_on(outputStream* st) const;
       
   773 
       
   774   void verify();
       
   775 #ifdef ASSERT
       
   776   void mangle_freed_chunks();
       
   777   void verify_allocation_total();
       
   778 #endif
       
   779 };
       
   780 
       
   781 uint const SpaceManager::_small_chunk_limit = 4;
       
   782 
       
   783 const char* SpaceManager::_expand_lock_name =
       
   784   "SpaceManager chunk allocation lock";
       
   785 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
       
   786 Mutex* const SpaceManager::_expand_lock =
       
   787   new Mutex(SpaceManager::_expand_lock_rank,
       
   788             SpaceManager::_expand_lock_name,
       
   789             Mutex::_allow_vm_block_flag);
       
   790 
       
   791 #ifdef ASSERT
       
   792 size_t Metablock::_overhead =
       
   793   Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
       
   794 #else
       
   795 size_t Metablock::_overhead = 0;
       
   796 #endif
       
   797 size_t Metachunk::_overhead =
       
   798   Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
       
   799 
       
   800 // New blocks returned by the Metaspace are zero initialized.
       
   801 // We should instead fix the constructors to not assume this.
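       //
       // Typical flow: a Metachunk (or the block freelist) hands out raw space,
       // initialize() zeros it (and, in debug builds, stamps the leader and
       // trailer), and data() is the body address returned to the caller.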
       
   802 Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
       
   803   Metablock* result = (Metablock*) p;
       
   804 
       
   805   // Clear the memory
       
   806   Copy::fill_to_aligned_words((HeapWord*)result, word_size);
       
   807 #ifdef ASSERT
       
   808   result->set_word_size(word_size);
       
   809   // Check after word size is set.
       
   810   result->set_leader((void*) metaspace_allocation_leader);
       
   811   result->set_trailer((void*) metaspace_allocation_trailer);
       
   812 #endif
       
   813   return result;
       
   814 }
       
   815 
       
   816 void Metablock::verify() {
       
   817 #ifdef ASSERT
       
   818   assert(leader() == metaspace_allocation_leader &&
       
   819          trailer() == metaspace_allocation_trailer,
       
   820          "block has been corrupted");
       
   821 #endif
       
   822 }
       
   823 
       
   824 // Metachunk methods
       
   825 
       
   826 Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
       
   827   // Set bottom, top, and end.  Allow space for the Metachunk itself
       
   828   Metachunk* chunk = (Metachunk*) ptr;
       
   829 
       
   830   MetaWord* chunk_bottom = ptr + _overhead;
       
   831   chunk->set_bottom(ptr);
       
   832   chunk->set_top(chunk_bottom);
       
   833   MetaWord* chunk_end = ptr + word_size;
       
   834   assert(chunk_end > chunk_bottom, "Chunk must not be too small");
       
   835   chunk->set_end(chunk_end);
       
   836   chunk->set_next(NULL);
       
   837   chunk->set_word_size(word_size);
       
   838 #ifdef ASSERT
       
   839   size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord));
       
   840   Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize);
       
   841 #endif
       
   842   return chunk;
       
   843 }
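
       // After initialize(): bottom() == ptr, top() == ptr + overhead() and
       // end() == ptr + word_size, so used_word_size() == overhead() and
       // free_word_size() == word_size - overhead() for a fresh chunk.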
       
   844 
       
   845 
       
   846 Metablock* Metachunk::allocate(size_t word_size) {
       
   847   Metablock* result = NULL;
       
   848   // If available, bump the pointer to allocate.
       
   849   if (free_word_size() >= word_size) {
       
   850     result = Metablock::initialize(_top, word_size);
       
   851     _top = _top + word_size;
       
   852   }
       
   853 #ifdef ASSERT
       
   854   assert(result == NULL ||
       
   855          result->word_size() == word_size,
       
   856          "Block size is not set correctly");
       
   857 #endif
       
   858   return result;
       
   859 }
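
       // For example, with 100 free words and a request for 16 words the block
       // is carved at the current top and top() advances by 16 words.  A NULL
       // result means this chunk cannot satisfy the request and the caller
       // (see SpaceManager::grow_and_allocate()) must obtain a new chunk.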
       
   860 
       
   861 // _bottom points to the start of the chunk including the overhead.
       
   862 size_t Metachunk::used_word_size() {
       
   863   return pointer_delta(_top, _bottom, sizeof(MetaWord));
       
   864 }
       
   865 
       
   866 size_t Metachunk::free_word_size() {
       
   867   return pointer_delta(_end, _top, sizeof(MetaWord));
       
   868 }
       
   869 
       
   870 size_t Metachunk::capacity_word_size() {
       
   871   return pointer_delta(_end, _bottom, sizeof(MetaWord));
       
   872 }
       
   873 
       
   874 void Metachunk::print_on(outputStream* st) const {
       
   875   st->print_cr("Metachunk:"
       
   876                " bottom " PTR_FORMAT " top " PTR_FORMAT
       
   877                " end " PTR_FORMAT " size " SIZE_FORMAT,
       
   878                bottom(), top(), end(), word_size());
       
   879 }
       
   880 
       
   881 
       
   882 void Metachunk::verify() {
       
   883 #ifdef ASSERT
       
   884   // Cannot walk through the blocks unless the blocks have
       
   885   // headers with sizes.
       
   886   MetaWord* curr = bottom() + overhead();
       
   887   while (curr < top()) {
       
   888     Metablock* block = (Metablock*) curr;
       
   889     size_t word_size = block->word_size();
       
   890     block->verify();
       
   891     curr = curr + word_size;
       
   892   }
       
   893 #endif
       
   894   return;
       
   895 }
       
   896 
       
   897 // BlockFreelist methods
       
   898 
       
   899 #ifdef DEALLOCATE_BLOCKS
       
   900 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
       
   901 #else
       
   902 BlockFreelist::BlockFreelist() {}
       
   903 #endif
       
   904 
       
   905 BlockFreelist::~BlockFreelist() {
       
   906 #ifdef DEALLOCATE_BLOCKS
       
   907   if (_dictionary != NULL) {
       
   908     if (Verbose && TraceMetadataChunkAllocation) {
       
   909       _dictionary->print_free_lists(gclog_or_tty);
       
   910     }
       
   911     delete _dictionary;
       
   912   }
       
   913 #endif
       
   914 }
       
   915 
       
   916 Metablock* BlockFreelist::initialize_free_chunk(Metablock* block, size_t word_size) {
       
   917 #ifdef DEALLOCATE_BLOCKS
       
   918 #ifdef ASSERT
       
   919   assert(word_size == block->word_size(), "Wrong chunk size");
       
   920 #endif
       
   921   Metablock* result = block;
       
   922   result->setSize(word_size);
       
   923   result->linkPrev(NULL);
       
   924   result->linkNext(NULL);
       
   925 
       
   926   return result;
       
   927 #else
       
   928   ShouldNotReachHere();
       
   929   return block;
       
   930 #endif
       
   931 }
       
   932 
       
   933 void BlockFreelist::return_block(Metablock* block, size_t word_size) {
       
   934 #ifdef ASSERT
       
   935   assert(word_size == block->word_size(), "Block size is wrong");
       
   936 #endif
       
   937   Metablock* free_chunk = initialize_free_chunk(block, word_size);
       
   938 #ifdef DEALLOCATE_BLOCKS
       
   939   if (dictionary() == NULL) {
       
   940    _dictionary = new BinaryTreeDictionary<Metablock>(false /* adaptive_freelists */);
       
   941   }
       
   942   dictionary()->returnChunk(free_chunk);
       
   943 #endif
       
   944 }
       
   945 
       
   946 Metablock* BlockFreelist::get_block(size_t word_size) {
       
   947 #ifdef DEALLOCATE_BLOCKS
       
   948   if (dictionary() == NULL) {
       
   949     return NULL;
       
   950   }
       
   951 
       
   952   Metablock* free_chunk =
       
   953     dictionary()->getChunk(word_size, FreeBlockDictionary<Metablock>::exactly);
       
   954 #else
       
   955   Metablock* free_chunk = NULL;
       
   956 #endif
       
   957   if (free_chunk == NULL) {
       
   958     return NULL;
       
   959   }
       
   960   assert(free_chunk->word_size() == word_size, "Size of chunk is incorrect");
       
   961   Metablock* block = Metablock::initialize((MetaWord*) free_chunk, word_size);
       
   962 #ifdef ASSERT
       
   963   assert(block->word_size() == word_size, "Block size is not set correctly");
       
   964 #endif
       
   965 
       
   966   return block;
       
   967 }
       
   968 
       
   969 void BlockFreelist::print_on(outputStream* st) const {
       
   970 #ifdef DEALLOCATE_BLOCKS
       
   971   if (dictionary() == NULL) {
       
   972     return;
       
   973   }
       
   974   dictionary()->print_free_lists(st);
       
   975 #else
       
   976   return;
       
   977 #endif
       
   978 }
       
   979 
       
   980 // VirtualSpaceNode methods
       
   981 
       
   982 VirtualSpaceNode::~VirtualSpaceNode() {
       
   983   _rs.release();
       
   984 }
       
   985 
       
   986 size_t VirtualSpaceNode::used_words_in_vs() const {
       
   987   return pointer_delta(top(), bottom(), sizeof(MetaWord));
       
   988 }
       
   989 
       
   990 // Space committed in the VirtualSpace
       
   991 size_t VirtualSpaceNode::capacity_words_in_vs() const {
       
   992   return pointer_delta(end(), bottom(), sizeof(MetaWord));
       
   993 }
       
   994 
       
   995 
       
   996 // Allocates the chunk from the virtual space only.
       
   997 // This interface is also used internally for debugging.  Not all
       
   998 // chunks removed here are necessarily used for allocation.
       
   999 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
       
  1000   // Bottom of the new chunk
       
  1001   MetaWord* chunk_limit = top();
       
  1002   assert(chunk_limit != NULL, "Not safe to call this method");
       
  1003 
       
  1004   if (!is_available(chunk_word_size)) {
       
  1005     if (TraceMetadataChunkAllocation) {
       
  1006       tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
       
  1007       // Dump some information about the virtual space that is nearly full
       
  1008       print_on(tty);
       
  1009     }
       
  1010     return NULL;
       
  1011   }
       
  1012 
       
  1013   // Take the space  (bump top on the current virtual space).
       
  1014   inc_top(chunk_word_size);
       
  1015 
       
  1016   // Point the chunk at the space
       
  1017   Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size);
       
  1018   return result;
       
  1019 }
       
  1020 
       
  1021 
       
  1022 // Expand the virtual space (commit more of the reserved space)
       
  1023 bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
       
  1024   size_t bytes = words * BytesPerWord;
       
  1025   bool result =  virtual_space()->expand_by(bytes, pre_touch);
       
  1026   if (TraceMetavirtualspaceAllocation && !result) {
       
  1027     gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
       
  1028                            "for byte size " SIZE_FORMAT, bytes);
       
  1029     virtual_space()->print();
       
  1030   }
       
  1031   return result;
       
  1032 }
       
  1033 
       
  1034 // Shrink the virtual space (uncommit some of the committed space)
       
  1035 bool VirtualSpaceNode::shrink_by(size_t words) {
       
  1036   size_t bytes = words * BytesPerWord;
       
  1037   virtual_space()->shrink_by(bytes);
       
  1038   return true;
       
  1039 }
       
  1040 
       
  1041 // Add another chunk to the chunk list.
       
  1042 
       
  1043 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
       
  1044   assert_lock_strong(SpaceManager::expand_lock());
       
       
  1046 
       
  1047   return take_from_committed(chunk_word_size);
       
  1048 }
       
  1049 
       
  1050 Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
       
  1051   assert_lock_strong(SpaceManager::expand_lock());
       
  1052 
       
  1053   Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
       
  1054 
       
  1055   if (new_chunk == NULL) {
       
  1056     // Only a small part of the virtualspace is committed when first
       
  1057     // allocated so committing more here can be expected.
       
  1058     size_t page_size_words = os::vm_page_size() / BytesPerWord;
       
  1059     size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
       
  1060                                                     page_size_words);
       
  1061     expand_by(aligned_expand_vs_by_words, false);
       
  1062     new_chunk = get_chunk_vs(chunk_word_size);
       
  1063   }
       
  1064   return new_chunk;
       
  1065 }
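
       // For example, on a 64-bit VM with 4K pages, page_size_words is 512, so
       // a 512 word (SmallChunk) request commits exactly one more page while an
       // 8K word (MediumChunk) request commits 16 more pages.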
       
  1066 
       
  1067 bool VirtualSpaceNode::initialize() {
       
  1068 
       
  1069   if (!_rs.is_reserved()) {
       
  1070     return false;
       
  1071   }
       
  1072 
       
  1073   // Commit only 1 page instead of the whole reserved space _rs.size()
       
  1074   size_t committed_byte_size = os::vm_page_size();
       
  1075   bool result = virtual_space()->initialize(_rs, committed_byte_size);
       
  1076   if (result) {
       
  1077     set_top((MetaWord*)virtual_space()->low());
       
  1078     set_reserved(MemRegion((HeapWord*)_rs.base(),
       
  1079                  (HeapWord*)(_rs.base() + _rs.size())));
       
  1080   }
       
  1081 
       
  1082   assert(reserved()->start() == (HeapWord*) _rs.base(),
       
  1083     err_msg("Reserved start was not set properly " PTR_FORMAT
       
  1084       " != " PTR_FORMAT, reserved()->start(), _rs.base()));
       
  1085   assert(reserved()->word_size() == _rs.size() / BytesPerWord,
       
  1086     err_msg("Reserved size was not set properly " SIZE_FORMAT
       
  1087       " != " SIZE_FORMAT, reserved()->word_size(),
       
  1088       _rs.size() / BytesPerWord));
       
  1089 
       
  1090   return result;
       
  1091 }
       
  1092 
       
  1093 void VirtualSpaceNode::print_on(outputStream* st) const {
       
  1094   size_t used = used_words_in_vs();
       
  1095   size_t capacity = capacity_words_in_vs();
       
  1096   VirtualSpace* vs = virtual_space();
       
  1097   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
       
  1098            "[" PTR_FORMAT ", " PTR_FORMAT ", "
       
  1099            PTR_FORMAT ", " PTR_FORMAT ")",
       
  1100            vs, capacity / K, used * 100 / capacity,
       
  1101            bottom(), top(), end(),
       
  1102            vs->high_boundary());
       
  1103 }
       
  1104 
       
  1105 void VirtualSpaceNode::mangle() {
       
  1106   size_t word_size = capacity_words_in_vs();
       
  1107   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
       
  1108 }
       
  1109 
       
  1110 // VirtualSpaceList methods
       
  1111 // Space allocated from the VirtualSpace
       
  1112 
       
  1113 VirtualSpaceList::~VirtualSpaceList() {
       
  1114   VirtualSpaceListIterator iter(virtual_space_list());
       
  1115   while (iter.repeat()) {
       
  1116     VirtualSpaceNode* vsl = iter.get_next();
       
  1117     delete vsl;
       
  1118   }
       
  1119 }
       
  1120 
       
  1121 size_t VirtualSpaceList::used_words_sum() {
       
  1122   size_t allocated_by_vs = 0;
       
  1123   VirtualSpaceListIterator iter(virtual_space_list());
       
  1124   while (iter.repeat()) {
       
  1125     VirtualSpaceNode* vsl = iter.get_next();
       
  1126     // Sum used region [bottom, top) in each virtualspace
       
  1127     allocated_by_vs += vsl->used_words_in_vs();
       
  1128   }
       
  1129   assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
       
  1130     err_msg("Total in free chunks " SIZE_FORMAT
       
  1131             " greater than total from virtual_spaces " SIZE_FORMAT,
       
  1132             chunk_manager()->free_chunks_total(), allocated_by_vs));
       
  1133   size_t used =
       
  1134     allocated_by_vs - chunk_manager()->free_chunks_total();
       
  1135   return used;
       
  1136 }
       
  1137 
       
  1138 // Space available in all MetadataVirtualspaces allocated
       
  1139 // for metadata.  This is the upper limit on the capacity
       
  1140 // of chunks allocated out of all the MetadataVirtualspaces.
       
  1141 size_t VirtualSpaceList::capacity_words_sum() {
       
  1142   size_t capacity = 0;
       
  1143   VirtualSpaceListIterator iter(virtual_space_list());
       
  1144   while (iter.repeat()) {
       
  1145     VirtualSpaceNode* vsl = iter.get_next();
       
  1146     capacity += vsl->capacity_words_in_vs();
       
  1147   }
       
  1148   return capacity;
       
  1149 }
       
  1150 
       
  1151 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
       
  1152                                    _is_class(false),
       
  1153                                    _virtual_space_list(NULL),
       
  1154                                    _current_virtual_space(NULL),
       
  1155                                    _virtual_space_total(0),
       
  1156                                    _virtual_space_count(0) {
       
  1157   MutexLockerEx cl(SpaceManager::expand_lock(),
       
  1158                    Mutex::_no_safepoint_check_flag);
       
  1159   bool initialization_succeeded = grow_vs(word_size);
       
  1160 
       
  1161   assert(initialization_succeeded,
       
  1162     " VirtualSpaceList initialization should not fail");
       
  1163 }
       
  1164 
       
  1165 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
       
  1166                                    _is_class(true),
       
  1167                                    _virtual_space_list(NULL),
       
  1168                                    _current_virtual_space(NULL),
       
  1169                                    _virtual_space_total(0),
       
  1170                                    _virtual_space_count(0) {
       
  1171   MutexLockerEx cl(SpaceManager::expand_lock(),
       
  1172                    Mutex::_no_safepoint_check_flag);
       
  1173   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
       
  1174   bool succeeded = class_entry->initialize();
       
  1175   assert(succeeded, " VirtualSpaceList initialization should not fail");
       
  1176   link_vs(class_entry, rs.size()/BytesPerWord);
       
  1177 }
       
  1178 
       
  1179 // Allocate another meta virtual space and add it to the list.
       
  1180 bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
       
  1181   assert_lock_strong(SpaceManager::expand_lock());
       
  1182   if (vs_word_size == 0) {
       
  1183     return false;
       
  1184   }
       
  1185   // Reserve the space
       
  1186   size_t vs_byte_size = vs_word_size * BytesPerWord;
       
  1187   assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
       
  1188 
       
  1189   // Allocate the meta virtual space and initialize it.
       
  1190   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
       
  1191   if (!new_entry->initialize()) {
       
  1192     delete new_entry;
       
  1193     return false;
       
  1194   } else {
       
  1195     link_vs(new_entry, vs_word_size);
       
  1196     return true;
       
  1197   }
       
  1198 }
       
  1199 
       
  1200 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
       
  1201   if (virtual_space_list() == NULL) {
       
  1202     set_virtual_space_list(new_entry);
       
  1203   } else {
       
  1204     current_virtual_space()->set_next(new_entry);
       
  1205   }
       
  1206   set_current_virtual_space(new_entry);
       
  1207   inc_virtual_space_total(vs_word_size);
       
  1208   inc_virtual_space_count();
       
  1209 #ifdef ASSERT
       
  1210   new_entry->mangle();
       
  1211 #endif
       
  1212   if (TraceMetavirtualspaceAllocation && Verbose) {
       
  1213     VirtualSpaceNode* vsl = current_virtual_space();
       
  1214     vsl->print_on(tty);
       
  1215   }
       
  1216 }
       
  1217 
       
  1218 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
       
  1219                                            size_t grow_chunks_by_words) {
       
  1220 
       
  1221   // Get a chunk from the chunk freelist
       
  1222   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
       
  1223 
       
  1224   // Allocate a chunk out of the current virtual space.
       
  1225   if (next == NULL) {
       
  1226     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
       
  1227   }
       
  1228 
       
  1229   if (next == NULL) {
       
  1230     // Not enough room in current virtual space.  Try to commit
       
  1231     // more space.
       
  1232     size_t expand_vs_by_words = MAX2((size_t)SpaceManager::MediumChunkBunch,
       
  1233                                        grow_chunks_by_words);
       
  1234     size_t page_size_words = os::vm_page_size() / BytesPerWord;
       
  1235     size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
       
  1236                                                         page_size_words);
       
  1237     bool vs_expanded =
       
  1238       current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
       
  1239     if (!vs_expanded) {
       
  1240       // Should the capacity of the metaspaces be expanded for
       
  1241       // this allocation?  If it's the virtual space for classes and is
       
  1242       // being used for CompressedHeaders, don't allocate a new virtualspace.
       
  1243       if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
       
  1244         // Get another virtual space.
       
  1245         size_t grow_vs_words =

  1246           MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
       
  1247         if (grow_vs(grow_vs_words)) {
       
  1248           // Got it.  It's on the list now.  Get a chunk from it.
       
  1249           next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
       
  1250         }
       
  1251         if (TraceMetadataHumongousAllocation && SpaceManager::is_humongous(word_size)) {
       
  1252           gclog_or_tty->print_cr("  aligned_expand_vs_by_words " PTR_FORMAT,
       
  1253                                  aligned_expand_vs_by_words);
       
  1254           gclog_or_tty->print_cr("  grow_vs_words " PTR_FORMAT,
       
  1255                                  grow_vs_words);
       
  1256         }
       
  1257       } else {
       
  1258         // Allocation will fail and induce a GC
       
  1259         if (TraceMetadataChunkAllocation && Verbose) {
       
  1260           gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
       
  1261             " Fail instead of expand the metaspace");
       
  1262         }
       
  1263       }
       
  1264     } else {
       
  1265       // The virtual space expanded, get a new chunk
       
  1266       next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
       
  1267       assert(next != NULL, "Just expanded, should succeed");
       
  1268     }
       
  1269   }
       
  1270 
       
  1271   return next;
       
  1272 }
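       // Allocation order in get_new_chunk() above: reuse a chunk from the
       // global chunk freelist; else carve a chunk out of the current virtual
       // space; else commit more of the current virtual space; else, if the
       // metaspace is allowed to grow, reserve a whole new virtual space and
       // take the chunk from it; otherwise return NULL so the caller fails
       // the allocation and a GC is induced.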
       
  1273 
       
  1274 void VirtualSpaceList::print_on(outputStream* st) const {
       
  1275   if (TraceMetadataChunkAllocation && Verbose) {
       
  1276     VirtualSpaceListIterator iter(virtual_space_list());
       
  1277     while (iter.repeat()) {
       
  1278       VirtualSpaceNode* node = iter.get_next();
       
  1279       node->print_on(st);
       
  1280     }
       
  1281   }
       
  1282 }
       
  1283 
       
  1284 #ifndef PRODUCT
       
  1285 bool VirtualSpaceList::contains(const void *ptr) {
       
  1286   VirtualSpaceNode* list = virtual_space_list();
       
  1287   VirtualSpaceListIterator iter(list);
       
  1288   while (iter.repeat()) {
       
  1289     VirtualSpaceNode* node = iter.get_next();
       
  1290     if (node->reserved()->contains(ptr)) {
       
  1291       return true;
       
  1292     }
       
  1293   }
       
  1294   return false;
       
  1295 }
       
  1296 #endif // PRODUCT
       
  1297 
       
  1298 
       
  1299 // MetaspaceGC methods
       
  1300 
       
  1301 // VM_CollectForMetadataAllocation is the vm operation used to GC.
       
  1302 // Within the VM operation after the GC the attempt to allocate the metadata
       
  1303 // should succeed.  If the GC did not free enough space for the metaspace
       
  1304 // allocation, the HWM is increased so that another virtualspace will be
       
  1305 // allocated for the metadata.  With the perm gen, increases in its size

  1306 // were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The

  1307 // metaspace policy uses those as the small and large steps for the HWM.
       
  1308 //
       
  1309 // After the GC the compute_new_size() for MetaspaceGC is called to
       
  1310 // resize the capacity of the metaspaces.  The current implementation
       
  1311 // is based on the flags MinHeapFreeRatio and MaxHeapFreeRatio, which

  1312 // some GCs use to resize the Java heap.  New flags can be implemented
       
  1313 // if really needed.  MinHeapFreeRatio is used to calculate how much
       
  1314 // free space is desirable in the metaspace capacity to decide how much
       
  1315 // to increase the HWM.  MaxHeapFreeRatio is used to decide how much
       
  1316 // free space is desirable in the metaspace capacity before decreasing
       
  1317 // the HWM.
       
  1318 
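       // Illustrative example (the numbers are hypothetical, not defaults):
       // with MinHeapFreeRatio == 40 and 60M of metadata used after a GC, the
       // minimum desired capacity works out to 60M / (1 - 0.40) = 100M; if the
       // HWM (capacity_until_GC) is below that, compute_new_size() raises it.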
       
  1319 // Calculate the amount to increase the high water mark (HWM).
       
  1320 // Increase by a minimum amount (MinMetaspaceExpansion) so that
       
  1321 // another expansion is not requested too soon.  If that is not
       
  1322 // enough to satisfy the allocation (i.e. big enough for a word_size
       
  1323 // allocation), increase by MaxMetaspaceExpansion.  If that is still
       
  1324 // not enough, expand by the size of the allocation (word_size) plus

  1325 // the minimum expansion amount.
       
  1326 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
       
  1327   size_t before_inc = MetaspaceGC::capacity_until_GC();
       
  1328   size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
       
  1329   size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
       
  1330   size_t page_size_words = os::vm_page_size() / BytesPerWord;
       
  1331   size_t size_delta_words = align_size_up(word_size, page_size_words);
       
  1332   size_t delta_words = MAX2(size_delta_words, min_delta_words);
       
  1333   if (delta_words > min_delta_words) {
       
  1334     // Don't want to hit the high water mark on the next
       
  1335     // allocation so make the delta greater than just enough
       
  1336     // for this allocation.
       
  1337     delta_words = MAX2(delta_words, max_delta_words);
       
  1338     if (delta_words > max_delta_words) {
       
  1339       // This allocation is large but the next ones are probably not
       
  1340       // so increase by the minimum.
       
  1341       delta_words = delta_words + min_delta_words;
       
  1342     }
       
  1343   }
       
  1344   return delta_words;
       
  1345 }
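       // In summary: a small request grows the HWM by MinMetaspaceExpansion; a
       // request whose page-aligned size falls between MinMetaspaceExpansion
       // and MaxMetaspaceExpansion grows it by MaxMetaspaceExpansion; a still
       // larger request grows it by its own size plus MinMetaspaceExpansion.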
       
  1346 
       
  1347 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
       
  1348 
       
  1349   // The class virtual space should always be expanded.  A GC is only

  1350   // induced for the other (non-class) metadata virtual space.
       
  1351   if (vsl == Metaspace::class_space_list()) return true;
       
  1352 
       
  1353   // If the user wants a limit, impose one.
       
  1354   size_t max_metaspace_size_words = MaxMetaspaceSize / BytesPerWord;
       
  1355   size_t metaspace_size_words = MetaspaceSize / BytesPerWord;
       
  1356   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
       
  1357       vsl->capacity_words_sum() >= max_metaspace_size_words) {
       
  1358     return false;
       
  1359   }
       
  1360 
       
  1361   // If this is part of an allocation after a GC, expand
       
  1362   // unconditionally.
       
  1363   if (MetaspaceGC::expand_after_GC()) {
       
  1364     return true;
       
  1365   }
       
  1366 
       
  1367   // If the capacity is below the minimum capacity, allow the
       
  1368   // expansion.  Also set the high-water-mark (capacity_until_GC)
       
  1369   // to that minimum capacity so that a GC will not be induced
       
  1370   // until that minimum capacity is exceeded.
       
  1371   if (vsl->capacity_words_sum() < metaspace_size_words ||
       
  1372       capacity_until_GC() == 0) {
       
  1373     set_capacity_until_GC(metaspace_size_words);
       
  1374     return true;
       
  1375   } else {
       
  1376     if (vsl->capacity_words_sum() < capacity_until_GC()) {
       
  1377       return true;
       
  1378     } else {
       
  1379       if (TraceMetadataChunkAllocation && Verbose) {
       
  1380         gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
       
  1381                         "  capacity_until_GC " SIZE_FORMAT
       
  1382                         "  capacity_words_sum " SIZE_FORMAT
       
  1383                         "  used_words_sum " SIZE_FORMAT
       
  1384                         "  free chunks " SIZE_FORMAT
       
  1385                         "  free chunks count %d",
       
  1386                         word_size,
       
  1387                         capacity_until_GC(),
       
  1388                         vsl->capacity_words_sum(),
       
  1389                         vsl->used_words_sum(),
       
  1390                         vsl->chunk_manager()->free_chunks_total(),
       
  1391                         vsl->chunk_manager()->free_chunks_count());
       
  1392       }
       
  1393       return false;
       
  1394     }
       
  1395   }
       
  1396 }
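       // should_expand() thus answers, in order: always expand the class
       // space; refuse once a user-specified MaxMetaspaceSize has been
       // reached; always expand for the retry after a GC; expand freely while
       // below the initial MetaspaceSize or below the current HWM; otherwise
       // refuse, so the allocation fails and a GC is induced.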
       
  1397 
       
  1398 // Variables are in bytes
       
  1399 
       
  1400 void MetaspaceGC::compute_new_size() {
       
  1401   assert(_shrink_factor <= 100, "invalid shrink factor");
       
  1402   uint current_shrink_factor = _shrink_factor;
       
  1403   _shrink_factor = 0;
       
  1404 
       
  1405   VirtualSpaceList *vsl = Metaspace::space_list();
       
  1406 
       
  1407   size_t capacity_after_gc = vsl->capacity_bytes_sum();
       
  1408   // Check to see if these two can be calculated without walking the CLDG
       
  1409   size_t used_after_gc = vsl->used_bytes_sum();
       
  1410   size_t capacity_until_GC = vsl->capacity_bytes_sum();
       
  1411   size_t free_after_gc = capacity_until_GC - used_after_gc;
       
  1412 
       
  1413   const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
       
  1414   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
       
  1415 
       
  1416   const double min_tmp = used_after_gc / maximum_used_percentage;
       
  1417   size_t minimum_desired_capacity =
       
  1418     (size_t)MIN2(min_tmp, double(max_uintx));
       
  1419   // Don't shrink less than the initial generation size
       
  1420   minimum_desired_capacity = MAX2(minimum_desired_capacity,
       
  1421                                   MetaspaceSize);
       
  1422 
       
  1423   if (PrintGCDetails && Verbose) {
       
  1424     const double free_percentage = ((double)free_after_gc) / capacity_until_GC;
       
  1425     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
       
  1426     gclog_or_tty->print_cr("  "
       
  1427                   "  minimum_free_percentage: %6.2f"
       
  1428                   "  maximum_used_percentage: %6.2f",
       
  1429                   minimum_free_percentage,
       
  1430                   maximum_used_percentage);
       
  1431     double d_free_after_gc = free_after_gc / (double) K;
       
  1432     gclog_or_tty->print_cr("  "
       
  1433                   "   free_after_gc       : %6.1fK"
       
  1434                   "   used_after_gc       : %6.1fK"
       
  1435                   "   capacity_after_gc   : %6.1fK"
       
  1436                   "   metaspace HWM     : %6.1fK",
       
  1437                   free_after_gc / (double) K,
       
  1438                   used_after_gc / (double) K,
       
  1439                   capacity_after_gc / (double) K,
       
  1440                   capacity_until_GC / (double) K);
       
  1441     gclog_or_tty->print_cr("  "
       
  1442                   "   free_percentage: %6.2f",
       
  1443                   free_percentage);
       
  1444   }
       
  1445 
       
  1446 
       
  1447   if (capacity_until_GC < minimum_desired_capacity) {
       
  1448     // If we have less capacity below the metaspace HWM, then
       
  1449     // increment the HWM.
       
  1450     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
       
  1451     // Don't expand unless it's significant
       
  1452     if (expand_bytes >= MinMetaspaceExpansion) {
       
  1453       size_t expand_words = expand_bytes / BytesPerWord;
       
  1454       MetaspaceGC::inc_capacity_until_GC(expand_words);
       
  1455     }
       
  1456     if (PrintGCDetails && Verbose) {
       
  1457       size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
       
  1458       gclog_or_tty->print_cr("    expanding:"
       
  1459                     "  minimum_desired_capacity: %6.1fK"
       
  1460                     "  expand_words: %6.1fK"
       
  1461                     "  MinMetaspaceExpansion: %6.1fK"
       
  1462                     "  new metaspace HWM:  %6.1fK",
       
  1463                     minimum_desired_capacity / (double) K,
       
  1464                     expand_bytes / (double) K,
       
  1465                     MinMetaspaceExpansion / (double) K,
       
  1466                     new_capacity_until_GC / (double) K);
       
  1467     }
       
  1468     return;
       
  1469   }
       
  1470 
       
  1471   // No expansion, now see if we want to shrink
       
  1472   size_t shrink_words = 0;
       
  1473   // We would never want to shrink more than this
       
  1474   size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity;
       
  1475   assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT,
       
  1476     max_shrink_words));
       
  1477 
       
  1478   // Should shrinking be considered?
       
  1479   if (MaxHeapFreeRatio < 100) {
       
  1480     const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
       
  1481     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
       
  1482     const double max_tmp = used_after_gc / minimum_used_percentage;
       
  1483     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
       
  1484     maximum_desired_capacity = MAX2(maximum_desired_capacity,
       
  1485                                     MetaspaceSize);
       
  1486     if (PrintGC && Verbose) {
       
  1487       gclog_or_tty->print_cr("  "
       
  1488                              "  maximum_free_percentage: %6.2f"
       
  1489                              "  minimum_used_percentage: %6.2f",
       
  1490                              maximum_free_percentage,
       
  1491                              minimum_used_percentage);
       
  1492       gclog_or_tty->print_cr("  "
       
  1493                              "  capacity_until_GC: %6.1fK"
       
  1494                              "  minimum_desired_capacity: %6.1fK"
       
  1495                              "  maximum_desired_capacity: %6.1fK",
       
  1496                              capacity_until_GC / (double) K,
       
  1497                              minimum_desired_capacity / (double) K,
       
  1498                              maximum_desired_capacity / (double) K);
       
  1499     }
       
  1500 
       
  1501     assert(minimum_desired_capacity <= maximum_desired_capacity,
       
  1502            "sanity check");
       
  1503 
       
  1504     if (capacity_until_GC > maximum_desired_capacity) {
       
  1505       // Capacity too large, compute shrinking size
       
  1506       shrink_words = capacity_until_GC - maximum_desired_capacity;
       
  1507       // We don't want to shrink all the way back to initSize if people call
       
  1508       // System.gc(), because some programs do that between "phases" and then
       
  1509       // we'd just have to grow the heap up again for the next phase.  So we
       
  1510       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
       
  1511       // on the third call, and 100% by the fourth call.  But if we recompute
       
  1512       // size without shrinking, it goes back to 0%.
       
  1513       shrink_words = shrink_words / 100 * current_shrink_factor;
       
  1514       assert(shrink_words <= max_shrink_words,
       
  1515         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
       
  1516           shrink_words, max_shrink_words));
       
  1517       if (current_shrink_factor == 0) {
       
  1518         _shrink_factor = 10;
       
  1519       } else {
       
  1520         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
       
  1521       }
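             // Net effect of the damping: consecutive no-expansion
             // recomputations use shrink factors of 0%, 10%, 40% and then
             // 100% (each step multiplies the factor by 4, capped at 100), so
             // repeated System.gc() calls shrink the metaspace gradually.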
       
  1522       if (PrintGCDetails && Verbose) {
       
  1523         gclog_or_tty->print_cr("  "
       
  1524                       "  shrinking:"
       
  1525                       "  initSize: %.1fK"
       
  1526                       "  maximum_desired_capacity: %.1fK",
       
  1527                       MetaspaceSize / (double) K,
       
  1528                       maximum_desired_capacity / (double) K);
       
  1529         gclog_or_tty->print_cr("  "
       
  1530                       "  shrink_words: %.1fK"
       
  1531                       "  current_shrink_factor: %d"
       
  1532                       "  new shrink factor: %d"
       
  1533                       "  MinMetaspaceExpansion: %.1fK",
       
  1534                       shrink_words / (double) K,
       
  1535                       current_shrink_factor,
       
  1536                       _shrink_factor,
       
  1537                       MinMetaspaceExpansion / (double) K);
       
  1538       }
       
  1539     }
       
  1540   }
       
  1541 
       
  1542 
       
  1543   // Don't shrink unless it's significant
       
  1544   if (shrink_words >= MinMetaspaceExpansion) {
       
  1545     VirtualSpaceNode* csp = vsl->current_virtual_space();
       
  1546     size_t available_to_shrink = csp->capacity_words_in_vs() -
       
  1547       csp->used_words_in_vs();
       
  1548     shrink_words = MIN2(shrink_words, available_to_shrink);
       
  1549     csp->shrink_by(shrink_words);
       
  1550     MetaspaceGC::dec_capacity_until_GC(shrink_words);
       
  1551     if (PrintGCDetails && Verbose) {
       
  1552       size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
       
  1553       gclog_or_tty->print_cr("  metaspace HWM: %.1fK", new_capacity_until_GC / (double) K);
       
  1554     }
       
  1555   }
       
  1556   assert(vsl->used_bytes_sum() == used_after_gc &&
       
  1557          used_after_gc <= vsl->capacity_bytes_sum(),
       
  1558          "sanity check");
       
  1559 
       
  1560 }
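       // Illustrative example (the numbers are hypothetical, not defaults):
       // with MaxHeapFreeRatio == 70 and 30M of metadata used after a GC, the
       // maximum desired capacity is 30M / (1 - 0.70) = 100M; capacity above
       // that is a shrink candidate, scaled by the current shrink factor and
       // bounded by the free space in the current virtual space.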
       
  1561 
       
  1562 // Metadebug methods
       
  1563 
       
  1564 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
       
  1565                                        size_t chunk_word_size) {
       
  1566 #ifdef ASSERT
       
  1567   VirtualSpaceList* vsl = sm->vs_list();
       
  1568   if (MetaDataDeallocateALot &&
       
  1569       Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
       
  1570     Metadebug::reset_deallocate_chunk_a_lot_count();
       
  1571     for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
       
  1572       Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
       
  1573       if (dummy_chunk == NULL) {
       
  1574         break;
       
  1575       }
       
  1576       vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
       
  1577 
       
  1578       if (TraceMetadataChunkAllocation && Verbose) {
       
  1579         gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
       
  1580                                sm->sum_count_in_chunks_in_use());
       
  1581         dummy_chunk->print_on(gclog_or_tty);
       
  1582         gclog_or_tty->print_cr("  Free chunks total %d  count %d",
       
  1583                                vsl->chunk_manager()->free_chunks_total(),
       
  1584                                vsl->chunk_manager()->free_chunks_count());
       
  1585       }
       
  1586     }
       
  1587   } else {
       
  1588     Metadebug::inc_deallocate_chunk_a_lot_count();
       
  1589   }
       
  1590 #endif
       
  1591 }
       
  1592 
       
  1593 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
       
  1594                                        size_t raw_word_size) {
       
  1595 #ifdef ASSERT
       
  1596   if (MetaDataDeallocateALot &&
       
  1597         Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
       
  1598     Metadebug::set_deallocate_block_a_lot_count(0);
       
  1599     for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
       
  1600       Metablock* dummy_block = sm->allocate_work(raw_word_size);
       
  1601       if (dummy_block == 0) {
       
  1602         break;
       
  1603       }
       
  1604 #ifdef ASSERT
       
  1605       assert(dummy_block->word_size() == raw_word_size, "Block size is not set correctly");
       
  1606 #endif
       
  1607       sm->deallocate(dummy_block->data());
       
  1608     }
       
  1609   } else {
       
  1610     Metadebug::inc_deallocate_block_a_lot_count();
       
  1611   }
       
  1612 #endif
       
  1613 }
       
  1614 
       
  1615 void Metadebug::init_allocation_fail_alot_count() {
       
  1616   if (MetadataAllocationFailALot) {
       
  1617     _allocation_fail_alot_count =
       
  1618       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
       
  1619   }
       
  1620 }
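       // The expression above picks a pseudo-random count in the range
       // [1, MetadataAllocationFailALotInterval]; test_metadata_failure()
       // counts it down and injects a simulated allocation failure when it
       // reaches zero, then re-seeds the counter.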
       
  1621 
       
  1622 #ifdef ASSERT
       
  1623 bool Metadebug::test_metadata_failure() {
       
  1624   if (MetadataAllocationFailALot &&
       
  1625       Threads::is_vm_complete()) {
       
  1626     if (_allocation_fail_alot_count > 0) {
       
  1627       _allocation_fail_alot_count--;
       
  1628     } else {
       
  1629       if (TraceMetadataChunkAllocation && Verbose) {
       
  1630         gclog_or_tty->print_cr("Metadata allocation failing for "
       
  1631                                "MetadataAllocationFailALot");
       
  1632       }
       
  1633       init_allocation_fail_alot_count();
       
  1634       return true;
       
  1635     }
       
  1636   }
       
  1637   return false;
       
  1638 }
       
  1639 #endif
       
  1640 
       
  1641 // ChunkList methods
       
  1642 
       
  1643 size_t ChunkList::sum_list_size() {
       
  1644   size_t result = 0;
       
  1645   Metachunk* cur = head();
       
  1646   while (cur != NULL) {
       
  1647     result += cur->word_size();
       
  1648     cur = cur->next();
       
  1649   }
       
  1650   return result;
       
  1651 }
       
  1652 
       
  1653 size_t ChunkList::sum_list_count() {
       
  1654   size_t result = 0;
       
  1655   Metachunk* cur = head();
       
  1656   while (cur != NULL) {
       
  1657     result++;
       
  1658     cur = cur->next();
       
  1659   }
       
  1660   return result;
       
  1661 }
       
  1662 
       
  1663 size_t ChunkList::sum_list_capacity() {
       
  1664   size_t result = 0;
       
  1665   Metachunk* cur = head();
       
  1666   while (cur != NULL) {
       
  1667     result += cur->capacity_word_size();
       
  1668     cur = cur->next();
       
  1669   }
       
  1670   return result;
       
  1671 }
       
  1672 
       
  1673 void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) {
       
  1674   assert_lock_strong(SpaceManager::expand_lock());
       
  1675   assert(tail->next() == NULL, "Not the tail");
       
  1676 
       
  1677   if (TraceMetadataChunkAllocation && Verbose) {
       
  1678     tty->print("ChunkList::add_at_head: ");
       
  1679     Metachunk* cur = head;
       
  1680     while (cur != NULL) {
       
  1681       tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size());
       
  1682       cur = cur->next();
       
  1683     }
       
  1684     tty->print_cr("");
       
  1685   }
       
  1686 
       
  1687   if (tail != NULL) {
       
  1688     tail->set_next(_head);
       
  1689   }
       
  1690   set_head(head);
       
  1691 }
       
  1692 
       
  1693 void ChunkList::add_at_head(Metachunk* list) {
       
  1694   if (list == NULL) {
       
  1695     // Nothing to add
       
  1696     return;
       
  1697   }
       
  1698   assert_lock_strong(SpaceManager::expand_lock());
       
  1699   Metachunk* head = list;
       
  1700   Metachunk* tail = list;
       
  1701   Metachunk* cur = head->next();
       
  1702   // Search for the tail since it is not passed.
       
  1703   while (cur != NULL) {
       
  1704     tail = cur;
       
  1705     cur = cur->next();
       
  1706   }
       
  1707   add_at_head(head, tail);
       
  1708 }
       
  1709 
       
  1710 // ChunkManager methods
       
  1711 
       
  1712 // Verification of _free_chunks_total and _free_chunks_count does not
       
  1713 // work with the CMS collector because its use of additional locks
       
  1714 // complicates the mutex deadlock detection, but it can still be useful
       
  1715 // for detecting errors in the chunk accounting with other collectors.
       
  1716 
       
  1717 size_t ChunkManager::free_chunks_total() {
       
  1718 #ifdef ASSERT
       
  1719   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
       
  1720     MutexLockerEx cl(SpaceManager::expand_lock(),
       
  1721                      Mutex::_no_safepoint_check_flag);
       
  1722     locked_verify_free_chunks_total();
       
  1723   }
       
  1724 #endif
       
  1725   return _free_chunks_total;
       
  1726 }
       
  1727 
       
  1728 size_t ChunkManager::free_chunks_total_in_bytes() {
       
  1729   return free_chunks_total() * BytesPerWord;
       
  1730 }
       
  1731 
       
  1732 size_t ChunkManager::free_chunks_count() {
       
  1733 #ifdef ASSERT
       
  1734   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
       
  1735     MutexLockerEx cl(SpaceManager::expand_lock(),
       
  1736                      Mutex::_no_safepoint_check_flag);
       
  1737     // This lock is only needed in debug because the verification
       
  1738     // of the _free_chunks_totals walks the list of free chunks
       
  1739     locked_verify_free_chunks_count();
       
  1740   }
       
  1741 #endif
       
  1742   return _free_chunks_count;
       
  1743 }
       
  1744 
       
  1745 void ChunkManager::locked_verify_free_chunks_total() {
       
  1746   assert_lock_strong(SpaceManager::expand_lock());
       
  1747   assert(sum_free_chunks() == _free_chunks_total,
       
  1748     err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
       
  1749            " same as sum " SIZE_FORMAT, _free_chunks_total,
       
  1750            sum_free_chunks()));
       
  1751 }
       
  1752 
       
  1753 void ChunkManager::verify_free_chunks_total() {
       
  1754   MutexLockerEx cl(SpaceManager::expand_lock(),
       
  1755                      Mutex::_no_safepoint_check_flag);
       
  1756   locked_verify_free_chunks_total();
       
  1757 }
       
  1758 
       
  1759 void ChunkManager::locked_verify_free_chunks_count() {
       
  1760   assert_lock_strong(SpaceManager::expand_lock());
       
  1761   assert(sum_free_chunks_count() == _free_chunks_count,
       
  1762     err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
       
  1763            " same as sum " SIZE_FORMAT, _free_chunks_count,
       
  1764            sum_free_chunks_count()));
       
  1765 }
       
  1766 
       
  1767 void ChunkManager::verify_free_chunks_count() {
       
  1768 #ifdef ASSERT
       
  1769   MutexLockerEx cl(SpaceManager::expand_lock(),
       
  1770                      Mutex::_no_safepoint_check_flag);
       
  1771   locked_verify_free_chunks_count();
       
  1772 #endif
       
  1773 }
       
  1774 
       
  1775 void ChunkManager::verify() {
       
  1776 #ifdef ASSERT
       
  1777   if (!UseConcMarkSweepGC) {
       
  1778     MutexLockerEx cl(SpaceManager::expand_lock(),
       
  1779                        Mutex::_no_safepoint_check_flag);
       
  1780     locked_verify_free_chunks_total();
       
  1781     locked_verify_free_chunks_count();
       
  1782   }
       
  1783 #endif
       
  1784 }
       
  1785 
       
  1786 void ChunkManager::locked_verify() {
       
  1787   locked_verify_free_chunks_total();
       
  1788   locked_verify_free_chunks_count();
       
  1789 }
       
  1790 
       
  1791 void ChunkManager::locked_print_free_chunks(outputStream* st) {
       
  1792   assert_lock_strong(SpaceManager::expand_lock());
       
  1793   st->print_cr("Free chunk total 0x%x  count 0x%x",
       
  1794                 _free_chunks_total, _free_chunks_count);
       
  1795 }
       
  1796 
       
  1797 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
       
  1798   assert_lock_strong(SpaceManager::expand_lock());
       
  1799   st->print_cr("Sum free chunk total 0x%x  count 0x%x",
       
  1800                 sum_free_chunks(), sum_free_chunks_count());
       
  1801 }
       
  1802 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
       
  1803   return &_free_chunks[index];
       
  1804 }
       
  1805 
       
  1806 
       
  1807 // These methods, which sum the free chunk lists, are used by the

  1808 // printing methods that run in product builds.
       
  1809 size_t ChunkManager::sum_free_chunks() {
       
  1810   assert_lock_strong(SpaceManager::expand_lock());
       
  1811   size_t result = 0;
       
  1812   for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
       
  1813     ChunkList* list = free_chunks(i);
       
  1814 
       
  1815     if (list == NULL) {
       
  1816       continue;
       
  1817     }
       
  1818 
       
  1819     result = result + list->sum_list_capacity();
       
  1820   }
       
  1821   return result;
       
  1822 }
       
  1823 
       
  1824 size_t ChunkManager::sum_free_chunks_count() {
       
  1825   assert_lock_strong(SpaceManager::expand_lock());
       
  1826   size_t count = 0;
       
  1827   for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
       
  1828     ChunkList* list = free_chunks(i);
       
  1829     if (list == NULL) {
       
  1830       continue;
       
  1831     }
       
  1832     count = count + list->sum_list_count();
       
  1833   }
       
  1834   return count;
       
  1835 }
       
  1836 
       
  1837 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
       
  1838   switch (word_size) {
       
  1839   case SpaceManager::SmallChunk :
       
  1840       return &_free_chunks[0];
       
  1841   case SpaceManager::MediumChunk :
       
  1842       return &_free_chunks[1];
       
  1843   default:
       
  1844     assert(word_size > SpaceManager::MediumChunk, "List inconsistency");
       
  1845     return &_free_chunks[2];
       
  1846   }
       
  1847 }
       
  1848 
       
  1849 void ChunkManager::free_chunks_put(Metachunk* chunk) {
       
  1850   assert_lock_strong(SpaceManager::expand_lock());
       
  1851   ChunkList* free_list = find_free_chunks_list(chunk->word_size());
       
  1852   chunk->set_next(free_list->head());
       
  1853   free_list->set_head(chunk);
       
  1854   // chunk is being returned to the chunk free list
       
  1855   inc_free_chunks_total(chunk->capacity_word_size());
       
  1856   locked_verify();
       
  1857 }
       
  1858 
       
  1859 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
       
  1860   // The deallocation of a chunk originates in the freelist
       
  1861   // management code for a Metaspace and does not hold the
       
  1862   // lock.
       
  1863   assert(chunk != NULL, "Deallocating NULL");
       
  1864   // MutexLockerEx fcl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
       
  1865   locked_verify();
       
  1866   if (TraceMetadataChunkAllocation) {
       
  1867     tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
       
  1868                   PTR_FORMAT "  size " SIZE_FORMAT,
       
  1869                   chunk, chunk->word_size());
       
  1870   }
       
  1871   free_chunks_put(chunk);
       
  1872 }
       
  1873 
       
  1874 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
       
  1875   assert_lock_strong(SpaceManager::expand_lock());
       
  1876 
       
  1877   locked_verify();
       
  1878   ChunkList* free_list = find_free_chunks_list(word_size);
       
  1879   assert(free_list != NULL, "Sanity check");
       
  1880 
       
  1881   Metachunk* chunk = free_list->head();
       
  1882   debug_only(Metachunk* debug_head = chunk;)
       
  1883 
       
  1884   if (chunk == NULL) {
       
  1885     return NULL;
       
  1886   }
       
  1887 
       
  1888   Metachunk* prev_chunk = chunk;
       
  1889   if (chunk->word_size() == word_size) {
       
  1890     // Chunk is being removed from the chunks free list.
       
  1891     dec_free_chunks_total(chunk->capacity_word_size());
       
  1892     // Remove the chunk as the head of the list.
       
  1893     free_list->set_head(chunk->next());
       
  1894     chunk->set_next(NULL);
       
  1895 
       
  1896     if (TraceMetadataChunkAllocation && Verbose) {
       
  1897       tty->print_cr("ChunkManager::free_chunks_get: free_list "
       
  1898                     PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
       
  1899                     free_list, chunk, chunk->word_size());
       
  1900     }
       
  1901   } else {
       
  1902     assert(SpaceManager::is_humongous(word_size),
       
  1903       "Should only need to check humongous");
       
  1904     // This code to find the best fit is just for purposes of
       
  1905     // investigating the loss due to fragmentation on a humongous
       
  1906     // chunk.  It will be replaced by a binaryTreeDictionary for
       
  1907     // the humongous chunks.
       
  1908     uint count = 0;
       
  1909     Metachunk* best_fit = NULL;
       
  1910     Metachunk* best_fit_prev = NULL;
       
  1911     while (chunk != NULL) {
       
  1912       count++;
       
  1913       if (chunk->word_size() < word_size) {
       
  1914         prev_chunk = chunk;
       
  1915         chunk = chunk->next();
       
  1916       } else if (chunk->word_size() == word_size) {
       
  1917         break;
       
  1918       } else {
       
  1919         if (best_fit == NULL ||
       
  1920             best_fit->word_size() > chunk->word_size()) {
       
  1921           best_fit_prev = prev_chunk;
       
  1922           best_fit = chunk;
       
  1923         }
       
  1924         prev_chunk = chunk;
       
  1925         chunk = chunk->next();
       
  1926       }
       
  1927     }
       
  1928       if (chunk == NULL) {
       
  1929         prev_chunk = best_fit_prev;
       
  1930         chunk = best_fit;
       
  1931       }
       
  1932       if (chunk != NULL) {
       
  1933         if (TraceMetadataHumongousAllocation) {
       
  1934           size_t waste = chunk->word_size() - word_size;
       
  1935           tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
       
  1936                         " for requested size " SIZE_FORMAT
       
  1937                         " waste " SIZE_FORMAT
       
  1938                         " found at " SIZE_FORMAT " of " SIZE_FORMAT,
       
  1939                         chunk->word_size(), word_size, waste,
       
  1940                         count, free_list->sum_list_count());
       
  1941         }
       
  1942         // Chunk is being removed from the chunks free list.
       
  1943         dec_free_chunks_total(chunk->capacity_word_size());
       
  1944         // Remove the chunk if it is at the head of the list.
       
  1945         if (chunk == free_list->head()) {
       
  1946           free_list->set_head(chunk->next());
       
  1947 
       
  1948           if (TraceMetadataHumongousAllocation) {
       
  1949             tty->print_cr("ChunkManager::free_chunks_get: humongous free_list "
       
  1950                           PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT
       
  1951                           " new head " PTR_FORMAT,
       
  1952                           free_list, chunk, chunk->word_size(),
       
  1953                           free_list->head());
       
  1954           }
       
  1955         } else {
       
  1956           // Remove a chunk in the interior of the list
       
  1957           prev_chunk->set_next(chunk->next());
       
  1958 
       
  1959           if (TraceMetadataHumongousAllocation) {
       
  1960             tty->print_cr("ChunkManager::free_chunks_get: humongous free_list "
       
  1961                           PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT
       
  1962                           PTR_FORMAT "  prev " PTR_FORMAT " next " PTR_FORMAT,
       
  1963                           free_list, chunk, chunk->word_size(),
       
  1964                           prev_chunk, chunk->next());
       
  1965           }
       
  1966         }
       
  1967         chunk->set_next(NULL);
       
  1968       } else {
       
  1969         if (TraceMetadataHumongousAllocation) {
       
  1970           tty->print_cr("ChunkManager::free_chunks_get: New humongous chunk of size "
       
  1971                         SIZE_FORMAT,
       
  1972                         word_size);
       
  1973         }
       
  1974       }
       
  1975   }
       
  1976   locked_verify();
       
  1977   return chunk;
       
  1978 }
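       // free_chunks_get() removes an exact-size chunk from the head of the
       // small or medium free list; for humongous requests it walks the list
       // for an exact match, falls back to the smallest chunk that is large
       // enough (best fit), and returns NULL if nothing suitable is free.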
       
  1979 
       
  1980 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
       
  1981   assert_lock_strong(SpaceManager::expand_lock());
       
  1982   locked_verify();
       
  1983 
       
  1984   // Take from the beginning of the list
       
  1985   Metachunk* chunk = free_chunks_get(word_size);
       
  1986   if (chunk == NULL) {
       
  1987     return NULL;
       
  1988   }
       
  1989 
       
  1990   assert(word_size <= chunk->word_size() ||
       
  1991            SpaceManager::is_humongous(chunk->word_size()),
       
  1992            "Non-humongous variable sized chunk");
       
  1993   if (TraceMetadataChunkAllocation) {
       
  1994     tty->print("ChunkManager::chunk_freelist_allocate: chunk "
       
  1995                PTR_FORMAT "  size " SIZE_FORMAT " ",
       
  1996                chunk, chunk->word_size());
       
  1997     locked_print_free_chunks(tty);
       
  1998   }
       
  1999 
       
  2000   return chunk;
       
  2001 }
       
  2002 
       
  2003 // SpaceManager methods
       
  2004 
       
  2005 size_t SpaceManager::sum_free_in_chunks_in_use() const {
       
  2006   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
       
  2007   size_t free = 0;
       
  2008   for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
       
  2009     Metachunk* chunk = chunks_in_use(i);
       
  2010     while (chunk != NULL) {
       
  2011       free += chunk->free_word_size();
       
  2012       chunk = chunk->next();
       
  2013     }
       
  2014   }
       
  2015   return free;
       
  2016 }
       
  2017 
       
  2018 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
       
  2019   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
       
  2020   size_t result = 0;
       
  2021   for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
       
  2022     // Count the free space in all the chunks but not the

  2023     // current chunk from which allocations are still being done.

  2024     result += sum_waste_in_chunks_in_use(i);
       
  2025   }
       
  2026   return result;
       
  2027 }
       
  2028 
       
  2029 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
       
  2030   size_t result = 0;
       
  2031   size_t count = 0;
       
  2032   Metachunk* chunk = chunks_in_use(index);
       
  2033   // Count the free space in all the chunks but not the
       
  2034   // current chunk from which allocations are still being done.
       
  2035   if (chunk != NULL) {
       
  2036     while (chunk != NULL) {
       
  2037       if (chunk != current_chunk()) {
       
  2038         result += chunk->free_word_size();
       
  2039       }
       
  2040       chunk = chunk->next();
       
  2041       count++;
       
  2042     }
       
  2043   }
       
  2044   return result;
       
  2045 }
       
  2046 
       
  2047 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
       
  2048   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
       
  2049   size_t sum = 0;
       
  2050   for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
       
  2051     Metachunk* chunk = chunks_in_use(i);
       
  2052     while (chunk != NULL) {
       
  2053       // Sum the chunk's capacity, i.e. its word_size less the

  2054       // Metachunk overhead.
       
  2055       sum += chunk->capacity_word_size();
       
  2056       chunk = chunk->next();
       
  2057     }
       
  2058   }
       
  2059   return sum;
       
  2060 }
       
  2061 
       
  2062 size_t SpaceManager::sum_count_in_chunks_in_use() {
       
  2063   size_t count = 0;
       
  2064   for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
       
  2065     count = count + sum_count_in_chunks_in_use(i);
       
  2066   }
       
  2067   return count;
       
  2068 }
       
  2069 
       
  2070 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
       
  2071   size_t count = 0;
       
  2072   Metachunk* chunk = chunks_in_use(i);
       
  2073   while (chunk != NULL) {
       
  2074     count++;
       
  2075     chunk = chunk->next();
       
  2076   }
       
  2077   return count;
       
  2078 }
       
  2079 
       
  2080 
       
  2081 size_t SpaceManager::sum_used_in_chunks_in_use() const {
       
  2082   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
       
  2083   size_t used = 0;
       
  2084   for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
       
  2085     Metachunk* chunk = chunks_in_use(i);
       
  2086     while (chunk != NULL) {
       
  2087       used += chunk->used_word_size();
       
  2088       chunk = chunk->next();
       
  2089     }
       
  2090   }
       
  2091   return used;
       
  2092 }
       
  2093 
       
  2094 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
       
  2095 
       
  2096   Metachunk* small_chunk = chunks_in_use(SmallIndex);
       
  2097   st->print_cr("SpaceManager: small chunk " PTR_FORMAT
       
  2098                " free " SIZE_FORMAT,
       
  2099                small_chunk,
       
  2100                small_chunk->free_word_size());
       
  2101 
       
  2102   Metachunk* medium_chunk = chunks_in_use(MediumIndex);
       
  2103   st->print("medium chunk " PTR_FORMAT, medium_chunk);
       
  2104   Metachunk* tail = current_chunk();
       
  2105   st->print_cr(" current chunk " PTR_FORMAT, tail);
       
  2106 
       
  2107   Metachunk* head = chunks_in_use(HumongousIndex);
       
  2108   st->print_cr("humongous chunk " PTR_FORMAT, head);
       
  2109 
       
  2110   vs_list()->chunk_manager()->locked_print_free_chunks(st);
       
  2111   vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
       
  2112 }
       
  2113 
       
  2114 size_t SpaceManager::calc_chunk_size(size_t word_size) {
       
  2115 
       
  2116   // Decide between a small chunk and a medium chunk.  Up to
       
  2117   // _small_chunk_limit small chunks can be allocated but
       
  2118   // once a medium chunk has been allocated, no more small
       
  2119   // chunks will be allocated.
       
  2120   size_t chunk_word_size;
       
  2121   if (chunks_in_use(MediumIndex) == NULL &&
       
  2122       (!has_small_chunk_limit() ||
       
  2123        sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
       
  2124     chunk_word_size = (size_t) SpaceManager::SmallChunk;
       
  2125     if (word_size + Metachunk::overhead() > SpaceManager::SmallChunk) {
       
  2126       chunk_word_size = MediumChunk;
       
  2127     }
       
  2128   } else {
       
  2129     chunk_word_size = MediumChunk;
       
  2130   }
       
  2131 
       
  2132   // Might still need a humongous chunk
       
  2133   chunk_word_size =
       
  2134     MAX2((size_t) chunk_word_size, word_size + Metachunk::overhead());
       
  2135 
       
  2136   if (TraceMetadataHumongousAllocation &&
       
  2137       SpaceManager::is_humongous(word_size)) {
       
  2138     gclog_or_tty->print_cr("Metadata humongous allocation:");
       
  2139     gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
       
  2140     gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
       
  2141                            chunk_word_size);
       
  2142     gclog_or_tty->print_cr("    block overhead " PTR_FORMAT
       
  2143                            " chunk overhead " PTR_FORMAT,
       
  2144                            Metablock::overhead(),
       
  2145                            Metachunk::overhead());
       
  2146   }
       
  2147   return chunk_word_size;
       
  2148 }
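       // Illustrative sizing decisions: while no medium chunk exists and the
       // small-chunk limit has not been reached, a request that fits in a
       // SmallChunk (including the Metachunk overhead) gets a small chunk;
       // otherwise a medium chunk is used; a request larger than MediumChunk
       // gets a humongous chunk of exactly word_size + Metachunk::overhead().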
       
  2149 
       
  2150 Metablock* SpaceManager::grow_and_allocate(size_t word_size) {
       
  2151   assert(vs_list()->current_virtual_space() != NULL,
       
  2152          "Should have been set");
       
  2153   assert(current_chunk() == NULL ||
       
  2154          current_chunk()->allocate(word_size) == NULL,
       
  2155          "Don't need to expand");
       
  2156   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
       
  2157 
       
  2158   if (TraceMetadataChunkAllocation && Verbose) {
       
  2159     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
       
  2160                            " words " SIZE_FORMAT " space left",
       
  2161                             word_size, current_chunk() != NULL ?
       
  2162                               current_chunk()->free_word_size() : 0);
       
  2163   }
       
  2164 
       
  2165   // Get another chunk out of the virtual space
       
  2166   size_t grow_chunks_by_words = calc_chunk_size(word_size);
       
  2167   Metachunk* next = vs_list()->get_new_chunk(word_size, grow_chunks_by_words);
       
  2168 
       
  2169   // If a chunk was available, add it to the in-use chunk list
       
  2170   // and do an allocation from it.
       
  2171   if (next != NULL) {
       
  2172     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
       
  2173     // Add to this manager's list of chunks in use.
       
  2174     add_chunk(next, false);
       
  2175     return next->allocate(word_size);
       
  2176   }
       
  2177   return NULL;
       
  2178 }
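       // grow_and_allocate() is the slow path taken when the current chunk
       // cannot satisfy an allocation: it sizes a new chunk with
       // calc_chunk_size(), obtains it from the virtual space list, links it
       // into this manager's in-use lists via add_chunk(), and retries the
       // allocation in the new chunk.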
       
  2179 
       
  2180 void SpaceManager::print_on(outputStream* st) const {
       
  2181 
       
  2182   for (ChunkIndex i = SmallIndex;
       
  2183        i < NumberOfFreeLists;

  2184        i = next_chunk_index(i)) {
       
  2185     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
       
  2186                  chunks_in_use(i),
       
  2187                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
       
  2188   }
       
  2189   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
       
  2190                " Humongous " SIZE_FORMAT,
       
  2191                sum_waste_in_chunks_in_use(SmallIndex),
       
  2192                sum_waste_in_chunks_in_use(MediumIndex),
       
  2193                sum_waste_in_chunks_in_use(HumongousIndex));
       
  2194   // Nothing in them yet
       
  2195   // block_freelists()->print_on(st);
       
  2196 }
       
  2197 
       
  2198 SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) :
       
  2199   _vs_list(vs_list),
       
  2200   _allocation_total(0),
       
  2201   _lock(lock) {
       
  2202   Metadebug::init_allocation_fail_alot_count();
       
  2203   for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
       
  2204     _chunks_in_use[i] = NULL;
       
  2205   }
       
  2206   _current_chunk = NULL;
       
  2207   if (TraceMetadataChunkAllocation && Verbose) {
       
  2208     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
       
  2209   }
       
  2210 }
       
  2211 
       
  2212 SpaceManager::~SpaceManager() {
       
  2213   MutexLockerEx fcl(SpaceManager::expand_lock(),
       
  2214                     Mutex::_no_safepoint_check_flag);
       
  2215 
       
  2216   ChunkManager* chunk_manager = vs_list()->chunk_manager();
       
  2217 
       
  2218   chunk_manager->locked_verify();
       
  2219 
       
  2220   if (TraceMetadataChunkAllocation && Verbose) {
       
  2221     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
       
  2222     locked_print_chunks_in_use_on(gclog_or_tty);
       
  2223   }
       
  2224 
       
  2225   // Have to update before the chunks_in_use lists are emptied
       
  2226   // below.
       
  2227   chunk_manager->inc_free_chunks_total(sum_capacity_in_chunks_in_use(),
       
  2228                                        sum_count_in_chunks_in_use());
       
  2229 
       
  2230 #ifdef ASSERT
       
  2231   // Mangle freed memory.
       
  2232   mangle_freed_chunks();
       
  2233 #endif // ASSERT
       
  2234 
       
  2235   // Add all the chunks in use by this space manager
       
  2236   // to the global list of free chunks.
       
  2237 
       
  2238   // Small chunks.  There is one _current_chunk for each
       
  2239   // Metaspace.  It could point to a small or medium chunk.
       
  2240   // Rather than determine which it is, follow the list of
       
  2241   // small chunks to add them to the free list
       
  2242   Metachunk* small_chunk = chunks_in_use(SmallIndex);
       
  2243   chunk_manager->free_small_chunks()->add_at_head(small_chunk);
       
  2244   set_chunks_in_use(SmallIndex, NULL);
       
  2245 
       
  2246   // After the small chunks come the medium chunks
       
  2247   Metachunk* medium_chunk = chunks_in_use(MediumIndex);
       
  2248   assert(medium_chunk == NULL ||
       
  2249          medium_chunk->word_size() == MediumChunk,
       
  2250          "Chunk is on the wrong list");
       
  2251 
       
  2252   if (medium_chunk != NULL) {
       
  2253     Metachunk* head = medium_chunk;
       
  2254     // If there is a medium chunk then the _current_chunk can only
       
  2255     // point to the last medium chunk.
       
  2256     Metachunk* tail = current_chunk();
       
  2257     chunk_manager->free_medium_chunks()->add_at_head(head, tail);
       
  2258     set_chunks_in_use(MediumIndex, NULL);
       
  2259   }
       
  2260 
       
  2261   // Humongous chunks
       
  2262   // Humongous chunks are never the current chunk.
       
  2263   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
       
  2264 
       
  2265   if (humongous_chunks != NULL) {
       
  2266     chunk_manager->free_humongous_chunks()->add_at_head(humongous_chunks);
       
  2267     set_chunks_in_use(HumongousIndex, NULL);
       
  2268   }
       
  2269   chunk_manager->locked_verify();
       
  2270 }
       
  2271 
       
  2272 void SpaceManager::deallocate(MetaWord* p) {
       
  2273   assert_lock_strong(_lock);
       
  2274   ShouldNotReachHere();  // Where is this needed?
       
  2275 #ifdef DEALLOCATE_BLOCKS
       
  2276   Metablock* block = Metablock::metablock_from_data(p);
       
  2277   // This is expensive but kept until integration JJJ
       
  2278   assert(contains((address)block), "Block does not belong to this metaspace");
       
  2279   block_freelists()->return_block(block, word_size);
       
  2280 #endif
       
  2281 }
       
  2282 
       
  2283 // Adds a chunk to the list of chunks in use.
       
  2284 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
       
  2285 
       
  2286   assert(new_chunk != NULL, "Should not be NULL");
       
  2287   assert(new_chunk->next() == NULL, "Should not be on a list");
       
  2288 
       
  2289   new_chunk->reset_empty();
       
  2290 
       
  2291   // Find the correct list and set the current
       
  2292   // chunk for that list.
       
  2293   switch (new_chunk->word_size()) {
       
  2294   case SpaceManager::SmallChunk :
       
  2295     if (chunks_in_use(SmallIndex) == NULL) {
       
  2296       // First chunk to add to the list
       
  2297       set_chunks_in_use(SmallIndex, new_chunk);
       
  2298     } else {
       
  2299       assert(current_chunk()->word_size() == SpaceManager::SmallChunk,
       
  2300         err_msg( "Incorrect mix of sizes in chunk list "
       
  2301         SIZE_FORMAT " new chunk " SIZE_FORMAT,
       
  2302         current_chunk()->word_size(), new_chunk->word_size()));
       
  2303       current_chunk()->set_next(new_chunk);
       
  2304     }
       
  2305     // Make current chunk
       
  2306     set_current_chunk(new_chunk);
       
  2307     break;
       
  2308   case SpaceManager::MediumChunk :
       
  2309     if (chunks_in_use(MediumIndex) == NULL) {
       
  2310       // About to add the first medium chunk so terminate the
       
  2311       // small chunk list.  In general once medium chunks are
       
  2312       // being added, we're past the need for small chunks.
       
  2313       if (current_chunk() != NULL) {
       
  2314         // Only a small chunk or the initial chunk could be
       
  2315         // the current chunk if this is the first medium chunk.
       
  2316         assert(current_chunk()->word_size() == SpaceManager::SmallChunk ||
       
  2317           chunks_in_use(SmallIndex) == NULL,
       
  2318           err_msg("Should be a small chunk or initial chunk, current chunk "
       
  2319           SIZE_FORMAT " new chunk " SIZE_FORMAT,
       
  2320           current_chunk()->word_size(), new_chunk->word_size()));
       
  2321         current_chunk()->set_next(NULL);
       
  2322       }
       
  2323       // First chunk to add to the list
       
  2324       set_chunks_in_use(MediumIndex, new_chunk);
       
  2325 
       
  2326     } else {
       
  2327       // As a minimum the first medium chunk added would
       
  2328       // have become the _current_chunk
       
  2329       // so the _current_chunk has to be non-NULL here
       
  2330       // (although not necessarily still the first medium chunk).
       
  2331       assert(current_chunk()->word_size() == SpaceManager::MediumChunk,
       
  2332              "A medium chunk should be the current chunk");
       
  2333       current_chunk()->set_next(new_chunk);
       
  2334     }
       
  2335     // Make current chunk
       
  2336     set_current_chunk(new_chunk);
       
  2337     break;
       
  2338   default: {
       
  2339     // For null class loader data and DumpSharedSpaces, the first chunk isn't

  2340     // small, so the small chunk list will be null.  Link this first chunk

  2341     // as the current chunk.
       
  2342     if (make_current) {
       
  2343       // Set as the current chunk but otherwise treat as a humongous chunk.
       
  2344       set_current_chunk(new_chunk);
       
  2345     }
       
  2346     // Link at head.  The _current_chunk only points to a humongous chunk for

  2347     // the null class loader metaspace (class and data virtual space managers);

  2348     // for any other humongous chunks it will not point to the tail

  2349     // of the humongous chunks list.
       
  2350     new_chunk->set_next(chunks_in_use(HumongousIndex));
       
  2351     set_chunks_in_use(HumongousIndex, new_chunk);
       
  2352 
       
  2353     assert(new_chunk->word_size() > MediumChunk, "List inconsistency");
       
  2354   }
       
  2355   }
       
  2356 
       
  2357   assert(new_chunk->is_empty(), "Not ready for reuse");
       
  2358   if (TraceMetadataChunkAllocation && Verbose) {
       
  2359     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
       
  2360                         sum_count_in_chunks_in_use());
       
  2361     new_chunk->print_on(gclog_or_tty);
       
  2362     vs_list()->chunk_manager()->locked_print_free_chunks(tty);
       
  2363   }
       
  2364 }
       
  2365 
       
  2366 MetaWord* SpaceManager::allocate(size_t word_size) {
       
  2367   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
       
  2368 
       
  2369   size_t block_overhead = Metablock::overhead();
       
  2370   // If only the dictionary is going to be used (i.e., no
       
  2371   // indexed free list), then there is a minimum size requirement.
       
  2372   // MinChunkSize is a placeholder for the real minimum size JJJ
       
  2373   size_t byte_size_with_overhead = (word_size + block_overhead) * BytesPerWord;
       
  2374 #ifdef DEALLOCATE_BLOCKS
       
  2375   size_t raw_bytes_size = MAX2(ARENA_ALIGN(byte_size_with_overhead),
       
  2376                                MinChunkSize * BytesPerWord);
       
  2377 #else
       
  2378   size_t raw_bytes_size = ARENA_ALIGN(byte_size_with_overhead);
       
  2379 #endif
       
  2380   size_t raw_word_size = raw_bytes_size / BytesPerWord;
       
  2381   assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
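         // Illustrative sizing (hypothetical numbers): on a 64-bit VM with
         // BytesPerWord == 8 and a one-word Metablock overhead, a request for
         // 5 words becomes (5 + 1) * 8 = 48 bytes before ARENA_ALIGN rounding,
         // and the rounded byte size is converted back into raw_word_size.
         // When block deallocation is enabled, MinChunkSize also sets a floor.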
       
  2382 
       
  2383   BlockFreelist* fl =  block_freelists();
       
  2384   Metablock* block = NULL;
       
  2385   // Allocation from the dictionary is expensive in the sense that
       
  2386   // the dictionary has to be searched for a size.  Don't allocate
       
  2387   // from the dictionary until it starts to get fat.  Is this
       
  2388   // a reasonable policy?  Maybe a skinny dictionary is fast enough
       
  2389   // for allocations.  Do some profiling.  JJJ
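         // In short: the block freelist dictionary is consulted only once its
         // total size exceeds allocation_from_dictionary_limit; smaller pools
         // send the request straight to allocate_work() and the current chunk.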
       
  2390   if (fl->totalSize() > allocation_from_dictionary_limit) {
       
  2391     block = fl->get_block(raw_word_size);
       
  2392   }
       
  2393   if (block == NULL) {
       
  2394     block = allocate_work(raw_word_size);
       
  2395     if (block == NULL) {
       
  2396       return NULL;
       
  2397     }
       
  2398   }
       
  2399   Metadebug::deallocate_block_a_lot(this, raw_word_size);
       
  2400 
       
  2401   // Push the allocation past the word containing the size and leader.
       
  2402 #ifdef ASSERT
       
  2403   MetaWord* result =  block->data();
       
  2404   return result;
       
  2405 #else
       
  2406   return (MetaWord*) block;
       
  2407 #endif
       
  2408 }
       
  2409 
       
  2410 // Returns the address of space allocated for "word_size".

  2411 // This method does not know about blocks (Metablocks)
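       // Allocation order below: with DumpSharedSpaces only the current chunk is
       // used; otherwise the current chunk is tried first and, if that fails,
       // grow_and_allocate() is asked to satisfy the request.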
       
  2412 Metablock* SpaceManager::allocate_work(size_t word_size) {
       
  2413   assert_lock_strong(_lock);
       
  2414 #ifdef ASSERT
       
  2415   if (Metadebug::test_metadata_failure()) {
       
  2416     return NULL;
       
  2417   }
       
  2418 #endif
       
  2419   // Is there space in the current chunk?
       
  2420   Metablock* result = NULL;
       
  2421 
       
  2422   // For DumpSharedSpaces, only allocate out of the current chunk which is
       
  2423   // never null because we gave it the size we wanted.   Caller reports out
       
  2424   // of memory if this returns null.
       
  2425   if (DumpSharedSpaces) {
       
  2426     assert(current_chunk() != NULL, "should never happen");
       
  2427     inc_allocation_total(word_size);
       
  2428     return current_chunk()->allocate(word_size); // caller handles null result
       
  2429   }
       
  2430   if (current_chunk() != NULL) {
       
  2431     result = current_chunk()->allocate(word_size);
       
  2432   }
       
  2433 
       
  2434   if (result == NULL) {
       
  2435     result = grow_and_allocate(word_size);
       
  2436   }
       
  2437   if (result != NULL) {
       
  2438     inc_allocation_total(word_size);
       
  2439     assert(result != (Metablock*) chunks_in_use(MediumIndex), "Head of the list is being allocated");
       
  2440     assert(result->word_size() == word_size, "Size not set correctly");
       
  2441   }
       
  2442 
       
  2443   return result;
       
  2444 }
       
  2445 
       
  2446 void SpaceManager::verify() {
       
  2447   // If there are blocks in the dictionary, then
       
  2448   // verification of chunks does not work since
       
  2449   // being in the dictionary alters a chunk.
       
  2450   if (block_freelists()->totalSize() == 0) {
       
  2451     // Skip the small chunks because their next link points to
       
  2452     // medium chunks.  This is because the small chunk is the
       
  2453     // current chunk (for allocations) until it is full and the
       
  2454     // addition of the next chunk does not NULL the next

  2455     // link of the small chunk.
       
  2456     for (ChunkIndex i = MediumIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
       
  2457       Metachunk* curr = chunks_in_use(i);
       
  2458       while (curr != NULL) {
       
  2459         curr->verify();
       
  2460         curr = curr->next();
       
  2461       }
       
  2462     }
       
  2463   }
       
  2464 }
       
  2465 
       
  2466 #ifdef ASSERT
       
  2467 void SpaceManager::verify_allocation_total() {
       
  2468 #if 0
       
  2469   // Verification is only guaranteed at a safepoint.
       
  2470   if (SafepointSynchronize::is_at_safepoint()) {
       
  2471     gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT
       
  2472                            " sum_used_in_chunks_in_use " SIZE_FORMAT,
       
  2473                            this,
       
  2474                            allocation_total(),
       
  2475                            sum_used_in_chunks_in_use());
       
  2476   }
       
  2477   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
       
  2478   assert(allocation_total() == sum_used_in_chunks_in_use(),
       
  2479     err_msg("allocation total is not consistent %d vs %d",
       
  2480             allocation_total(), sum_used_in_chunks_in_use()));
       
  2481 #endif
       
  2482 }
       
  2483 
       
  2484 #endif
       
  2485 
       
  2486 void SpaceManager::dump(outputStream* const out) const {
       
  2487   size_t curr_total = 0;
       
  2488   size_t waste = 0;
       
  2489   uint i = 0;
       
  2490   size_t used = 0;
       
  2491   size_t capacity = 0;
       
  2492 
       
  2493   // Add up statistics for all chunks in this SpaceManager.
       
  2494   for (ChunkIndex index = SmallIndex;
       
  2495        index < NumberOfFreeLists;
       
  2496        index = next_chunk_index(index)) {
       
  2497     for (Metachunk* curr = chunks_in_use(index);
       
  2498          curr != NULL;
       
  2499          curr = curr->next()) {
       
  2500       out->print("%d) ", i++);
       
  2501       curr->print_on(out);
       
  2502       if (TraceMetadataChunkAllocation && Verbose) {
       
  2503         block_freelists()->print_on(out);
       
  2504       }
       
  2505       curr_total += curr->word_size();
       
  2506       used += curr->used_word_size();
       
  2507       capacity += curr->capacity_word_size();
       
  2508       waste += curr->free_word_size() + curr->overhead();
       
  2509     }
       
  2510   }
       
  2511 
       
  2512   size_t free = current_chunk()->free_word_size();
       
  2513   // Free space isn't wasted.
       
  2514   waste -= free;
       
  2515 
       
  2516   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
       
  2517                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
       
  2518                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
       
  2519 }
       
  2520 
       
  2521 #ifndef PRODUCT
       
  2522 void SpaceManager::mangle_freed_chunks() {
       
  2523   for (ChunkIndex index = SmallIndex;
       
  2524        index < NumberOfFreeLists;
       
  2525        index = next_chunk_index(index)) {
       
  2526     for (Metachunk* curr = chunks_in_use(index);
       
  2527          curr != NULL;
       
  2528          curr = curr->next()) {
       
  2529       // Try to detect incorrectly terminated small chunk
       
  2530       // list.
       
  2531       assert(index == MediumIndex || curr != chunks_in_use(MediumIndex),
       
  2532              err_msg("Mangling medium chunks in small chunks? "
       
  2533                      "curr " PTR_FORMAT " medium list " PTR_FORMAT,
       
  2534                      curr, chunks_in_use(MediumIndex)));
       
  2535       curr->mangle();
       
  2536     }
       
  2537   }
       
  2538 }
       
  2539 #endif // PRODUCT
       
  2540 
       
  2541 
       
  2542 // MetaspaceAux
       
  2543 
       
  2544 size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
       
  2545   size_t used = 0;
       
  2546 #ifdef ASSERT
       
  2547   size_t free = 0;
       
  2548   size_t capacity = 0;
       
  2549 #endif
       
  2550   ClassLoaderDataGraphMetaspaceIterator iter;
       
  2551   while (iter.repeat()) {
       
  2552     Metaspace* msp = iter.get_next();
       
  2553     // Sum allocation_total for each metaspace
       
  2554     if (msp != NULL) {
       
  2555       used += msp->used_words(mdtype);
       
  2556 #ifdef ASSERT
       
  2557       free += msp->free_words(mdtype);
       
  2558       capacity += msp->capacity_words(mdtype);
       
  2559       assert(used + free == capacity,
       
  2560         err_msg("Accounting is wrong used " SIZE_FORMAT
       
  2561                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT,
       
  2562                 used, free, capacity));
       
  2563 #endif
       
  2564     }
       
  2565   }
       
  2566   return used * BytesPerWord;
       
  2567 }
       
  2568 
       
  2569 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
       
  2570   size_t free = 0;
       
  2571   ClassLoaderDataGraphMetaspaceIterator iter;
       
  2572   while (iter.repeat()) {
       
  2573     Metaspace* msp = iter.get_next();
       
  2574     if (msp != NULL) {
       
  2575       free += msp->free_words(mdtype);
       
  2576     }
       
  2577   }
       
  2578   return free * BytesPerWord;
       
  2579 }
       
  2580 
       
  2581 // The total words available for metadata allocation.  This
       
  2582 // uses Metaspace capacity_words() which is the total words
       
  2583 // in chunks allocated for a Metaspace.
       
  2584 size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
       
  2585   size_t capacity = free_chunks_total(mdtype);
       
  2586   ClassLoaderDataGraphMetaspaceIterator iter;
       
  2587   while (iter.repeat()) {
       
  2588     Metaspace* msp = iter.get_next();
       
  2589     if (msp != NULL) {
       
  2590       capacity += msp->capacity_words(mdtype);
       
  2591     }
       
  2592   }
       
  2593   return capacity * BytesPerWord;
       
  2594 }
       
  2595 
       
  2596 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
       
  2597   size_t reserved = (mdtype == Metaspace::ClassType) ?
       
  2598                        Metaspace::class_space_list()->virtual_space_total() :
       
  2599                        Metaspace::space_list()->virtual_space_total();
       
  2600   return reserved * BytesPerWord;
       
  2601 }
       
  2602 
       
  2603 size_t MetaspaceAux::min_chunk_size() { return SpaceManager::MediumChunk; }
       
  2604 
       
  2605 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
       
  2606   ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
       
  2607                             Metaspace::class_space_list()->chunk_manager() :
       
  2608                             Metaspace::space_list()->chunk_manager();
       
  2609 
       
  2610   chunk->verify_free_chunks_total();
       
  2611   return chunk->free_chunks_total();
       
  2612 }
       
  2613 
       
  2614 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
       
  2615   return free_chunks_total(mdtype) * BytesPerWord;
       
  2616 }
       
  2617 
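       // Example of the non-verbose form below (numbers are illustrative only):
       //   ", [Metaspace: 4096K->4210K(5120K/8192K)]"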
       
  2618 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
       
  2619   gclog_or_tty->print(", [Metaspace:");
       
  2620   if (PrintGCDetails && Verbose) {
       
  2621     gclog_or_tty->print(" "  SIZE_FORMAT
       
  2622                         "->" SIZE_FORMAT
       
  2623                         "("  SIZE_FORMAT "/" SIZE_FORMAT ")",
       
  2624                         prev_metadata_used,
       
  2625                         used_in_bytes(),
       
  2626                         capacity_in_bytes(),
       
  2627                         reserved_in_bytes());
       
  2628   } else {
       
  2629     gclog_or_tty->print(" "  SIZE_FORMAT "K"
       
  2630                         "->" SIZE_FORMAT "K"
       
  2631                         "("  SIZE_FORMAT "K/" SIZE_FORMAT "K)",
       
  2632                         prev_metadata_used / K,
       
  2633                         used_in_bytes()/ K,
       
  2634                         capacity_in_bytes()/K,
       
  2635                         reserved_in_bytes()/ K);
       
  2636   }
       
  2637 
       
  2638   gclog_or_tty->print("]");
       
  2639 }
       
  2640 
       
  2641 // This is printed when PrintGCDetails
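       // Example output (numbers are illustrative only):
       //    Metaspace total 5120K, used 4210K, reserved 8192K
       //     data space     4096K, used 3400K, reserved 6144K
       //     class space    1024K, used 810K, reserved 2048K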
       
  2642 void MetaspaceAux::print_on(outputStream* out) {
       
  2643   Metaspace::MetadataType ct = Metaspace::ClassType;
       
  2644   Metaspace::MetadataType nct = Metaspace::NonClassType;
       
  2645 
       
  2646   out->print_cr(" Metaspace total "
       
  2647                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
       
  2648                 " reserved " SIZE_FORMAT "K",
       
  2649                 capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K);
       
  2650   out->print_cr("  data space     "
       
  2651                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
       
  2652                 " reserved " SIZE_FORMAT "K",
       
  2653                 capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K);
       
  2654   out->print_cr("  class space    "
       
  2655                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
       
  2656                 " reserved " SIZE_FORMAT "K",
       
  2657                 capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K);
       
  2658 }
       
  2659 
       
  2660 // Print information for class space and data space separately.
       
  2661 // This is almost the same as above.
       
  2662 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
       
  2663   size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
       
  2664   size_t capacity_bytes = capacity_in_bytes(mdtype);
       
  2665   size_t used_bytes = used_in_bytes(mdtype);
       
  2666   size_t free_bytes = free_in_bytes(mdtype);
       
  2667   size_t used_and_free = used_bytes + free_bytes +
       
  2668                            free_chunks_capacity_bytes;
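         // Accounting identity checked below: bytes used in chunks, plus bytes
         // still free in those chunks, plus the capacity of free chunks should
         // equal the total capacity reported for this metadata type.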
       
  2669   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
       
  2670              "K + unused in chunks " SIZE_FORMAT "K  + "
       
  2671              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
       
  2672              "K  capacity in allocated chunks " SIZE_FORMAT "K",
       
  2673              used_bytes / K,
       
  2674              free_bytes / K,
       
  2675              free_chunks_capacity_bytes / K,
       
  2676              used_and_free / K,
       
  2677              capacity_bytes / K);
       
  2678   assert(used_and_free == capacity_bytes, "Accounting is wrong");
       
  2679 }
       
  2680 
       
  2681 // Print total fragmentation for class and data metaspaces separately
       
  2682 void MetaspaceAux::print_waste(outputStream* out) {
       
  2683 
       
  2684   size_t small_waste = 0, medium_waste = 0, large_waste = 0;
       
  2685   size_t cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;
       
  2686 
       
  2687   ClassLoaderDataGraphMetaspaceIterator iter;
       
  2688   while (iter.repeat()) {
       
  2689     Metaspace* msp = iter.get_next();
       
  2690     if (msp != NULL) {
       
  2691       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
       
  2692       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
       
  2693       large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
       
  2694 
       
  2695       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
       
  2696       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
       
  2697       cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
       
  2698     }
       
  2699   }
       
  2700   out->print_cr("Total fragmentation waste (words) doesn't count free space");
       
  2701   out->print("  data: small " SIZE_FORMAT " medium " SIZE_FORMAT,
       
  2702              small_waste, medium_waste);
       
  2703   out->print_cr(" class: small " SIZE_FORMAT, cls_small_waste);
       
  2704 }
       
  2705 
       
  2706 // Dump global metaspace things from the end of ClassLoaderDataGraph
       
  2707 void MetaspaceAux::dump(outputStream* out) {
       
  2708   out->print_cr("All Metaspace:");
       
  2709   out->print("data space: "); print_on(out, Metaspace::NonClassType);
       
  2710   out->print("class space: "); print_on(out, Metaspace::ClassType);
       
  2711   print_waste(out);
       
  2712 }
       
  2713 
       
  2714 // Metaspace methods
       
  2715 
       
  2716 size_t Metaspace::_first_chunk_word_size = 0;
       
  2717 
       
  2718 Metaspace::Metaspace(Mutex* lock, size_t word_size) {
       
  2719   initialize(lock, word_size);
       
  2720 }
       
  2721 
       
  2722 Metaspace::Metaspace(Mutex* lock) {
       
  2723   initialize(lock);
       
  2724 }
       
  2725 
       
  2726 Metaspace::~Metaspace() {
       
  2727   delete _vsm;
       
  2728   delete _class_vsm;
       
  2729 }
       
  2730 
       
  2731 VirtualSpaceList* Metaspace::_space_list = NULL;
       
  2732 VirtualSpaceList* Metaspace::_class_space_list = NULL;
       
  2733 
       
  2734 #define VIRTUALSPACEMULTIPLIER 2
       
  2735 
       
  2736 void Metaspace::global_initialize() {
       
  2737   // Initialize the alignment for shared spaces.
       
  2738   int max_alignment = os::vm_page_size();
       
  2739   MetaspaceShared::set_max_alignment(max_alignment);
       
  2740 
       
  2741   if (DumpSharedSpaces) {
       
  2742     SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
       
  2743     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
       
  2744     SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
       
  2745     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
       
  2746 
       
  2747     // Initialize with the sum of the shared space sizes.  The read-only
       
  2748     // and read write metaspace chunks will be allocated out of this and the
       
  2749     // remainder is the misc code and data chunks.
       
  2750     size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
       
  2751                                  SharedMiscDataSize + SharedMiscCodeSize,
       
  2752                                  os::vm_allocation_granularity());
       
  2753     size_t word_size = total/wordSize;
       
  2754     _space_list = new VirtualSpaceList(word_size);
       
  2755   } else {
       
  2756     // If using shared space, open the file that contains the shared space
       
  2757     // and map in the memory before initializing the rest of metaspace (so
       
  2758     // the addresses don't conflict)
       
  2759     if (UseSharedSpaces) {
       
  2760       FileMapInfo* mapinfo = new FileMapInfo();
       
  2761       memset(mapinfo, 0, sizeof(FileMapInfo));
       
  2762 
       
  2763       // Open the shared archive file, read and validate the header. If
       
  2764       // initialization fails, shared spaces [UseSharedSpaces] are
       
  2765       // disabled and the file is closed.
       
  2766       // Map in spaces now also
       
  2767       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
       
  2768         FileMapInfo::set_current_info(mapinfo);
       
  2769       } else {
       
  2770         assert(!mapinfo->is_open() && !UseSharedSpaces,
       
  2771                "archive file not closed or shared spaces not disabled.");
       
  2772       }
       
  2773     }
       
  2774 
       
  2775     // Initialize this before initializing the VirtualSpaceList
       
  2776     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
       
  2777     // Arbitrarily set the initial virtual space to a multiple
       
  2778     // of the boot class loader size.
       
  2779     size_t word_size = VIRTUALSPACEMULTIPLIER * Metaspace::first_chunk_word_size();
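           // Illustrative arithmetic (hypothetical flag value): with
           // InitialBootClassLoaderMetaspaceSize == 4M on a 64-bit VM,
           // _first_chunk_word_size is 4M / 8 = 512K words, so the initial
           // virtual space is sized at 2 * 512K = 1M words.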
       
  2780     // Initialize the list of virtual spaces.
       
  2781     _space_list = new VirtualSpaceList(word_size);
       
  2782   }
       
  2783 }
       
  2784 
       
  2785 // For UseCompressedKlassPointers the class space is reserved as a piece of the
       
  2786 // Java heap because the compression algorithm is the same for each.  The
       
  2787 // argument passed in is at the top of the compressed space
       
  2788 void Metaspace::initialize_class_space(ReservedSpace rs) {
       
  2789   // The reserved space size may be bigger because of alignment, esp with UseLargePages
       
  2790   assert(rs.size() >= ClassMetaspaceSize, err_msg(SIZE_FORMAT " < " SIZE_FORMAT, rs.size(), ClassMetaspaceSize));
       
  2791   _class_space_list = new VirtualSpaceList(rs);
       
  2792 }
       
  2793 
       
  2794 // Class space probably needs a lot less than data space
       
  2795 const int class_space_divisor = 4;
       
  2796 
       
  2797 void Metaspace::initialize(Mutex* lock, size_t initial_size) {
       
  2798   // Use SmallChunk size if no size is specified; otherwise the class space gets a smaller size.
       
  2799   size_t word_size;
       
  2800   size_t class_word_size;
       
  2801   if (initial_size == 0) {
       
  2802     word_size = (size_t) SpaceManager::SmallChunk;
       
  2803     class_word_size = word_size;
       
  2804   } else {
       
  2805     word_size = initial_size;
       
  2806     class_word_size = initial_size/class_space_divisor;
       
  2807   }
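         // Illustrative sizing (hypothetical request): an initial_size of 1024
         // words gives word_size == 1024 for the data space manager and
         // class_word_size == 1024 / class_space_divisor == 256 for the class
         // space manager.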
       
  2808 
       
  2809   assert(space_list() != NULL,
       
  2810     "Metadata VirtualSpaceList has not been initialized");
       
  2811 
       
  2812   _vsm = new SpaceManager(lock, space_list());
       
  2813   if (_vsm == NULL) {
       
  2814     return;
       
  2815   }
       
  2816 
       
  2817   assert(class_space_list() != NULL,
       
  2818     "Class VirtualSpaceList has not been initialized");
       
  2819 
       
  2820   // Allocate SpaceManager for classes.
       
  2821   _class_vsm = new SpaceManager(lock, class_space_list());
       
  2822   if (_class_vsm == NULL) {
       
  2823     return;
       
  2824   }
       
  2825 
       
  2826   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
       
  2827 
       
  2828   // Allocate chunk for metadata objects
       
  2829   Metachunk* new_chunk =
       
  2830      space_list()->current_virtual_space()->get_chunk_vs_with_expand(word_size);
       
  2831   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
       
  2832   if (new_chunk != NULL) {
       
  2833     // Add to this manager's list of chunks in use and current_chunk().
       
  2834     vsm()->add_chunk(new_chunk, true);
       
  2835   }
       
  2836 
       
  2837   // Allocate chunk for class metadata objects
       
  2838   Metachunk* class_chunk =
       
  2839      class_space_list()->current_virtual_space()->get_chunk_vs_with_expand(class_word_size);
       
  2840   if (class_chunk != NULL) {
       
  2841     class_vsm()->add_chunk(class_chunk, true);
       
  2842   }
       
  2843 }
       
  2844 
       
  2845 
       
  2846 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
       
  2847   // DumpSharedSpaces doesn't use class metadata area (yet)
       
  2848   if (mdtype == ClassType && !DumpSharedSpaces) {
       
  2849     return class_vsm()->allocate(word_size);
       
  2850   } else {
       
  2851     return vsm()->allocate(word_size);
       
  2852   }
       
  2853 }
       
  2854 
       
  2855 // Space allocated in the Metaspace.  This may
       
  2856 // be across several metadata virtual spaces.
       
  2857 char* Metaspace::bottom() const {
       
  2858   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
       
  2859   return (char*)vsm()->current_chunk()->bottom();
       
  2860 }
       
  2861 
       
  2862 size_t Metaspace::used_words(MetadataType mdtype) const {
       
  2863   // return vsm()->allocation_total();
       
  2864   return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
       
  2865                                vsm()->sum_used_in_chunks_in_use();  // includes overhead!
       
  2866 }
       
  2867 
       
  2868 size_t Metaspace::free_words(MetadataType mdtype) const {
       
  2869   return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
       
  2870                                vsm()->sum_free_in_chunks_in_use();
       
  2871 }
       
  2872 
       
  2873 // Space capacity in the Metaspace.  It includes
       
  2874 // space in the list of chunks from which allocations
       
  2875 // have been made.  It does not include space in the global freelist;

  2876 // space available in the block dictionary is not added because it

  2877 // is already counted in some chunk.
       
  2878 size_t Metaspace::capacity_words(MetadataType mdtype) const {
       
  2879   return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
       
  2880                                vsm()->sum_capacity_in_chunks_in_use();
       
  2881 }
       
  2882 
       
  2883 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
       
  2884   if (SafepointSynchronize::is_at_safepoint()) {
       
  2885     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
       
  2886     // Don't take lock
       
  2887 #ifdef DEALLOCATE_BLOCKS
       
  2888     if (is_class) {
       
  2889       class_vsm()->deallocate(ptr);
       
  2890     } else {
       
  2891       vsm()->deallocate(ptr);
       
  2892     }
       
  2893 #else
       
  2894 #ifdef ASSERT
       
  2895     Copy::fill_to_words((HeapWord*)ptr, word_size, metadata_deallocate);
       
  2896 #endif
       
  2897 #endif
       
  2898 
       
  2899   } else {
       
  2900     MutexLocker ml(vsm()->lock());
       
  2901 
       
  2902 #ifdef DEALLOCATE_BLOCKS
       
  2903     if (is_class) {
       
  2904       class_vsm()->deallocate(ptr);
       
  2905     } else {
       
  2906       vsm()->deallocate(ptr);
       
  2907     }
       
  2908 #else
       
  2909 #ifdef ASSERT
       
  2910     Copy::fill_to_words((HeapWord*)ptr, word_size, metadata_deallocate);
       
  2911 #endif
       
  2912 #endif
       
  2913   }
       
  2914 }
       
  2915 
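       // Illustrative call shape (hypothetical caller; TRAPS means the last
       // argument is the usual CHECK macro):
       //   MetaWord* p = Metaspace::allocate(loader_data, word_size,
       //                                     /*read_only*/ false,
       //                                     Metaspace::NonClassType, CHECK_NULL);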
       
  2916 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
       
  2917                               bool read_only, MetadataType mdtype, TRAPS) {
       
  2918   if (HAS_PENDING_EXCEPTION) {
       
  2919     assert(false, "Should not allocate with exception pending");
       
  2920     return NULL;  // caller does a CHECK_NULL too
       
  2921   }
       
  2922 
       
  2923   // SSS: Should we align the allocations and make sure the sizes are aligned?
       
  2924   MetaWord* result = NULL;
       
  2925 
       
  2926   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
       
  2927         "ClassLoaderData::the_null_class_loader_data() should have been used.");
       
  2928   // Allocate in metaspaces without taking out a lock, because it deadlocks
       
  2929   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
       
  2930   // to revisit this for application class data sharing.
       
  2931   if (DumpSharedSpaces) {
       
  2932     if (read_only) {
       
  2933       result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
       
  2934     } else {
       
  2935       result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
       
  2936     }
       
  2937     if (result == NULL) {
       
  2938       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
       
  2939     }
       
  2940     return result;
       
  2941   }
       
  2942 
       
  2943   result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
       
  2944 
       
  2945   if (result == NULL) {
       
  2946     // Try to clean out some memory and retry.
       
  2947     result =
       
  2948     Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
       
  2949         loader_data, word_size, mdtype);
       
  2950 
       
  2951     // If result is still null, we are out of memory.
       
  2952     if (result == NULL) {
       
  2953       // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
       
  2954       report_java_out_of_memory("Metadata space");
       
  2955 
       
  2956       if (JvmtiExport::should_post_resource_exhausted()) {
       
  2957         JvmtiExport::post_resource_exhausted(
       
  2958             JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
       
  2959             "Metadata space");
       
  2960       }
       
  2961       THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
       
  2962     }
       
  2963   }
       
  2964   return result;
       
  2965 }
       
  2966 
       
  2967 void Metaspace::print_on(outputStream* out) const {
       
  2968   // Print both class virtual space counts and metaspace.
       
  2969   if (Verbose) {
       
  2970       vsm()->print_on(out);
       
  2971       class_vsm()->print_on(out);
       
  2972   }
       
  2973 }
       
  2974 
       
  2975 #ifndef PRODUCT
       
  2976 bool Metaspace::contains(const void * ptr) const {
       
  2977   if (MetaspaceShared::is_in_shared_space(ptr)) {
       
  2978     return true;
       
  2979   }
       
  2980   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
       
  2981   return space_list()->contains(ptr) || class_space_list()->contains(ptr);
       
  2982 }
       
  2983 #endif
       
  2984 
       
  2985 void Metaspace::verify() {
       
  2986   vsm()->verify();
       
  2987   class_vsm()->verify();
       
  2988 }
       
  2989 
       
  2990 void Metaspace::dump(outputStream* const out) const {
       
  2991   if (UseMallocOnly) {
       
  2992     // Just print usage for now
       
  2993     out->print_cr("usage " SIZE_FORMAT, used_words(Metaspace::NonClassType));
       
  2994   }
       
  2995   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
       
  2996   vsm()->dump(out);
       
  2997   out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
       
  2998   class_vsm()->dump(out);
       
  2999 }