src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
branch stuefe-new-metaspace-branch
changeset 58063 bdf136b8ae0e
parent 53970 1ad7c590a6e7
child 59138 714474295e0a
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2019 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "logging/log.hpp"

#include "memory/metaspace/chunkLevel.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/counter.hpp"
#include "memory/metaspace/internStat.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/rootChunkArea.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/settings.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"

#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

namespace metaspace {

#ifdef ASSERT
void check_pointer_is_aligned_to_commit_granule(const MetaWord* p) {
  assert(is_aligned(p, Settings::commit_granule_bytes()),
         "Pointer not aligned to commit granule size: " PTR_FORMAT ".",
         p2i(p));
}
void check_word_size_is_aligned_to_commit_granule(size_t word_size) {
  assert(is_aligned(word_size, Settings::commit_granule_words()),
         "Not aligned to commit granule size: " SIZE_FORMAT ".", word_size);
}
#endif

// Given an address range, ensure it is committed.
//
// The range has to be aligned to granule size.
//
// Function will:
// - check how many granules in that region are uncommitted; if all are committed, it
//    returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//    (GC threshold, MaxMetaspaceSize). If so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// Returns true on success, false if it hit a commit limit.
bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First calculate how large the committed regions within this range are.
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  // By how many words the commit charge would increase
  // were we to commit the given address range completely.
  const size_t commit_increase_words = word_size - committed_words_in_range;

  log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": committing range " PTR_FORMAT ".." PTR_FORMAT " (" SIZE_FORMAT " words)",
                       _node_id, p2i(_base), p2i(p), p2i(p + word_size), word_size);

  if (commit_increase_words == 0) {
    log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": ... already fully committed.",
                         _node_id, p2i(_base));
    return true; // Already fully committed, nothing to do.
  }

  // Before committing any more memory, check limits.
  if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
    return false;
  }

  // Commit...
  if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
  }

  log_debug(gc, metaspace)("Increased metaspace by " SIZE_FORMAT " bytes.",
                           commit_increase_words * BytesPerWord);

  // ... tell commit limiter...
  _commit_limiter->increase_committed(commit_increase_words);

  // ... update counters in containing vslist ...
  _total_committed_words_counter->increment_by(commit_increase_words);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_committed(p, word_size);

#ifdef ASSERT
  // The counter maintained by the CommitLimiter should equal the sum of committed words
  // in both the class and non-class vslists (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) {
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  DEBUG_ONLY(InternalStats::inc_num_space_committed();)

  return true;

}
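
// Usage sketch (illustration only; "node" is a hypothetical caller-side
// pointer, and the granule size depends on Settings):
//
//   MutexLocker ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
//   if (!node->commit_range(node->base(), Settings::commit_granule_words())) {
//     // Commit limit (GC threshold or MaxMetaspaceSize) was reached; the
//     // caller must cope, e.g. by triggering a metaspace GC before retrying.
//   }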
       
// Given an address range, ensure it is committed.
//
// The range does not have to be aligned to granule size. However, the function will always commit
// whole granules.
//
// Function will:
// - check how many granules in that region are uncommitted; if all are committed, it
//    returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//    (GC threshold, MaxMetaspaceSize). If so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// !! Careful:
//    calling ensure_range_is_committed on a range which contains both committed and uncommitted
//    areas will commit the whole area, thus erasing the content of the existing committed parts.
//    Make sure you never call this on an address range containing live data. !!
//
// Returns true on success, false if it hit a commit limit.
bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {

  assert_lock_strong(MetaspaceExpand_lock);
  assert(p != NULL && word_size > 0, "Sanity");

  MetaWord* p_start = align_down(p, Settings::commit_granule_bytes());
  MetaWord* p_end = align_up(p + word_size, Settings::commit_granule_bytes());

  // Todo: simple for now. Make it more intelligent later.
  return commit_range(p_start, p_end - p_start);

}
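
// Example of the rounding above (illustration only; assumes a 64K commit
// granule, i.e. Settings::commit_granule_bytes() == 0x10000):
//
//   requested range [0x18000, 0x28000) -> p_start = 0x10000, p_end = 0x30000
//
// so two whole granules get committed, even though the caller asked for a
// range straddling just parts of them.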
       
// Given an address range (which has to be aligned to commit granule size):
//  - uncommit it
//  - mark it as uncommitted in the commit mask
void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First calculate how large the committed regions within this range are.
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": uncommitting range " PTR_FORMAT ".." PTR_FORMAT " (" SIZE_FORMAT " words)",
                       _node_id, p2i(_base), p2i(p), p2i(p + word_size), word_size);

  if (committed_words_in_range == 0) {
    log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": ... already fully uncommitted.",
                         _node_id, p2i(_base));
    return; // Already fully uncommitted, nothing to do.
  }

  // Uncommit...
  if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
    // Note: this can actually happen, since uncommit may increase the number of mappings.
    fatal("Failed to uncommit metaspace.");
  }

  log_debug(metaspace)("Decreased metaspace by " SIZE_FORMAT " bytes.",
                       committed_words_in_range * BytesPerWord);

  // ... tell commit limiter...
  _commit_limiter->decrease_committed(committed_words_in_range);

  // ... and global counters...
  _total_committed_words_counter->decrement_by(committed_words_in_range);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_uncommitted(p, word_size);

#ifdef ASSERT
  // The counter maintained by the CommitLimiter should equal the sum of committed words
  // in both the class and non-class vslists (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  DEBUG_ONLY(InternalStats::inc_num_space_uncommitted();)

}
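
// Note on the failure mode above: on some platforms, uncommitting the middle
// of a mapping splits it in two, which can fail if the process hits its limit
// on the number of mappings. A sketch of a typical call (hypothetical; "node"
// is a node and "c" a free root chunk within it, so base and size are
// granule-aligned):
//
//   MutexLocker ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
//   node->uncommit_range(c->base(), c->word_size());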
       
//// creation, destruction ////

VirtualSpaceNode::VirtualSpaceNode(int node_id,
                                   ReservedSpace rs,
                                   CommitLimiter* limiter,
                                   SizeCounter* reserve_words_counter,
                                   SizeCounter* commit_words_counter)
  : _next(NULL),
    _rs(rs),
    _base((MetaWord*)rs.base()),
    _word_size(rs.size() / BytesPerWord),
    _used_words(0),
    _commit_mask((MetaWord*)rs.base(), rs.size() / BytesPerWord),
    _root_chunk_area_lut((MetaWord*)rs.base(), rs.size() / BytesPerWord),
    _commit_limiter(limiter),
    _total_reserved_words_counter(reserve_words_counter),
    _total_committed_words_counter(commit_words_counter),
    _node_id(node_id)
{

  log_debug(metaspace)("Create new VirtualSpaceNode %d, base " PTR_FORMAT ", word size " SIZE_FORMAT ".",
                       _node_id, p2i(_base), _word_size);

  // Update reserved counter in vslist
  _total_reserved_words_counter->increment_by(_word_size);

  assert_is_aligned(_base, chklvl::MAX_CHUNK_BYTE_SIZE);
  assert_is_aligned(_word_size, chklvl::MAX_CHUNK_WORD_SIZE);

  // Explicitly uncommit the whole node to guarantee it is inaccessible, for testing:
//  os::uncommit_memory((char*)_base, _word_size * BytesPerWord);

}
       
// Create a node of a given size
VirtualSpaceNode* VirtualSpaceNode::create_node(int node_id,
                                                size_t word_size,
                                                CommitLimiter* limiter,
                                                SizeCounter* reserve_words_counter,
                                                SizeCounter* commit_words_counter)
{

  DEBUG_ONLY(assert_is_aligned(word_size, chklvl::MAX_CHUNK_WORD_SIZE);)

  ReservedSpace rs(word_size * BytesPerWord,
                   chklvl::MAX_CHUNK_BYTE_SIZE,
                   false, // TODO deal with large pages
                   false);

  if (!rs.is_reserved()) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
  }

  assert_is_aligned(rs.base(), chklvl::MAX_CHUNK_BYTE_SIZE);

  return create_node(node_id, rs, limiter, reserve_words_counter, commit_words_counter);

}
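
// Reservation geometry, illustrated (assuming 4M root chunks, i.e.
// chklvl::MAX_CHUNK_BYTE_SIZE == 4 * M): a node created with a word_size
// equivalent to 8M reserves 8M of address space, aligned to 4M, and can hand
// out exactly two root chunks. Nothing is committed at this point.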
       
// Create a node over an existing space
VirtualSpaceNode* VirtualSpaceNode::create_node(int node_id,
                                                ReservedSpace rs,
                                                CommitLimiter* limiter,
                                                SizeCounter* reserve_words_counter,
                                                SizeCounter* commit_words_counter)
{
  DEBUG_ONLY(InternalStats::inc_num_vsnodes_created();)
  return new VirtualSpaceNode(node_id, rs, limiter, reserve_words_counter, commit_words_counter);
}
       
VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();

  log_debug(metaspace)("Destroying VirtualSpaceNode %d, base " PTR_FORMAT ", word size " SIZE_FORMAT ".",
                       _node_id, p2i(_base), _word_size);

  // Update counters in vslist
  _total_committed_words_counter->decrement_by(committed_words());
  _total_reserved_words_counter->decrement_by(_word_size);

  DEBUG_ONLY(InternalStats::inc_num_vsnodes_destroyed();)

}
       

//// Chunk allocation, splitting, merging /////

// Allocate a root chunk from this node. Will fail and return NULL
// if the node is full.
// Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
// Hence, before using this chunk, it must be committed.
// Also, no limits are checked, since no committing takes place.
Metachunk* VirtualSpaceNode::allocate_root_chunk() {

  assert_lock_strong(MetaspaceExpand_lock);

  assert_is_aligned(free_words(), chklvl::MAX_CHUNK_WORD_SIZE);

  if (free_words() >= chklvl::MAX_CHUNK_WORD_SIZE) {

    MetaWord* loc = _base + _used_words;
    _used_words += chklvl::MAX_CHUNK_WORD_SIZE;

    RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(loc);

    // Create a root chunk header and initialize it.
    Metachunk* c = rca->alloc_root_chunk_header(this);

    assert(c->base() == loc && c->vsnode() == this &&
           c->is_free(), "Sanity");

    DEBUG_ONLY(c->verify(true);)

    log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": newborn root chunk " METACHUNK_FORMAT ".",
                         _node_id, p2i(_base), METACHUNK_FORMAT_ARGS(c));

    if (Settings::newborn_root_chunks_are_fully_committed()) {
      log_trace(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": committing newborn root chunk.",
                           _node_id, p2i(_base));
      // Note: use Metachunk::ensure_commit, do not commit directly. This makes sure the chunk knows
      // its commit range and does not ask needlessly.
      c->ensure_fully_committed_locked();
    }

    return c;

  }

  return NULL; // Node is full.

}
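
// Usage sketch (illustration only): draining a node by carving out root
// chunks until it reports itself full:
//
//   Metachunk* c;
//   while ((c = node->allocate_root_chunk()) != NULL) {
//     // c is merely reserved; commit it (or a part of it) before first use,
//     // e.g. via c->ensure_fully_committed_locked().
//   }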

// Given a chunk c, split it recursively until you get a chunk of the given target_level.
//
// The original chunk must not be part of a freelist.
//
// Returns a pointer to the result chunk; the split-off chunks are added as
//  free chunks to the freelists.
//
// Returns NULL if the chunk cannot be split at least once.
Metachunk* VirtualSpaceNode::split(chklvl_t target_level, Metachunk* c, MetachunkListCluster* freelists) {

  assert_lock_strong(MetaspaceExpand_lock);

  // Get the area associated with this chunk and let it handle the splitting.
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)

  return rca->split(target_level, c, freelists);

}
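
// Illustration of buddy-style splitting (each level halves the chunk size;
// level 0 is a root chunk): splitting a free level-0 chunk down to
// target_level 3 returns one level-3 chunk and leaves one free buddy each of
// levels 1, 2 and 3 in the freelists.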

// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns the address of
// the merged chunk; NULL otherwise.
//
// The merged chunks are removed from the freelists.
//
// !!! Please note that if this method returns a non-NULL value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* VirtualSpaceNode::merge(Metachunk* c, MetachunkListCluster* freelists) {

  assert(c != NULL && c->is_free(), "Sanity");
  assert_lock_strong(MetaspaceExpand_lock);

  // Get the area associated with this chunk and let it handle the merging.
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  Metachunk* c2 = rca->merge(c, freelists);

  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)

  return c2;

}
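
// Illustration: if c and its buddy are both free, they merge into a chunk one
// level lower (twice the size); merging then repeats with that chunk's buddy,
// potentially collapsing all the way back to a free root chunk. Because of the
// invalidation rule above, callers should follow this pattern:
//
//   Metachunk* merged = node->merge(c, freelists);
//   if (merged != NULL) {
//     c = merged; // the old c must not be touched anymore
//   }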

// Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
// enlarge it in place by claiming its trailing buddy.
//
// This will only work if c is the leader of the buddy pair and the trailing buddy is free.
//
// If successful, the follower chunk will be removed from the freelists, and the leader chunk c will
// double in size (its level decreases by one).
//
// Returns true on success, false otherwise.
bool VirtualSpaceNode::attempt_enlarge_chunk(Metachunk* c, MetachunkListCluster* freelists) {

  assert(c != NULL && c->is_in_use() && !c->is_root_chunk(), "Sanity");
  assert_lock_strong(MetaspaceExpand_lock);

  // Get the area associated with this chunk and let it handle the enlarging.
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  bool rc = rca->attempt_enlarge_chunk(c, freelists);

  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)

  return rc;

}
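
// Illustration: a level-3 leader whose free level-3 buddy directly follows it
// becomes a single level-2 chunk spanning both. This lets an allocator grow
// its current chunk in place instead of retiring it and allocating a new one:
//
//   if (node->attempt_enlarge_chunk(c, freelists)) {
//     // c is now one level larger (twice its former word size).
//   }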
       
// Attempts to purge the node:
//
// If all chunks living in this node are free, they will all be removed from their freelists
//   and the node will be deleted.
//
// Returns true if the node has been deleted, false if not.
// !! If this returns true, do not access the node from this point on. !!
bool VirtualSpaceNode::attempt_purge(MetachunkListCluster* freelists) {

  assert_lock_strong(MetaspaceExpand_lock);

  // First find out if all areas are empty. Since empty chunks collapse to root chunk
  // size, if all chunks in this node are free root chunks we are good to go.
  for (int narea = 0; narea < _root_chunk_area_lut.number_of_areas(); narea ++) {
    const RootChunkArea* ra = _root_chunk_area_lut.get_area_by_index(narea);
    const Metachunk* c = ra->first_chunk();
    if (c != NULL) {
      if (!(c->is_root_chunk() && c->is_free())) {
        return false;
      }
    }
  }

  log_debug(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": purging.", _node_id, p2i(_base));

  // Okay, we can purge. Before we can do this, we need to remove all chunks from the freelist.
  for (int narea = 0; narea < _root_chunk_area_lut.number_of_areas(); narea ++) {
    RootChunkArea* ra = _root_chunk_area_lut.get_area_by_index(narea);
    Metachunk* c = ra->first_chunk();
    if (c != NULL) {
      log_trace(metaspace)("VirtualSpaceNode %d, base " PTR_FORMAT ": removing chunk " METACHUNK_FULL_FORMAT ".",
                           _node_id, p2i(_base), METACHUNK_FULL_FORMAT_ARGS(c));
      assert(c->is_free() && c->is_root_chunk(), "Sanity");
      freelists->remove(c);
    }
  }

  // Now, delete the node, then right away return since this object is invalid.
  delete this;

  return true;

}
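
// Usage sketch (illustration only; unlinking from the containing list is the
// job of the owning VirtualSpaceList):
//
//   if (node->attempt_purge(freelists)) {
//     // The node has deleted itself. Remove the dangling pointer from the
//     // list; do not dereference it again.
//   }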

void VirtualSpaceNode::print_on(outputStream* st) const {

  size_t scale = K;

  st->print("id: %d, base " PTR_FORMAT ": ", _node_id, p2i(base()));
  st->print("reserved=");
  print_scaled_words(st, word_size(), scale);
  st->print(", committed=");
  print_scaled_words_and_percentage(st, committed_words(), word_size(), scale);
  st->print(", used=");
  print_scaled_words_and_percentage(st, used_words(), word_size(), scale);

  st->cr();

  _root_chunk_area_lut.print_on(st);
  _commit_mask.print_on(st);

}

// Returns the size, in words, of committed space in this node alone.
// Note: iterates over the commit mask and hence may be a tad expensive on large nodes.
size_t VirtualSpaceNode::committed_words() const {
  return _commit_mask.get_committed_size();
}
   163     for (int i = 0; i < NUM_LINES; i ++) {
       
   164       st->fill_to(22);
       
   165       st->print_raw(lines[i], line_len);
       
   166       st->cr();
       
   167     }
       
   168   }
       
   169   for (int i = 0; i < NUM_LINES; i ++) {
       
   170     os::free(lines[i]);
       
   171   }
       
   172 }
       
   173 
       
   174 
   496 
   175 #ifdef ASSERT
   497 #ifdef ASSERT
   176 
   498 // Verify counters and basic structure. Slow mode: verify all chunks in depth
   177 // Verify counters, all chunks in this list node and the occupancy map.
   499 void VirtualSpaceNode::verify(bool slow) const {
   178 void VirtualSpaceNode::verify(bool slow) {
   500 
   179   log_trace(gc, metaspace, freelist)("verifying %s virtual space node (%s).",
   501   assert_lock_strong(MetaspaceExpand_lock);
   180     (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick"));
   502 
   181   // Fast mode: just verify chunk counters and basic geometry
   503   assert(base() != NULL, "Invalid base");
   182   // Slow mode: verify chunks and occupancy map
   504   assert(base() == (MetaWord*)_rs.base() &&
   183   uintx num_in_use_chunks = 0;
   505          word_size() == _rs.size() / BytesPerWord,
   184   Metachunk* chunk = first_chunk();
   506          "Sanity");
   185   Metachunk* invalid_chunk = (Metachunk*) top();
   507   assert_is_aligned(base(), chklvl::MAX_CHUNK_BYTE_SIZE);
   186 
   508   assert(used_words() <= word_size(), "Sanity");
   187   // Iterate the chunks in this node and verify each chunk.
   509 
   188   while (chunk < invalid_chunk ) {
   510   // Since we only ever hand out root chunks from a vsnode, top should always be aligned
   189     if (slow) {
   511   // to root chunk size.
   190       do_verify_chunk(chunk);
   512   assert_is_aligned(used_words(), chklvl::MAX_CHUNK_WORD_SIZE);
   191     }
   513 
   192     if (!chunk->is_tagged_free()) {
   514   _commit_mask.verify(slow);
   193       num_in_use_chunks ++;
   515   assert(committed_words() <= word_size(), "Sanity");
   194     }
   516   assert_is_aligned(committed_words(), Settings::commit_granule_words());
   195     const size_t s = chunk->word_size();
   517   _root_chunk_area_lut.verify(slow);
   196     // Prevent endless loop on invalid chunk size.
   518 
   197     assert(is_valid_chunksize(is_class(), s), "Invalid chunk size: " SIZE_FORMAT ".", s);
   519 }
   198     MetaWord* next = ((MetaWord*)chunk) + s;
   520 

#endif

} // namespace metaspace