hotspot/src/share/vm/memory/metaspace.cpp
changeset 20406 934f0b12daa9
parent 20306 f6805e2a0dd8
child 20407 68e215ce8944
equal deleted inserted replaced
20405:3321f6b16639 20406:934f0b12daa9
    27 #include "memory/binaryTreeDictionary.hpp"
    27 #include "memory/binaryTreeDictionary.hpp"
    28 #include "memory/freeList.hpp"
    28 #include "memory/freeList.hpp"
    29 #include "memory/collectorPolicy.hpp"
    29 #include "memory/collectorPolicy.hpp"
    30 #include "memory/filemap.hpp"
    30 #include "memory/filemap.hpp"
    31 #include "memory/freeList.hpp"
    31 #include "memory/freeList.hpp"
       
    32 #include "memory/gcLocker.hpp"
    32 #include "memory/metablock.hpp"
    33 #include "memory/metablock.hpp"
    33 #include "memory/metachunk.hpp"
    34 #include "memory/metachunk.hpp"
    34 #include "memory/metaspace.hpp"
    35 #include "memory/metaspace.hpp"
    35 #include "memory/metaspaceShared.hpp"
    36 #include "memory/metaspaceShared.hpp"
    36 #include "memory/resourceArea.hpp"
    37 #include "memory/resourceArea.hpp"
    37 #include "memory/universe.hpp"
    38 #include "memory/universe.hpp"
       
    39 #include "runtime/atomic.inline.hpp"
    38 #include "runtime/globals.hpp"
    40 #include "runtime/globals.hpp"
       
    41 #include "runtime/init.hpp"
    39 #include "runtime/java.hpp"
    42 #include "runtime/java.hpp"
    40 #include "runtime/mutex.hpp"
    43 #include "runtime/mutex.hpp"
    41 #include "runtime/orderAccess.hpp"
    44 #include "runtime/orderAccess.hpp"
    42 #include "services/memTracker.hpp"
    45 #include "services/memTracker.hpp"
    43 #include "utilities/copy.hpp"
    46 #include "utilities/copy.hpp"
    82 static ChunkIndex next_chunk_index(ChunkIndex i) {
    85 static ChunkIndex next_chunk_index(ChunkIndex i) {
    83   assert(i < NumberOfInUseLists, "Out of bound");
    86   assert(i < NumberOfInUseLists, "Out of bound");
    84   return (ChunkIndex) (i+1);
    87   return (ChunkIndex) (i+1);
    85 }
    88 }
    86 
    89 
    87 // Originally _capacity_until_GC was set to MetaspaceSize here but
    90 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
    88 // the default MetaspaceSize before argument processing was being
       
    89 // used which was not the desired value.  See the code
       
    90 // in should_expand() to see how the initialization is handled
       
    91 // now.
       
    92 size_t MetaspaceGC::_capacity_until_GC = 0;
       
    93 bool MetaspaceGC::_expand_after_GC = false;
       
    94 uint MetaspaceGC::_shrink_factor = 0;
    91 uint MetaspaceGC::_shrink_factor = 0;
    95 bool MetaspaceGC::_should_concurrent_collect = false;
    92 bool MetaspaceGC::_should_concurrent_collect = false;
    96 
    93 
    97 // Blocks of space for metadata are allocated out of Metachunks.
    94 // Blocks of space for metadata are allocated out of Metachunks.
    98 //
    95 //
   291   // Convenience functions for logical bottom and end
   288   // Convenience functions for logical bottom and end
   292   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   289   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   293   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   290   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   294 
   291 
   295   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
   292   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
   296   size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
       
   297   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
   293   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
       
   294 
       
   295   bool is_pre_committed() const { return _virtual_space.special(); }
   298 
   296 
   299   // address of next available space in _virtual_space;
   297   // address of next available space in _virtual_space;
   300   // Accessors
   298   // Accessors
   301   VirtualSpaceNode* next() { return _next; }
   299   VirtualSpaceNode* next() { return _next; }
   302   void set_next(VirtualSpaceNode* v) { _next = v; }
   300   void set_next(VirtualSpaceNode* v) { _next = v; }
   335   // Allocate a chunk from the virtual space and return it.
   333   // Allocate a chunk from the virtual space and return it.
   336   Metachunk* get_chunk_vs(size_t chunk_word_size);
   334   Metachunk* get_chunk_vs(size_t chunk_word_size);
   337 
   335 
   338   // Expands/shrinks the committed space in a virtual space.  Delegates
   336   // Expands/shrinks the committed space in a virtual space.  Delegates
   339   // to VirtualSpace
   337   // to VirtualSpace
   340   bool expand_by(size_t words, bool pre_touch = false);
   338   bool expand_by(size_t min_words, size_t preferred_words);
   341 
   339 
   342   // In preparation for deleting this node, remove all the chunks
   340   // In preparation for deleting this node, remove all the chunks
   343   // in the node from any freelist.
   341   // in the node from any freelist.
   344   void purge(ChunkManager* chunk_manager);
   342   void purge(ChunkManager* chunk_manager);
   345 
   343 
   349 #endif
   347 #endif
   350 
   348 
   351   void print_on(outputStream* st) const;
   349   void print_on(outputStream* st) const;
   352 };
   350 };
   353 
   351 
       
   352 #define assert_is_ptr_aligned(ptr, alignment) \
       
   353   assert(is_ptr_aligned(ptr, alignment),      \
       
   354     err_msg(PTR_FORMAT " is not aligned to "  \
       
   355       SIZE_FORMAT, ptr, alignment))
       
   356 
       
   357 #define assert_is_size_aligned(size, alignment) \
       
   358   assert(is_size_aligned(size, alignment),      \
       
   359     err_msg(SIZE_FORMAT " is not aligned to "   \
       
   360        SIZE_FORMAT, size, alignment))
       
   361 
       
   362 
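
The two assert macros above wrap a power-of-two alignment check in a formatted assert message. A minimal standalone sketch of the underlying check, with local helpers standing in for HotSpot's is_size_aligned() and is_ptr_aligned() (hypothetical stand-ins, not the VM code):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Stand-ins for HotSpot's helpers; 'alignment' must be a power of two.
static bool my_is_size_aligned(size_t size, size_t alignment) {
  return (size & (alignment - 1)) == 0;
}
static bool my_is_ptr_aligned(const void* ptr, size_t alignment) {
  return my_is_size_aligned((size_t)(uintptr_t)ptr, alignment);
}

int main() {
  assert(my_is_size_aligned(64 * 1024, 64 * 1024));       // commit-granularity style check
  assert(!my_is_size_aligned(64 * 1024 + 8, 64 * 1024));
  int x = 0;
  assert(my_is_ptr_aligned(&x, sizeof(int)));
  return 0;
}
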
       
   363 // Decide if large pages should be committed when the memory is reserved.
       
   364 static bool should_commit_large_pages_when_reserving(size_t bytes) {
       
   365   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
       
   366     size_t words = bytes / BytesPerWord;
       
   367     bool is_class = false; // We never reserve large pages for the class space.
       
   368     if (MetaspaceGC::can_expand(words, is_class) &&
       
   369         MetaspaceGC::allowed_expansion() >= words) {
       
   370       return true;
       
   371     }
       
   372   }
       
   373 
       
   374   return false;
       
   375 }
       
   376 
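
should_commit_large_pages_when_reserving() only answers true on platforms that cannot commit large pages piecemeal, where reserving with large pages implies committing the whole reservation up front; the request is therefore also checked against the GC expansion budget. A standalone sketch of that decision, with the VM flags and queries folded into plain booleans (hypothetical names, not the VM API):

#include <cstddef>

struct LargePageEnv {
  bool use_large_pages;                 // UseLargePages
  bool use_large_pages_in_metaspace;    // UseLargePagesInMetaspace
  bool can_commit_large_page_memory;    // os::can_commit_large_page_memory()
  size_t allowed_expansion_words;       // MetaspaceGC::allowed_expansion()
};

static bool commit_large_pages_when_reserving(const LargePageEnv& env,
                                              size_t bytes, size_t bytes_per_word) {
  if (env.use_large_pages && env.use_large_pages_in_metaspace &&
      !env.can_commit_large_page_memory) {
    // Commit happens at reserve time, so the whole reservation must fit
    // within the current expansion budget (the can_expand() check is elided).
    size_t words = bytes / bytes_per_word;
    return env.allowed_expansion_words >= words;
  }
  return false;
}
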
   354   // byte_size is the size of the associated virtualspace.
   377   // bytes is the size of the associated VirtualSpace.
   355 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   378 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   356   // align up to vm allocation granularity
   379   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
   357   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
       
   358 
   380 
   359   // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
   381   // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
   360   // configurable address, generally at the top of the Java heap so other
   382   // configurable address, generally at the top of the Java heap so other
   361   // memory addresses don't conflict.
   383   // memory addresses don't conflict.
   362   if (DumpSharedSpaces) {
   384   if (DumpSharedSpaces) {
   363     char* shared_base = (char*)SharedBaseAddress;
   385     bool large_pages = false; // No large pages when dumping the CDS archive.
   364     _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
   386     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
       
   387 
       
   388     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
   365     if (_rs.is_reserved()) {
   389     if (_rs.is_reserved()) {
   366       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
   390       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
   367     } else {
   391     } else {
   368       // Get a mmap region anywhere if the SharedBaseAddress fails.
   392       // Get a mmap region anywhere if the SharedBaseAddress fails.
   369       _rs = ReservedSpace(byte_size);
   393       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   370     }
   394     }
   371     MetaspaceShared::set_shared_rs(&_rs);
   395     MetaspaceShared::set_shared_rs(&_rs);
   372   } else {
   396   } else {
   373     _rs = ReservedSpace(byte_size);
   397     bool large_pages = should_commit_large_pages_when_reserving(bytes);
   374   }
   398 
   375 
   399     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   376   MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
   400   }
       
   401 
       
   402   if (_rs.is_reserved()) {
       
   403     assert(_rs.base() != NULL, "Catch if we get a NULL address");
       
   404     assert(_rs.size() != 0, "Catch if we get a 0 size");
       
   405     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
       
   406     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
       
   407 
       
   408     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
       
   409   }
   377 }
   410 }
   378 
   411 
   379 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
   412 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
   380   Metachunk* chunk = first_chunk();
   413   Metachunk* chunk = first_chunk();
   381   Metachunk* invalid_chunk = (Metachunk*) top();
   414   Metachunk* invalid_chunk = (Metachunk*) top();
   408   return count;
   441   return count;
   409 }
   442 }
   410 #endif
   443 #endif
   411 
   444 
   412 // List of VirtualSpaces for metadata allocation.
   445 // List of VirtualSpaces for metadata allocation.
   413 // It has a  _next link for singly linked list and a MemRegion
       
   414 // for total space in the VirtualSpace.
       
   415 class VirtualSpaceList : public CHeapObj<mtClass> {
   446 class VirtualSpaceList : public CHeapObj<mtClass> {
   416   friend class VirtualSpaceNode;
   447   friend class VirtualSpaceNode;
   417 
   448 
   418   enum VirtualSpaceSizes {
   449   enum VirtualSpaceSizes {
   419     VirtualSpaceSize = 256 * K
   450     VirtualSpaceSize = 256 * K
   420   };
   451   };
   421 
   452 
   422   // Global list of virtual spaces
       
   423   // Head of the list
   453   // Head of the list
   424   VirtualSpaceNode* _virtual_space_list;
   454   VirtualSpaceNode* _virtual_space_list;
   425   // virtual space currently being used for allocations
   455   // virtual space currently being used for allocations
   426   VirtualSpaceNode* _current_virtual_space;
   456   VirtualSpaceNode* _current_virtual_space;
   427 
   457 
   428   // Can this virtual list allocate >1 spaces?  Also, used to determine
   458   // Is this VirtualSpaceList used for the compressed class space
   429   // whether to allocate unlimited small chunks in this virtual space
       
   430   bool _is_class;
   459   bool _is_class;
   431   bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
       
   432 
   460 
   433   // Sum of reserved and committed memory in the virtual spaces
   461   // Sum of reserved and committed memory in the virtual spaces
   434   size_t _reserved_words;
   462   size_t _reserved_words;
   435   size_t _committed_words;
   463   size_t _committed_words;
   436 
   464 
   451   void link_vs(VirtualSpaceNode* new_entry);
   479   void link_vs(VirtualSpaceNode* new_entry);
   452 
   480 
   453   // Get another virtual space and add it to the list.  This
   481   // Get another virtual space and add it to the list.  This
   454   // is typically prompted by a failed attempt to allocate a chunk
   482   // is typically prompted by a failed attempt to allocate a chunk
   455   // and is typically followed by the allocation of a chunk.
   483   // and is typically followed by the allocation of a chunk.
   456   bool grow_vs(size_t vs_word_size);
   484   bool create_new_virtual_space(size_t vs_word_size);
   457 
   485 
   458  public:
   486  public:
   459   VirtualSpaceList(size_t word_size);
   487   VirtualSpaceList(size_t word_size);
   460   VirtualSpaceList(ReservedSpace rs);
   488   VirtualSpaceList(ReservedSpace rs);
   461 
   489 
   463 
   491 
   464   Metachunk* get_new_chunk(size_t word_size,
   492   Metachunk* get_new_chunk(size_t word_size,
   465                            size_t grow_chunks_by_words,
   493                            size_t grow_chunks_by_words,
   466                            size_t medium_chunk_bunch);
   494                            size_t medium_chunk_bunch);
   467 
   495 
   468   bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
   496   bool expand_node_by(VirtualSpaceNode* node,
   469 
   497                       size_t min_words,
   470   // Get the first chunk for a Metaspace.  Used for
   498                       size_t preferred_words);
   471   // special cases such as the boot class loader, reflection
   499 
   472   // class loader and anonymous class loader.
   500   bool expand_by(size_t min_words,
   473   Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
   501                  size_t preferred_words);
   474 
   502 
   475   VirtualSpaceNode* current_virtual_space() {
   503   VirtualSpaceNode* current_virtual_space() {
   476     return _current_virtual_space;
   504     return _current_virtual_space;
   477   }
   505   }
   478 
   506 
   479   bool is_class() const { return _is_class; }
   507   bool is_class() const { return _is_class; }
   480 
   508 
   481   // Allocate the first virtualspace.
   509   bool initialization_succeeded() { return _virtual_space_list != NULL; }
   482   void initialize(size_t word_size);
       
   483 
   510 
   484   size_t reserved_words()  { return _reserved_words; }
   511   size_t reserved_words()  { return _reserved_words; }
   485   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
   512   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
   486   size_t committed_words() { return _committed_words; }
   513   size_t committed_words() { return _committed_words; }
   487   size_t committed_bytes() { return committed_words() * BytesPerWord; }
   514   size_t committed_bytes() { return committed_words() * BytesPerWord; }
   867 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
   894 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
   868   // Bottom of the new chunk
   895   // Bottom of the new chunk
   869   MetaWord* chunk_limit = top();
   896   MetaWord* chunk_limit = top();
   870   assert(chunk_limit != NULL, "Not safe to call this method");
   897   assert(chunk_limit != NULL, "Not safe to call this method");
   871 
   898 
       
   899   // The virtual spaces are always expanded by the
       
   900   // commit granularity to enforce the following condition.
       
   901   // Without this the is_available check will not work correctly.
       
   902   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
       
   903       "The committed memory doesn't match the expanded memory.");
       
   904 
   872   if (!is_available(chunk_word_size)) {
   905   if (!is_available(chunk_word_size)) {
   873     if (TraceMetadataChunkAllocation) {
   906     if (TraceMetadataChunkAllocation) {
   874       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
   907       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
   875       // Dump some information about the virtual space that is nearly full
   908       // Dump some information about the virtual space that is nearly full
   876       print_on(gclog_or_tty);
   909       print_on(gclog_or_tty);
   886   return result;
   919   return result;
   887 }
   920 }
   888 
   921 
   889 
   922 
   890 // Expand the virtual space (commit more of the reserved space)
   923 // Expand the virtual space (commit more of the reserved space)
   891 bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
   924 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
   892   size_t bytes = words * BytesPerWord;
   925   size_t min_bytes = min_words * BytesPerWord;
   893   bool result =  virtual_space()->expand_by(bytes, pre_touch);
   926   size_t preferred_bytes = preferred_words * BytesPerWord;
   894   if (TraceMetavirtualspaceAllocation && !result) {
   927 
   895     gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
   928   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
   896                            "for byte size " SIZE_FORMAT, bytes);
   929 
   897     virtual_space()->print_on(gclog_or_tty);
   930   if (uncommitted < min_bytes) {
   898   }
   931     return false;
       
   932   }
       
   933 
       
   934   size_t commit = MIN2(preferred_bytes, uncommitted);
       
   935   bool result = virtual_space()->expand_by(commit, false);
       
   936 
       
   937   assert(result, "Failed to commit memory");
       
   938 
   899   return result;
   939   return result;
   900 }
   940 }
   901 
   941 
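
The rewritten VirtualSpaceNode::expand_by() above fails only when even min_words cannot be committed, and otherwise commits as much of preferred_words as the reservation still has room for. A standalone sketch of just the clamping arithmetic (sizes in bytes, no actual commit call):

#include <algorithm>
#include <cstddef>

// Returns the number of bytes to commit, or 0 for failure.
// 'uncommitted' is reserved_size() - actual_committed_size().
static size_t bytes_to_commit(size_t min_bytes, size_t preferred_bytes,
                              size_t uncommitted) {
  if (uncommitted < min_bytes) {
    return 0;                                       // cannot satisfy the minimum
  }
  return std::min(preferred_bytes, uncommitted);    // commit as much as possible
}
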
   902 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   942 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   903   assert_lock_strong(SpaceManager::expand_lock());
   943   assert_lock_strong(SpaceManager::expand_lock());
   912 
   952 
   913   if (!_rs.is_reserved()) {
   953   if (!_rs.is_reserved()) {
   914     return false;
   954     return false;
   915   }
   955   }
   916 
   956 
   917   // An allocation out of this Virtualspace that is larger
   957   // These are necessary restrictions to make sure that the virtual space always
   918   // than an initial commit size can waste that initial committed
   958   // grows in steps of Metaspace::commit_alignment(). If both base and size are
   919   // space.
   959   // aligned, only the middle alignment of the VirtualSpace is used.
   920   size_t committed_byte_size = 0;
   960   assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
   921   bool result = virtual_space()->initialize(_rs, committed_byte_size);
   961   assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
       
   962 
       
   963   // ReservedSpaces marked as special will have the entire memory
       
   964   // pre-committed. Setting a committed size will make sure that
       
   965   // committed_size and actual_committed_size agree.
       
   966   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
       
   967 
       
   968   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
       
   969                                             Metaspace::commit_alignment());
   922   if (result) {
   970   if (result) {
       
   971     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
       
   972         "Checking that the pre-committed memory was registered by the VirtualSpace");
       
   973 
   923     set_top((MetaWord*)virtual_space()->low());
   974     set_top((MetaWord*)virtual_space()->low());
   924     set_reserved(MemRegion((HeapWord*)_rs.base(),
   975     set_reserved(MemRegion((HeapWord*)_rs.base(),
   925                  (HeapWord*)(_rs.base() + _rs.size())));
   976                  (HeapWord*)(_rs.base() + _rs.size())));
   926 
   977 
   927     assert(reserved()->start() == (HeapWord*) _rs.base(),
   978     assert(reserved()->start() == (HeapWord*) _rs.base(),
   974 void VirtualSpaceList::dec_reserved_words(size_t v) {
  1025 void VirtualSpaceList::dec_reserved_words(size_t v) {
   975   assert_lock_strong(SpaceManager::expand_lock());
  1026   assert_lock_strong(SpaceManager::expand_lock());
   976   _reserved_words = _reserved_words - v;
  1027   _reserved_words = _reserved_words - v;
   977 }
  1028 }
   978 
  1029 
       
  1030 #define assert_committed_below_limit()                             \
       
  1031   assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
       
  1032       err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
       
  1033               " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
       
  1034           MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
       
  1035 
   979 void VirtualSpaceList::inc_committed_words(size_t v) {
  1036 void VirtualSpaceList::inc_committed_words(size_t v) {
   980   assert_lock_strong(SpaceManager::expand_lock());
  1037   assert_lock_strong(SpaceManager::expand_lock());
   981   _committed_words = _committed_words + v;
  1038   _committed_words = _committed_words + v;
       
  1039 
       
  1040   assert_committed_below_limit();
   982 }
  1041 }
   983 void VirtualSpaceList::dec_committed_words(size_t v) {
  1042 void VirtualSpaceList::dec_committed_words(size_t v) {
   984   assert_lock_strong(SpaceManager::expand_lock());
  1043   assert_lock_strong(SpaceManager::expand_lock());
   985   _committed_words = _committed_words - v;
  1044   _committed_words = _committed_words - v;
       
  1045 
       
  1046   assert_committed_below_limit();
   986 }
  1047 }
   987 
  1048 
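
With the new assert_committed_below_limit() macro, every update of the committed-words counter re-checks the global invariant committed <= MaxMetaspaceSize. A standalone sketch of a counter with the same checked updates, assuming a fixed limit:

#include <cassert>
#include <cstddef>

class CommittedCounter {
  size_t _committed_words;
  const size_t _limit_words;   // stand-in for MaxMetaspaceSize in words
 public:
  explicit CommittedCounter(size_t limit)
      : _committed_words(0), _limit_words(limit) {}
  void inc(size_t v) {
    _committed_words += v;
    assert(_committed_words <= _limit_words && "Too much committed memory");
  }
  void dec(size_t v) {
    assert(v <= _committed_words);
    _committed_words -= v;
  }
};
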
   988 void VirtualSpaceList::inc_virtual_space_count() {
  1049 void VirtualSpaceList::inc_virtual_space_count() {
   989   assert_lock_strong(SpaceManager::expand_lock());
  1050   assert_lock_strong(SpaceManager::expand_lock());
   990   _virtual_space_count++;
  1051   _virtual_space_count++;
  1023     // Don't free the current virtual space since it will likely
  1084     // Don't free the current virtual space since it will likely
  1024     // be needed soon.
  1085     // be needed soon.
  1025     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
  1086     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
  1026       // Unlink it from the list
  1087       // Unlink it from the list
  1027       if (prev_vsl == vsl) {
  1088       if (prev_vsl == vsl) {
  1028         // This is the case of the current note being the first note.
  1089         // This is the case of the current node being the first node.
  1029         assert(vsl == virtual_space_list(), "Expected to be the first note");
  1090         assert(vsl == virtual_space_list(), "Expected to be the first node");
  1030         set_virtual_space_list(vsl->next());
  1091         set_virtual_space_list(vsl->next());
  1031       } else {
  1092       } else {
  1032         prev_vsl->set_next(vsl->next());
  1093         prev_vsl->set_next(vsl->next());
  1033       }
  1094       }
  1034 
  1095 
  1052     }
  1113     }
  1053   }
  1114   }
  1054 #endif
  1115 #endif
  1055 }
  1116 }
  1056 
  1117 
  1057 VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
  1118 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
  1058                                    _is_class(false),
  1119                                    _is_class(false),
  1059                                    _virtual_space_list(NULL),
  1120                                    _virtual_space_list(NULL),
  1060                                    _current_virtual_space(NULL),
  1121                                    _current_virtual_space(NULL),
  1061                                    _reserved_words(0),
  1122                                    _reserved_words(0),
  1062                                    _committed_words(0),
  1123                                    _committed_words(0),
  1063                                    _virtual_space_count(0) {
  1124                                    _virtual_space_count(0) {
  1064   MutexLockerEx cl(SpaceManager::expand_lock(),
  1125   MutexLockerEx cl(SpaceManager::expand_lock(),
  1065                    Mutex::_no_safepoint_check_flag);
  1126                    Mutex::_no_safepoint_check_flag);
  1066   bool initialization_succeeded = grow_vs(word_size);
  1127   create_new_virtual_space(word_size);
  1067   assert(initialization_succeeded,
       
  1068     " VirtualSpaceList initialization should not fail");
       
  1069 }
  1128 }
  1070 
  1129 
  1071 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
  1130 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
  1072                                    _is_class(true),
  1131                                    _is_class(true),
  1073                                    _virtual_space_list(NULL),
  1132                                    _virtual_space_list(NULL),
  1077                                    _virtual_space_count(0) {
  1136                                    _virtual_space_count(0) {
  1078   MutexLockerEx cl(SpaceManager::expand_lock(),
  1137   MutexLockerEx cl(SpaceManager::expand_lock(),
  1079                    Mutex::_no_safepoint_check_flag);
  1138                    Mutex::_no_safepoint_check_flag);
  1080   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  1139   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  1081   bool succeeded = class_entry->initialize();
  1140   bool succeeded = class_entry->initialize();
  1082   assert(succeeded, " VirtualSpaceList initialization should not fail");
  1141   if (succeeded) {
  1083   link_vs(class_entry);
  1142     link_vs(class_entry);
       
  1143   }
  1084 }
  1144 }
  1085 
  1145 
  1086 size_t VirtualSpaceList::free_bytes() {
  1146 size_t VirtualSpaceList::free_bytes() {
  1087   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
  1147   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
  1088 }
  1148 }
  1089 
  1149 
  1090 // Allocate another meta virtual space and add it to the list.
  1150 // Allocate another meta virtual space and add it to the list.
  1091 bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  1151 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  1092   assert_lock_strong(SpaceManager::expand_lock());
  1152   assert_lock_strong(SpaceManager::expand_lock());
       
  1153 
       
  1154   if (is_class()) {
       
  1155     assert(false, "We currently don't support more than one VirtualSpace for"
       
  1156                   " the compressed class space. The initialization of the"
       
  1157                   " CCS uses another code path and should not hit this path.");
       
  1158     return false;
       
  1159   }
       
  1160 
  1093   if (vs_word_size == 0) {
  1161   if (vs_word_size == 0) {
       
  1162     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
  1094     return false;
  1163     return false;
  1095   }
  1164   }
       
  1165 
  1096   // Reserve the space
  1166   // Reserve the space
  1097   size_t vs_byte_size = vs_word_size * BytesPerWord;
  1167   size_t vs_byte_size = vs_word_size * BytesPerWord;
  1098   assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
  1168   assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
  1099 
  1169 
  1100   // Allocate the meta virtual space and initialize it.
  1170   // Allocate the meta virtual space and initialize it.
  1101   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  1171   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  1102   if (!new_entry->initialize()) {
  1172   if (!new_entry->initialize()) {
  1103     delete new_entry;
  1173     delete new_entry;
  1104     return false;
  1174     return false;
  1105   } else {
  1175   } else {
  1106     assert(new_entry->reserved_words() == vs_word_size, "Must be");
  1176     assert(new_entry->reserved_words() == vs_word_size,
       
  1177         "Reserved memory size differs from requested memory size");
  1107     // ensure lock-free iteration sees fully initialized node
  1178     // ensure lock-free iteration sees fully initialized node
  1108     OrderAccess::storestore();
  1179     OrderAccess::storestore();
  1109     link_vs(new_entry);
  1180     link_vs(new_entry);
  1110     return true;
  1181     return true;
  1111   }
  1182   }
  1128     VirtualSpaceNode* vsl = current_virtual_space();
  1199     VirtualSpaceNode* vsl = current_virtual_space();
  1129     vsl->print_on(gclog_or_tty);
  1200     vsl->print_on(gclog_or_tty);
  1130   }
  1201   }
  1131 }
  1202 }
  1132 
  1203 
  1133 bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
  1204 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
       
  1205                                       size_t min_words,
       
  1206                                       size_t preferred_words) {
  1134   size_t before = node->committed_words();
  1207   size_t before = node->committed_words();
  1135 
  1208 
  1136   bool result = node->expand_by(word_size, pre_touch);
  1209   bool result = node->expand_by(min_words, preferred_words);
  1137 
  1210 
  1138   size_t after = node->committed_words();
  1211   size_t after = node->committed_words();
  1139 
  1212 
  1140   // after and before can be the same if the memory was pre-committed.
  1213   // after and before can be the same if the memory was pre-committed.
  1141   assert(after >= before, "Must be");
  1214   assert(after >= before, "Inconsistency");
  1142   inc_committed_words(after - before);
  1215   inc_committed_words(after - before);
  1143 
  1216 
  1144   return result;
  1217   return result;
       
  1218 }
       
  1219 
       
  1220 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
       
  1221   assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
       
  1222   assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
       
  1223   assert(min_words <= preferred_words, "Invalid arguments");
       
  1224 
       
  1225   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
       
  1226     return  false;
       
  1227   }
       
  1228 
       
  1229   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
       
  1230   if (allowed_expansion_words < min_words) {
       
  1231     return false;
       
  1232   }
       
  1233 
       
  1234   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
       
  1235 
       
  1236   // Commit more memory from the current virtual space.
       
  1237   bool vs_expanded = expand_node_by(current_virtual_space(),
       
  1238                                     min_words,
       
  1239                                     max_expansion_words);
       
  1240   if (vs_expanded) {
       
  1241     return true;
       
  1242   }
       
  1243 
       
  1244   // Get another virtual space.
       
  1245   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
       
  1246   grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
       
  1247 
       
  1248   if (create_new_virtual_space(grow_vs_words)) {
       
  1249     if (current_virtual_space()->is_pre_committed()) {
       
  1250       // The memory was pre-committed, so we are done here.
       
  1251       assert(min_words <= current_virtual_space()->committed_words(),
       
  1252           "The new VirtualSpace was pre-committed, so it"
       
  1253           "should be large enough to fit the alloc request.");
       
  1254       return true;
       
  1255     }
       
  1256 
       
  1257     return expand_node_by(current_virtual_space(),
       
  1258                           min_words,
       
  1259                           max_expansion_words);
       
  1260   }
       
  1261 
       
  1262   return false;
  1145 }
  1263 }
  1146 
  1264 
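
VirtualSpaceList::expand_by() above layers three steps: clamp the request against the GC budget, try to commit in the current node, and only then fall back to a freshly reserved node, which may already be pre-committed (e.g. when large pages were committed at reserve time). A condensed standalone sketch of that control flow; Node and the reserve step are simplified stand-ins for VirtualSpaceNode and create_new_virtual_space():

#include <algorithm>
#include <cstddef>

// Minimal stand-in for VirtualSpaceNode: reserved/committed word counters.
struct Node {
  size_t reserved_words;
  size_t committed_words;
  bool   pre_committed;
  bool expand(size_t min_words, size_t preferred_words) {
    size_t uncommitted = reserved_words - committed_words;
    if (uncommitted < min_words) return false;
    committed_words += std::min(preferred_words, uncommitted);
    return true;
  }
};

// 'allowed_words' stands in for MetaspaceGC::allowed_expansion(); 'fresh' is
// assumed to have been reserved successfully when the current node is full.
static bool list_expand_by(Node& current, Node& fresh,
                           size_t min_words, size_t preferred_words,
                           size_t allowed_words) {
  if (allowed_words < min_words) return false;        // GC budget exhausted
  size_t max_words = std::min(preferred_words, allowed_words);
  if (current.expand(min_words, max_words)) return true;
  if (fresh.pre_committed) return true;               // nothing left to commit
  return fresh.expand(min_words, max_words);
}
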
  1147 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
  1265 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
  1148                                            size_t grow_chunks_by_words,
  1266                                            size_t grow_chunks_by_words,
  1149                                            size_t medium_chunk_bunch) {
  1267                                            size_t medium_chunk_bunch) {
  1150 
  1268 
  1151   // Allocate a chunk out of the current virtual space.
  1269   // Allocate a chunk out of the current virtual space.
  1152   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  1270   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  1153 
  1271 
  1154   if (next == NULL) {
  1272   if (next != NULL) {
  1155     // Not enough room in current virtual space.  Try to commit
  1273     return next;
  1156     // more space.
  1274   }
  1157     size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
  1275 
  1158                                      grow_chunks_by_words);
  1276   // The expand amount is currently only determined by the requested sizes
  1159     size_t page_size_words = os::vm_page_size() / BytesPerWord;
  1277   // and not by how much committed memory is left in the current virtual space.
  1160     size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
  1278 
  1161                                                         page_size_words);
  1279   size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  1162     bool vs_expanded =
  1280   size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  1163       expand_by(current_virtual_space(), aligned_expand_vs_by_words);
  1281   if (min_word_size >= preferred_word_size) {
  1164     if (!vs_expanded) {
  1282     // Can happen when humongous chunks are allocated.
  1165       // Should the capacity of the metaspaces be expanded for
  1283     preferred_word_size = min_word_size;
  1166       // this allocation?  If it's the virtual space for classes and is
  1284   }
  1167       // being used for CompressedHeaders, don't allocate a new virtualspace.
  1285 
  1168       if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
  1286   bool expanded = expand_by(min_word_size, preferred_word_size);
  1169         // Get another virtual space.
  1287   if (expanded) {
  1170         size_t allocation_aligned_expand_words =
  1288     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  1171             align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
  1289     assert(next != NULL, "The allocation was expected to succeed after the expansion");
  1172         size_t grow_vs_words =
  1290   }
  1173             MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
  1291 
  1174         if (grow_vs(grow_vs_words)) {
  1292   return next;
  1175           // Got it.  It's on the list now.  Get a chunk from it.
       
  1176           assert(current_virtual_space()->expanded_words() == 0,
       
  1177               "New virtual space nodes should not have expanded");
       
  1178 
       
  1179           size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
       
  1180                                                               page_size_words);
       
  1181           // We probably want to expand by aligned_expand_vs_by_words here.
       
  1182           expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
       
  1183           next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
       
  1184         }
       
  1185       } else {
       
  1186         // Allocation will fail and induce a GC
       
  1187         if (TraceMetadataChunkAllocation && Verbose) {
       
  1188           gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
       
  1189             " Fail instead of expand the metaspace");
       
  1190         }
       
  1191       }
       
  1192     } else {
       
  1193       // The virtual space expanded, get a new chunk
       
  1194       next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
       
  1195       assert(next != NULL, "Just expanded, should succeed");
       
  1196     }
       
  1197   }
       
  1198 
       
  1199   assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
       
  1200          "New chunk is still on some list");
       
  1201   return next;
       
  1202 }
       
  1203 
       
  1204 Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
       
  1205                                                       size_t chunk_bunch) {
       
  1206   // Get a chunk from the chunk freelist
       
  1207   Metachunk* new_chunk = get_new_chunk(chunk_word_size,
       
  1208                                        chunk_word_size,
       
  1209                                        chunk_bunch);
       
  1210   return new_chunk;
       
  1211 }
  1293 }
  1212 
  1294 
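
The simplified get_new_chunk() above derives both expansion bounds purely from the requested chunk sizes, each rounded up to the commit granularity; for humongous chunks the minimum can exceed the preferred size, in which case the preferred size is raised to match. A standalone sketch of that size computation, with a local align_up in place of align_size_up():

#include <cstddef>
#include <utility>

static size_t align_up(size_t value, size_t alignment) {  // power-of-two alignment
  return (value + alignment - 1) & ~(alignment - 1);
}

// Returns {min_word_size, preferred_word_size} for the commit request.
static std::pair<size_t, size_t> expansion_words(size_t grow_chunks_by_words,
                                                 size_t medium_chunk_bunch,
                                                 size_t commit_alignment_words) {
  size_t min_ws  = align_up(grow_chunks_by_words, commit_alignment_words);
  size_t pref_ws = align_up(medium_chunk_bunch,   commit_alignment_words);
  if (min_ws >= pref_ws) {
    pref_ws = min_ws;   // humongous chunks: the minimum dominates
  }
  return std::make_pair(min_ws, pref_ws);
}
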
  1213 void VirtualSpaceList::print_on(outputStream* st) const {
  1295 void VirtualSpaceList::print_on(outputStream* st) const {
  1214   if (TraceMetadataChunkAllocation && Verbose) {
  1296   if (TraceMetadataChunkAllocation && Verbose) {
  1215     VirtualSpaceListIterator iter(virtual_space_list());
  1297     VirtualSpaceListIterator iter(virtual_space_list());
  1254 // the HWM.
  1336 // the HWM.
  1255 
  1337 
  1256 // Calculate the amount to increase the high water mark (HWM).
  1338 // Calculate the amount to increase the high water mark (HWM).
  1257 // Increase by a minimum amount (MinMetaspaceExpansion) so that
  1339 // Increase by a minimum amount (MinMetaspaceExpansion) so that
  1258 // another expansion is not requested too soon.  If that is not
  1340 // another expansion is not requested too soon.  If that is not
  1259 // enough to satisfy the allocation (i.e. big enough for a word_size
  1341 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
  1260 // allocation), increase by MaxMetaspaceExpansion.  If that is still
  1342 // If that is still not enough, expand by the size of the allocation
  1261 // not enough, expand by the size of the allocation (word_size) plus
  1343 // plus some.
  1262 // some.
  1344 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  1263 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  1345   size_t min_delta = MinMetaspaceExpansion;
  1264   size_t before_inc = MetaspaceGC::capacity_until_GC();
  1346   size_t max_delta = MaxMetaspaceExpansion;
  1265   size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  1347   size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
  1266   size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  1348 
  1267   size_t page_size_words = os::vm_page_size() / BytesPerWord;
  1349   if (delta <= min_delta) {
  1268   size_t size_delta_words = align_size_up(word_size, page_size_words);
  1350     delta = min_delta;
  1269   size_t delta_words = MAX2(size_delta_words, min_delta_words);
  1351   } else if (delta <= max_delta) {
  1270   if (delta_words > min_delta_words) {
       
  1271     // Don't want to hit the high water mark on the next
  1352     // Don't want to hit the high water mark on the next
  1272     // allocation so make the delta greater than just enough
  1353     // allocation so make the delta greater than just enough
  1273     // for this allocation.
  1354     // for this allocation.
  1274     delta_words = MAX2(delta_words, max_delta_words);
  1355     delta = max_delta;
  1275     if (delta_words > max_delta_words) {
  1356   } else {
  1276       // This allocation is large but the next ones are probably not
  1357     // This allocation is large but the next ones are probably not
  1277       // so increase by the minimum.
  1358     // so increase by the minimum.
  1278       delta_words = delta_words + min_delta_words;
  1359     delta = delta + min_delta;
  1279     }
  1360   }
  1280   }
  1361 
  1281   return delta_words;
  1362   assert_is_size_aligned(delta, Metaspace::commit_alignment());
  1282 }
  1363 
  1283 
  1364   return delta;
  1284 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
  1365 }
  1285 
  1366 
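
delta_capacity_until_GC() now works in bytes and snaps the request to the commit granularity before applying the three-band policy. A standalone worked sketch; the concrete numbers (256K, 4M, 64K) are illustrative stand-ins for MinMetaspaceExpansion, MaxMetaspaceExpansion and Metaspace::commit_alignment():

#include <cassert>
#include <cstddef>

static size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

static size_t delta_capacity_until_gc(size_t bytes, size_t min_delta,
                                      size_t max_delta, size_t commit_align) {
  size_t delta = align_up(bytes, commit_align);
  if (delta <= min_delta) {
    delta = min_delta;            // small request: take the minimum step
  } else if (delta <= max_delta) {
    delta = max_delta;            // medium request: overshoot to avoid re-hitting the HWM
  } else {
    delta = delta + min_delta;    // large request: size of the allocation plus some
  }
  return delta;
}

int main() {
  const size_t K = 1024, M = 1024 * K;
  assert(delta_capacity_until_gc(10 * K, 256 * K, 4 * M, 64 * K) == 256 * K);
  assert(delta_capacity_until_gc( 1 * M, 256 * K, 4 * M, 64 * K) ==   4 * M);
  assert(delta_capacity_until_gc(10 * M, 256 * K, 4 * M, 64 * K) == 10 * M + 256 * K);
  return 0;
}
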
  1286   // If the user wants a limit, impose one.
  1367 size_t MetaspaceGC::capacity_until_GC() {
  1287   // The reason for someone using this flag is to limit reserved space.  So
  1368   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  1288   // for non-class virtual space, compare against virtual spaces that are reserved.
  1369   assert(value >= MetaspaceSize, "Not initialized properly?");
  1289   // For class virtual space, we only compare against the committed space, not
  1370   return value;
  1290   // reserved space, because this is a larger space prereserved for compressed
  1371 }
  1291   // class pointers.
  1372 
  1292   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
  1373 size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
  1293     size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
  1374   assert_is_size_aligned(v, Metaspace::commit_alignment());
  1294     size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  1375 
  1295     size_t real_allocated     = nonclass_allocated + class_allocated;
  1376   return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
  1296     if (real_allocated >= MaxMetaspaceSize) {
  1377 }
       
  1378 
       
  1379 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
       
  1380   assert_is_size_aligned(v, Metaspace::commit_alignment());
       
  1381 
       
  1382   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
       
  1383 }
       
  1384 
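
The high-water mark is now read with acquire semantics and moved with atomic adds instead of plain stores, so concurrent allocators can race on it safely. A standalone sketch of the same pattern using std::atomic as a stand-in for HotSpot's OrderAccess::load_ptr_acquire() and Atomic::add_ptr(); like Atomic::add_ptr(), the updaters return the post-update value:

#include <atomic>
#include <cstddef>

static std::atomic<size_t> capacity_until_gc{0};

static size_t capacity() {
  return capacity_until_gc.load(std::memory_order_acquire);
}
static size_t inc_capacity(size_t v) {
  // fetch_add returns the old value; add v to mirror the "new value"
  // return convention of Atomic::add_ptr.
  return capacity_until_gc.fetch_add(v, std::memory_order_acq_rel) + v;
}
static size_t dec_capacity(size_t v) {
  return capacity_until_gc.fetch_sub(v, std::memory_order_acq_rel) - v;
}
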
       
  1385 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
       
  1386   // Check if the compressed class space is full.
       
  1387   if (is_class && Metaspace::using_class_space()) {
       
  1388     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
       
  1389     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
  1297       return false;
  1390       return false;
  1298     }
  1391     }
  1299   }
  1392   }
  1300 
  1393 
  1301   // Class virtual space should always be expanded.  Call GC for the other
  1394   // Check if the user has imposed a limit on the metaspace memory.
  1302   // metadata virtual space.
  1395   size_t committed_bytes = MetaspaceAux::committed_bytes();
  1303   if (Metaspace::using_class_space() &&
  1396   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
  1304       (vsl == Metaspace::class_space_list())) return true;
  1397     return false;
  1305 
  1398   }
  1306   // If this is part of an allocation after a GC, expand
  1399 
  1307   // unconditionally.
  1400   return true;
  1308   if (MetaspaceGC::expand_after_GC()) {
  1401 }
  1309     return true;
  1402 
  1310   }
  1403 size_t MetaspaceGC::allowed_expansion() {
  1311 
  1404   size_t committed_bytes = MetaspaceAux::committed_bytes();
  1312 
  1405 
  1313   // If the capacity is below the minimum capacity, allow the
  1406   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  1314   // expansion.  Also set the high-water-mark (capacity_until_GC)
  1407 
  1315   // to that minimum capacity so that a GC will not be induced
  1408   // Always grant expansion if we are initiating the JVM,
  1316   // until that minimum capacity is exceeded.
  1409   // or if the GC_locker is preventing GCs.
  1317   size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
  1410   if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
  1318   size_t metaspace_size_bytes = MetaspaceSize;
  1411     return left_until_max / BytesPerWord;
  1319   if (committed_capacity_bytes < metaspace_size_bytes ||
  1412   }
  1320       capacity_until_GC() == 0) {
  1413 
  1321     set_capacity_until_GC(metaspace_size_bytes);
  1414   size_t capacity_until_gc = capacity_until_GC();
  1322     return true;
  1415 
  1323   } else {
  1416   if (capacity_until_gc <= committed_bytes) {
  1324     if (committed_capacity_bytes < capacity_until_GC()) {
  1417     return 0;
  1325       return true;
  1418   }
  1326     } else {
  1419 
  1327       if (TraceMetadataChunkAllocation && Verbose) {
  1420   size_t left_until_GC = capacity_until_gc - committed_bytes;
  1328         gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
  1421   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  1329                         "  capacity_until_GC " SIZE_FORMAT
  1422 
  1330                         "  allocated_capacity_bytes " SIZE_FORMAT,
  1423   return left_to_commit / BytesPerWord;
  1331                         word_size,
  1424 }
  1332                         capacity_until_GC(),
       
  1333                         MetaspaceAux::allocated_capacity_bytes());
       
  1334       }
       
  1335       return false;
       
  1336     }
       
  1337   }
       
  1338 }
       
  1339 
       
  1340 
       
  1341 
  1425 
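
allowed_expansion() grants everything up to MaxMetaspaceSize while the VM is still initializing or the GC_locker blocks collections, and otherwise caps expansion at the distance to the current high-water mark. A standalone sketch with the VM state folded into two booleans:

#include <algorithm>
#include <cstddef>

static size_t allowed_expansion_words(size_t committed_bytes, size_t max_metaspace,
                                      size_t capacity_until_gc, size_t bytes_per_word,
                                      bool init_completed, bool gc_locker_active) {
  size_t left_until_max = max_metaspace - committed_bytes;
  if (!init_completed || gc_locker_active) {
    return left_until_max / bytes_per_word;   // cannot trigger a GC: grant the rest
  }
  if (capacity_until_gc <= committed_bytes) {
    return 0;                                 // at the HWM: force a GC instead
  }
  size_t left_until_gc = capacity_until_gc - committed_bytes;
  return std::min(left_until_gc, left_until_max) / bytes_per_word;
}
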
  1342 void MetaspaceGC::compute_new_size() {
  1426 void MetaspaceGC::compute_new_size() {
  1343   assert(_shrink_factor <= 100, "invalid shrink factor");
  1427   assert(_shrink_factor <= 100, "invalid shrink factor");
  1344   uint current_shrink_factor = _shrink_factor;
  1428   uint current_shrink_factor = _shrink_factor;
  1345   _shrink_factor = 0;
  1429   _shrink_factor = 0;
  1346 
  1430 
  1347   // Until a faster way of calculating the "used" quantity is implemented,
       
  1348   // use "capacity".
       
  1349   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  1431   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  1350   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
  1432   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
  1351 
  1433 
  1352   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  1434   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  1353   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1435   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1375   size_t shrink_bytes = 0;
  1457   size_t shrink_bytes = 0;
  1376   if (capacity_until_GC < minimum_desired_capacity) {
  1458   if (capacity_until_GC < minimum_desired_capacity) {
  1377     // If we have less capacity below the metaspace HWM, then
  1459     // If we have less capacity below the metaspace HWM, then
  1378     // increment the HWM.
  1460     // increment the HWM.
  1379     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
  1461     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
       
  1462     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
  1380     // Don't expand unless it's significant
  1463     // Don't expand unless it's significant
  1381     if (expand_bytes >= MinMetaspaceExpansion) {
  1464     if (expand_bytes >= MinMetaspaceExpansion) {
  1382       MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
  1465       MetaspaceGC::inc_capacity_until_GC(expand_bytes);
  1383     }
  1466     }
  1384     if (PrintGCDetails && Verbose) {
  1467     if (PrintGCDetails && Verbose) {
  1385       size_t new_capacity_until_GC = capacity_until_GC;
  1468       size_t new_capacity_until_GC = capacity_until_GC;
  1386       gclog_or_tty->print_cr("    expanding:"
  1469       gclog_or_tty->print_cr("    expanding:"
  1387                     "  minimum_desired_capacity: %6.1fKB"
  1470                     "  minimum_desired_capacity: %6.1fKB"
  1434       // we'd just have to grow the heap up again for the next phase.  So we
  1517       // we'd just have to grow the heap up again for the next phase.  So we
  1435       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
  1518       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
  1436       // on the third call, and 100% by the fourth call.  But if we recompute
  1519       // on the third call, and 100% by the fourth call.  But if we recompute
  1437       // size without shrinking, it goes back to 0%.
  1520       // size without shrinking, it goes back to 0%.
  1438       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
  1521       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
       
  1522 
       
  1523       shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
       
  1524 
  1439       assert(shrink_bytes <= max_shrink_bytes,
  1525       assert(shrink_bytes <= max_shrink_bytes,
  1440         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
  1526         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
  1441           shrink_bytes, max_shrink_bytes));
  1527           shrink_bytes, max_shrink_bytes));
  1442       if (current_shrink_factor == 0) {
  1528       if (current_shrink_factor == 0) {
  1443         _shrink_factor = 10;
  1529         _shrink_factor = 10;
  1465   }
  1551   }
  1466 
  1552 
  1467   // Don't shrink unless it's significant
  1553   // Don't shrink unless it's significant
  1468   if (shrink_bytes >= MinMetaspaceExpansion &&
  1554   if (shrink_bytes >= MinMetaspaceExpansion &&
  1469       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
  1555       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
  1470     MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
  1556     MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
  1471   }
  1557   }
  1472 }
  1558 }
  1473 
  1559 
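
The shrink path damps successive shrinks at 0%, 10%, 40% and then 100% of the computed excess, and the new code additionally aligns the result down to the commit granularity so dec_capacity_until_GC() sees an aligned value. A standalone sketch of one damped round; the 64K granularity in the comment is an illustrative stand-in for Metaspace::commit_alignment():

#include <cstddef>

static size_t align_down(size_t v, size_t a) { return v & ~(a - 1); }

// One round of the damped shrink: the factor cycles 0 -> 10 -> 40 -> 100.
static size_t damped_shrink_bytes(size_t excess_bytes, unsigned shrink_factor,
                                  size_t commit_align) {
  size_t shrink = excess_bytes / 100 * shrink_factor;
  return align_down(shrink, commit_align);
}
// e.g. excess 10M, 64K alignment: round 1 -> 0, round 2 -> ~1M,
// round 3 -> ~4M, round 4 -> ~10M.
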
  1474 // Metadebug methods
  1560 // Metadebug methods
  1475 
  1561 
  1698   if (list_index(word_size) != HumongousIndex) {
  1784   if (list_index(word_size) != HumongousIndex) {
  1699     ChunkList* free_list = find_free_chunks_list(word_size);
  1785     ChunkList* free_list = find_free_chunks_list(word_size);
  1700     assert(free_list != NULL, "Sanity check");
  1786     assert(free_list != NULL, "Sanity check");
  1701 
  1787 
  1702     chunk = free_list->head();
  1788     chunk = free_list->head();
  1703     debug_only(Metachunk* debug_head = chunk;)
       
  1704 
  1789 
  1705     if (chunk == NULL) {
  1790     if (chunk == NULL) {
  1706       return NULL;
  1791       return NULL;
  1707     }
  1792     }
  1708 
  1793 
  1709     // Remove the chunk as the head of the list.
  1794     // Remove the chunk as the head of the list.
  1710     free_list->remove_chunk(chunk);
  1795     free_list->remove_chunk(chunk);
  1711 
       
  1712     // Chunk is being removed from the chunks free list.
       
  1713     dec_free_chunks_total(chunk->capacity_word_size());
       
  1714 
  1796 
  1715     if (TraceMetadataChunkAllocation && Verbose) {
  1797     if (TraceMetadataChunkAllocation && Verbose) {
  1716       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
  1798       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
  1717                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  1799                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  1718                              free_list, chunk, chunk->word_size());
  1800                              free_list, chunk, chunk->word_size());
  1720   } else {
  1802   } else {
  1721     chunk = humongous_dictionary()->get_chunk(
  1803     chunk = humongous_dictionary()->get_chunk(
  1722       word_size,
  1804       word_size,
  1723       FreeBlockDictionary<Metachunk>::atLeast);
  1805       FreeBlockDictionary<Metachunk>::atLeast);
  1724 
  1806 
  1725     if (chunk != NULL) {
  1807     if (chunk == NULL) {
  1726       if (TraceMetadataHumongousAllocation) {
       
  1727         size_t waste = chunk->word_size() - word_size;
       
  1728         gclog_or_tty->print_cr("Free list allocate humongous chunk size "
       
  1729                                SIZE_FORMAT " for requested size " SIZE_FORMAT
       
  1730                                " waste " SIZE_FORMAT,
       
  1731                                chunk->word_size(), word_size, waste);
       
  1732       }
       
  1733       // Chunk is being removed from the chunks free list.
       
  1734       dec_free_chunks_total(chunk->capacity_word_size());
       
  1735     } else {
       
  1736       return NULL;
  1808       return NULL;
  1737     }
  1809     }
  1738   }
  1810 
       
  1811     if (TraceMetadataHumongousAllocation) {
       
  1812       size_t waste = chunk->word_size() - word_size;
       
  1813       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
       
  1814                              SIZE_FORMAT " for requested size " SIZE_FORMAT
       
  1815                              " waste " SIZE_FORMAT,
       
  1816                              chunk->word_size(), word_size, waste);
       
  1817     }
       
  1818   }
       
  1819 
       
  1820   // Chunk is being removed from the chunks free list.
       
  1821   dec_free_chunks_total(chunk->capacity_word_size());
  1739 
  1822 
  1740   // Remove it from the links to this freelist
  1823   // Remove it from the links to this freelist
  1741   chunk->set_next(NULL);
  1824   chunk->set_next(NULL);
  1742   chunk->set_prev(NULL);
  1825   chunk->set_prev(NULL);
  1743 #ifdef ASSERT
  1826 #ifdef ASSERT
  2000 
  2083 
  2001   // Get another chunk out of the virtual space
  2084   // Get another chunk out of the virtual space
  2002   size_t grow_chunks_by_words = calc_chunk_size(word_size);
  2085   size_t grow_chunks_by_words = calc_chunk_size(word_size);
  2003   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
  2086   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
  2004 
  2087 
       
  2088   if (next != NULL) {
       
  2089     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
       
  2090   }
       
  2091 
       
  2092   MetaWord* mem = NULL;
       
  2093 
  2005   // If a chunk was available, add it to the in-use chunk list
  2094   // If a chunk was available, add it to the in-use chunk list
  2006   // and do an allocation from it.
  2095   // and do an allocation from it.
  2007   if (next != NULL) {
  2096   if (next != NULL) {
  2008     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
       
  2009     // Add to this manager's list of chunks in use.
  2097     // Add to this manager's list of chunks in use.
  2010     add_chunk(next, false);
  2098     add_chunk(next, false);
  2011     return next->allocate(word_size);
  2099     mem = next->allocate(word_size);
  2012   }
  2100   }
  2013   return NULL;
  2101 
       
  2102   return mem;
  2014 }
  2103 }
  2015 
  2104 
  2016 void SpaceManager::print_on(outputStream* st) const {
  2105 void SpaceManager::print_on(outputStream* st) const {
  2017 
  2106 
  2018   for (ChunkIndex i = ZeroIndex;
  2107   for (ChunkIndex i = ZeroIndex;
  2364   if (DumpSharedSpaces) {
  2453   if (DumpSharedSpaces) {
  2365     assert(current_chunk() != NULL, "should never happen");
  2454     assert(current_chunk() != NULL, "should never happen");
  2366     inc_used_metrics(word_size);
  2455     inc_used_metrics(word_size);
  2367     return current_chunk()->allocate(word_size); // caller handles null result
  2456     return current_chunk()->allocate(word_size); // caller handles null result
  2368   }
  2457   }
       
  2458 
  2369   if (current_chunk() != NULL) {
  2459   if (current_chunk() != NULL) {
  2370     result = current_chunk()->allocate(word_size);
  2460     result = current_chunk()->allocate(word_size);
  2371   }
  2461   }
  2372 
  2462 
  2373   if (result == NULL) {
  2463   if (result == NULL) {
  2374     result = grow_and_allocate(word_size);
  2464     result = grow_and_allocate(word_size);
  2375   }
  2465   }
  2376   if (result != 0) {
  2466 
       
  2467   if (result != NULL) {
  2377     inc_used_metrics(word_size);
  2468     inc_used_metrics(word_size);
  2378     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
  2469     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
  2379            "Head of the list is being allocated");
  2470            "Head of the list is being allocated");
  2380   }
  2471   }
  2381 
  2472 
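Editorial sketch: the control flow above is a fast-path/slow-path split: try the current chunk, fall back to grow_and_allocate(), and update usage metrics only for a non-null result. A compilable sketch of the same shape; Arena is an illustrative type, not a HotSpot class.

    #include <cstddef>

    struct Arena {
      char*  cur  = nullptr;
      char*  end  = nullptr;
      size_t used = 0;

      char* try_current(size_t bytes) {  // fast path: bump the current chunk
        if (cur != nullptr && static_cast<size_t>(end - cur) >= bytes) {
          char* p = cur;
          cur += bytes;
          return p;
        }
        return nullptr;
      }

      char* grow_and_allocate(size_t) {  // slow-path stub; may legitimately fail
        return nullptr;
      }

      char* allocate(size_t bytes) {
        char* result = try_current(bytes);
        if (result == nullptr) {
          result = grow_and_allocate(bytes);
        }
        if (result != nullptr) {         // metrics only on success, as above
          used += bytes;
        }
        return result;
      }
    };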
  2637 
  2728 
  2638 // This is printed when PrintGCDetails is enabled
  2729 // This is printed when PrintGCDetails is enabled
  2639 void MetaspaceAux::print_on(outputStream* out) {
  2730 void MetaspaceAux::print_on(outputStream* out) {
  2640   Metaspace::MetadataType nct = Metaspace::NonClassType;
  2731   Metaspace::MetadataType nct = Metaspace::NonClassType;
  2641 
  2732 
  2642   out->print_cr(" Metaspace total "
  2733   out->print_cr(" Metaspace       "
  2643                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  2734                 "used "      SIZE_FORMAT "K, "
  2644                 " reserved " SIZE_FORMAT "K",
  2735                 "capacity "  SIZE_FORMAT "K, "
  2645                 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
  2736                 "committed " SIZE_FORMAT "K, "
  2646 
  2737                 "reserved "  SIZE_FORMAT "K",
  2647   out->print_cr("  data space     "
  2738                 allocated_used_bytes()/K,
  2648                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  2739                 allocated_capacity_bytes()/K,
  2649                 " reserved " SIZE_FORMAT "K",
  2740                 committed_bytes()/K,
  2650                 allocated_capacity_bytes(nct)/K,
  2741                 reserved_bytes()/K);
  2651                 allocated_used_bytes(nct)/K,
  2742 
  2652                 reserved_bytes(nct)/K);
       
  2653   if (Metaspace::using_class_space()) {
  2743   if (Metaspace::using_class_space()) {
  2654     Metaspace::MetadataType ct = Metaspace::ClassType;
  2744     Metaspace::MetadataType ct = Metaspace::ClassType;
  2655     out->print_cr("  class space    "
  2745     out->print_cr("  class space    "
  2656                   SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  2746                   "used "      SIZE_FORMAT "K, "
  2657                   " reserved " SIZE_FORMAT "K",
  2747                   "capacity "  SIZE_FORMAT "K, "
       
  2748                   "committed " SIZE_FORMAT "K, "
       
  2749                   "reserved "  SIZE_FORMAT "K",
       
  2750                   allocated_used_bytes(ct)/K,
  2658                   allocated_capacity_bytes(ct)/K,
  2751                   allocated_capacity_bytes(ct)/K,
  2659                   allocated_used_bytes(ct)/K,
  2752                   committed_bytes(ct)/K,
  2660                   reserved_bytes(ct)/K);
  2753                   reserved_bytes(ct)/K);
  2661   }
  2754   }
  2662 }
  2755 }
  2663 
  2756 
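Editorial note: with the reworked format strings, the summary reports used, capacity, committed and reserved, in that order, for the total and (when compressed class pointers are in use) for the class space. A -XX:+PrintGCDetails line produced by this code might look like the following; the numbers are invented for illustration:

     Metaspace       used 2425K, capacity 4498K, committed 4864K, reserved 1056768K
      class space    used 262K, capacity 386K, committed 512K, reserved 1048576K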
  2664 // Print information for class space and data space separately.
  2757 // Print information for class space and data space separately.
  2806 // Metaspace methods
  2899 // Metaspace methods
  2807 
  2900 
  2808 size_t Metaspace::_first_chunk_word_size = 0;
  2901 size_t Metaspace::_first_chunk_word_size = 0;
  2809 size_t Metaspace::_first_class_chunk_word_size = 0;
  2902 size_t Metaspace::_first_class_chunk_word_size = 0;
  2810 
  2903 
       
  2904 size_t Metaspace::_commit_alignment = 0;
       
  2905 size_t Metaspace::_reserve_alignment = 0;
       
  2906 
  2811 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  2907 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  2812   initialize(lock, type);
  2908   initialize(lock, type);
  2813 }
  2909 }
  2814 
  2910 
  2815 Metaspace::~Metaspace() {
  2911 Metaspace::~Metaspace() {
  2867 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  2963 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  2868   assert(using_class_space(), "called improperly");
  2964   assert(using_class_space(), "called improperly");
  2869   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  2965   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  2870   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
  2966   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
  2871          "Metaspace size is too big");
  2967          "Metaspace size is too big");
       
  2968   assert_is_ptr_aligned(requested_addr,          _reserve_alignment);
       
  2969   assert_is_ptr_aligned(cds_base,                _reserve_alignment);
       
  2970   assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
       
  2971 
       
  2972   // Don't use large pages for the class space.
       
  2973   bool large_pages = false;
  2872 
  2974 
  2873   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
  2975   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
  2874                                              os::vm_allocation_granularity(),
  2976                                              _reserve_alignment,
  2875                                              false, requested_addr, 0);
  2977                                              large_pages,
       
  2978                                              requested_addr, 0);
  2876   if (!metaspace_rs.is_reserved()) {
  2979   if (!metaspace_rs.is_reserved()) {
  2877     if (UseSharedSpaces) {
  2980     if (UseSharedSpaces) {
       
  2981       size_t increment = align_size_up(1*G, _reserve_alignment);
       
  2982 
  2878       // Keep trying to allocate the metaspace, increasing the requested_addr
  2983       // Keep trying to allocate the metaspace, increasing the requested_addr
  2879       // by 1GB each time, until we reach an address that will no longer allow
  2984       // by 1GB each time, until we reach an address that will no longer allow
  2880       // use of CDS with compressed klass pointers.
  2985       // use of CDS with compressed klass pointers.
  2881       char *addr = requested_addr;
  2986       char *addr = requested_addr;
  2882       while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
  2987       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
  2883              can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
  2988              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
  2884         addr = addr + 1*G;
  2989         addr = addr + increment;
  2885         metaspace_rs = ReservedSpace(class_metaspace_size(),
  2990         metaspace_rs = ReservedSpace(class_metaspace_size(),
  2886                                      os::vm_allocation_granularity(), false, addr, 0);
  2991                                      _reserve_alignment, large_pages, addr, 0);
  2887       }
  2992       }
  2888     }
  2993     }
  2889 
  2994 
  2890     // If there was no successful allocation, try to allocate the space anywhere.  If
  2995     // If there was no successful allocation, try to allocate the space anywhere.  If
  2891     // that also fails, it is a fatal OOM.  At this point we cannot try allocating the
  2996     // that also fails, it is a fatal OOM.  At this point we cannot try allocating the
  2892     // metaspace as if UseCompressedClassPointers is off because too much
  2997     // metaspace as if UseCompressedClassPointers is off because too much
  2893     // initialization has happened that depends on UseCompressedClassPointers.
  2998     // initialization has happened that depends on UseCompressedClassPointers.
  2894     // So, UseCompressedClassPointers cannot be turned off at this point.
  2999     // So, UseCompressedClassPointers cannot be turned off at this point.
  2895     if (!metaspace_rs.is_reserved()) {
  3000     if (!metaspace_rs.is_reserved()) {
  2896       metaspace_rs = ReservedSpace(class_metaspace_size(),
  3001       metaspace_rs = ReservedSpace(class_metaspace_size(),
  2897                                    os::vm_allocation_granularity(), false);
  3002                                    _reserve_alignment, large_pages);
  2898       if (!metaspace_rs.is_reserved()) {
  3003       if (!metaspace_rs.is_reserved()) {
  2899         vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
  3004         vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
  2900                                               class_metaspace_size()));
  3005                                               class_metaspace_size()));
  2901       }
  3006       }
  2902     }
  3007     }
  2931   assert(rs.size() >= CompressedClassSpaceSize,
  3036   assert(rs.size() >= CompressedClassSpaceSize,
  2932          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  3037          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  2933   assert(using_class_space(), "Must be using class space");
  3038   assert(using_class_space(), "Must be using class space");
  2934   _class_space_list = new VirtualSpaceList(rs);
  3039   _class_space_list = new VirtualSpaceList(rs);
  2935   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
  3040   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
       
  3041 
       
  3042   if (!_class_space_list->initialization_succeeded()) {
       
  3043     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
       
  3044   }
  2936 }
  3045 }
  2937 
  3046 
  2938 #endif
  3047 #endif
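Editorial sketch: the reservation logic above now probes upward from the requested address in _reserve_alignment-aligned 1 GB steps, stopping on success, on pointer wrap-around, or once the candidate would no longer permit CDS with compressed klass pointers. A self-contained version of that loop follows; reserve_at() and still_encodable() are assumed stand-ins for ReservedSpace and can_use_cds_with_metaspace_addr(), stubbed so the sketch compiles.

    #include <cstddef>

    // Hypothetical stand-ins; always-fail stubs keep this compilable.
    char* reserve_at(char* /*hint*/, size_t /*size*/)         { return nullptr; }
    bool  still_encodable(char* /*addr*/, char* /*cds_base*/) { return false; }

    char* probe_reserve(char* requested, size_t size, char* cds_base,
                        size_t increment) {  // e.g. 1 GB aligned up, as above
      char* rs   = reserve_at(requested, size);
      char* addr = requested;
      // (addr + increment > addr) stops the walk on pointer wrap-around; the
      // encodability check bounds it to addresses the CDS base can still reach.
      while (rs == nullptr && addr + increment > addr &&
             still_encodable(addr + increment, cds_base)) {
        addr += increment;
        rs = reserve_at(addr, size);
      }
      return rs;  // may still be null; the caller then reserves "anywhere"
    }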
       
  3048 
       
  3049 // Align down. If aligning down results in 0, return 'alignment'.
       
  3050 static size_t restricted_align_down(size_t size, size_t alignment) {
       
  3051   return MAX2(alignment, align_size_down_(size, alignment));
       
  3052 }
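Editorial note: a worked example of restricted_align_down(), with invented values. Aligning 5M down to a 2M boundary gives the ordinary result, while aligning 1M down would give 0, so the alignment itself is returned instead:

    //   restricted_align_down(5 * M, 2 * M) == 4 * M   // ordinary align-down
    //   restricted_align_down(1 * M, 2 * M) == 2 * M   // 0 is clamped up to
    //                                                  // one alignment unit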
       
  3053 
       
  3054 void Metaspace::ergo_initialize() {
       
  3055   if (DumpSharedSpaces) {
       
  3056     // Using large pages when dumping the shared archive is currently not implemented.
       
  3057     FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
       
  3058   }
       
  3059 
       
  3060   size_t page_size = os::vm_page_size();
       
  3061   if (UseLargePages && UseLargePagesInMetaspace) {
       
  3062     page_size = os::large_page_size();
       
  3063   }
       
  3064 
       
  3065   _commit_alignment  = page_size;
       
  3066   _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
       
  3067 
       
  3068   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would

  3069   // clobber the record of whether MaxMetaspaceSize was set on the command line.
       
  3070   // This information is needed later to conform to the specification of the
       
  3071   // java.lang.management.MemoryUsage API.
       
  3072   //
       
  3073   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
       
  3074   // globals.hpp to the aligned value, but this is not possible, since the
       
  3075   // alignment depends on other flags being parsed.
       
  3076   MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
       
  3077 
       
  3078   if (MetaspaceSize > MaxMetaspaceSize) {
       
  3079     MetaspaceSize = MaxMetaspaceSize;
       
  3080   }
       
  3081 
       
  3082   MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
       
  3083 
       
  3084   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
       
  3085 
       
  3086   if (MetaspaceSize < 256*K) {
       
  3087     vm_exit_during_initialization("Too small initial Metaspace size");
       
  3088   }
       
  3089 
       
  3090   MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
       
  3091   MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
       
  3092 
       
  3093   CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
       
  3094   set_class_metaspace_size(CompressedClassSpaceSize);
       
  3095 }
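Editorial note: to make the effect concrete, consider a hypothetical configuration with 4K small pages, 2M large pages and 4K allocation granularity. Without -XX:+UseLargePagesInMetaspace both _commit_alignment and _reserve_alignment come out as 4K; with it, both become 2M. MaxMetaspaceSize, MetaspaceSize, the expansion flags and CompressedClassSpaceSize are then rounded down to those boundaries, never below one alignment unit, per restricted_align_down() above.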
  2939 
  3096 
  2940 void Metaspace::global_initialize() {
  3097 void Metaspace::global_initialize() {
  2941   // Initialize the alignment for shared spaces.
  3098   // Initialize the alignment for shared spaces.
  2942   int max_alignment = os::vm_page_size();
  3099   int max_alignment = os::vm_page_size();
  2943   size_t cds_total = 0;
  3100   size_t cds_total = 0;
  2944 
  3101 
  2945   set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
       
  2946                                          os::vm_allocation_granularity()));
       
  2947 
       
  2948   MetaspaceShared::set_max_alignment(max_alignment);
  3102   MetaspaceShared::set_max_alignment(max_alignment);
  2949 
  3103 
  2950   if (DumpSharedSpaces) {
  3104   if (DumpSharedSpaces) {
  2951     SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
  3105     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
  2952     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
  3106     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
  2953     SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
  3107     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
  2954     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
  3108     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
  2955 
  3109 
  2956     // Initialize with the sum of the shared space sizes.  The read-only
  3110     // Initialize with the sum of the shared space sizes.  The read-only
  2957     // and read write metaspace chunks will be allocated out of this and the
  3111     // and read write metaspace chunks will be allocated out of this and the
  2958     // remainder is the misc code and data chunks.
  3112     // remainder is the misc code and data chunks.
  2959     cds_total = FileMapInfo::shared_spaces_size();
  3113     cds_total = FileMapInfo::shared_spaces_size();
       
  3114     cds_total = align_size_up(cds_total, _reserve_alignment);
  2960     _space_list = new VirtualSpaceList(cds_total/wordSize);
  3115     _space_list = new VirtualSpaceList(cds_total/wordSize);
  2961     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  3116     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  2962 
  3117 
       
  3118     if (!_space_list->initialization_succeeded()) {
       
  3119       vm_exit_during_initialization("Unable to dump shared archive.", NULL);
       
  3120     }
       
  3121 
  2963 #ifdef _LP64
  3122 #ifdef _LP64
       
  3123     if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
       
  3124       vm_exit_during_initialization("Unable to dump shared archive.",
       
  3125           err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
       
  3126                   SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
       
  3127                   "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
       
  3128                   cds_total + class_metaspace_size(), (size_t)max_juint));
       
  3129     }
       
  3130 
  2964     // Set the compressed klass pointer base so that decoding of these pointers works
  3131     // Set the compressed klass pointer base so that decoding of these pointers works
  2965     // properly when creating the shared archive.
  3132     // properly when creating the shared archive.
  2966     assert(UseCompressedOops && UseCompressedClassPointers,
  3133     assert(UseCompressedOops && UseCompressedClassPointers,
  2967       "UseCompressedOops and UseCompressedClassPointers must be set");
  3134       "UseCompressedOops and UseCompressedClassPointers must be set");
  2968     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
  3135     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
  2969     if (TraceMetavirtualspaceAllocation && Verbose) {
  3136     if (TraceMetavirtualspaceAllocation && Verbose) {
  2970       gclog_or_tty->print_cr("Setting narrow_klass_base to address: " PTR_FORMAT,
  3137       gclog_or_tty->print_cr("Setting narrow_klass_base to address: " PTR_FORMAT,
  2971                              _space_list->current_virtual_space()->bottom());
  3138                              _space_list->current_virtual_space()->bottom());
  2972     }
  3139     }
  2973 
  3140 
  2974     // Set the shift to zero.
       
  2975     assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
       
  2976            "CDS region is too large");
       
  2977     Universe::set_narrow_klass_shift(0);
  3141     Universe::set_narrow_klass_shift(0);
  2978 #endif
  3142 #endif
  2979 
  3143 
  2980   } else {
  3144   } else {
  2981     // If using shared space, open the file that contains the shared space
  3145     // If using shared space, open the file that contains the shared space
  2990       // initialization fails, shared spaces [UseSharedSpaces] are
  3154       // initialization fails, shared spaces [UseSharedSpaces] are
  2991       // disabled and the file is closed.
  3155       // disabled and the file is closed.
  2992       // Map in spaces now also
  3156       // Map in spaces now also
  2993       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
  3157       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
  2994         FileMapInfo::set_current_info(mapinfo);
  3158         FileMapInfo::set_current_info(mapinfo);
       
  3159         cds_total = FileMapInfo::shared_spaces_size();
       
  3160         cds_address = (address)mapinfo->region_base(0);
  2995       } else {
  3161       } else {
  2996         assert(!mapinfo->is_open() && !UseSharedSpaces,
  3162         assert(!mapinfo->is_open() && !UseSharedSpaces,
  2997                "archive file not closed or shared spaces not disabled.");
  3163                "archive file not closed or shared spaces not disabled.");
  2998       }
  3164       }
  2999       cds_total = FileMapInfo::shared_spaces_size();
       
  3000       cds_address = (address)mapinfo->region_base(0);
       
  3001     }
  3165     }
  3002 
  3166 
  3003 #ifdef _LP64
  3167 #ifdef _LP64
  3004     // If UseCompressedClassPointers is set then allocate the metaspace area
  3168     // If UseCompressedClassPointers is set then allocate the metaspace area
  3005     // above the heap and above the CDS area (if it exists).
  3169     // above the heap and above the CDS area (if it exists).
  3006     if (using_class_space()) {
  3170     if (using_class_space()) {
  3007       if (UseSharedSpaces) {
  3171       if (UseSharedSpaces) {
  3008         allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
  3172         char* cds_end = (char*)(cds_address + cds_total);
       
  3173         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
       
  3174         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
  3009       } else {
  3175       } else {
  3010         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
  3176         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
  3011       }
  3177       }
  3012     }
  3178     }
  3013 #endif
  3179 #endif
  3021     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
  3187     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
  3022                                        (CompressedClassSpaceSize/BytesPerWord)*2);
  3188                                        (CompressedClassSpaceSize/BytesPerWord)*2);
  3023     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  3189     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  3024     // Arbitrarily set the initial virtual space to a multiple
  3190     // Arbitrarily set the initial virtual space to a multiple
  3025     // of the boot class loader size.
  3191     // of the boot class loader size.
  3026     size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
  3192     size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
       
  3193     word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
       
  3194 
  3027     // Initialize the list of virtual spaces.
  3195     // Initialize the list of virtual spaces.
  3028     _space_list = new VirtualSpaceList(word_size);
  3196     _space_list = new VirtualSpaceList(word_size);
  3029     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  3197     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  3030   }
  3198 
       
  3199     if (!_space_list->initialization_succeeded()) {
       
  3200       vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
       
  3201     }
       
  3202   }
       
  3203 
       
  3204   MetaspaceGC::initialize();
  3031 }
  3205 }
  3032 
  3206 
  3033 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
  3207 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
  3034                                                size_t chunk_word_size,
  3208                                                size_t chunk_word_size,
  3035                                                size_t chunk_bunch) {
  3209                                                size_t chunk_bunch) {
  3037   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  3211   Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  3038   if (chunk != NULL) {
  3212   if (chunk != NULL) {
  3039     return chunk;
  3213     return chunk;
  3040   }
  3214   }
  3041 
  3215 
  3042   return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
  3216   return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
  3043 }
  3217 }
  3044 
  3218 
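Editorial sketch: the lookup order above is free list first, virtual space second; only when no recycled chunk is available does the new get_new_chunk() call carve one out of a virtual space. Condensed, with hypothetical stubs in place of the HotSpot calls:

    #include <cstddef>

    void* freelist_take(size_t /*words*/)                        { return nullptr; } // stub
    void* virtual_space_carve(size_t /*words*/, size_t /*grow*/) { return nullptr; } // stub

    void* get_init_chunk(size_t words, size_t vs_grow_words) {
      if (void* c = freelist_take(words)) {
        return c;                     // recycled chunk; nothing new committed
      }
      return virtual_space_carve(words, vs_grow_words);
    }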
  3045 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  3219 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  3046 
  3220 
  3047   assert(space_list() != NULL,
  3221   assert(space_list() != NULL,
  3110     return  vsm()->allocate(word_size);
  3284     return  vsm()->allocate(word_size);
  3111   }
  3285   }
  3112 }
  3286 }
  3113 
  3287 
  3114 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  3288 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  3115   MetaWord* result;
  3289   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  3116   MetaspaceGC::set_expand_after_GC(true);
  3290   assert(delta_bytes > 0, "Must be");
  3117   size_t before_inc = MetaspaceGC::capacity_until_GC();
  3291 
  3118   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
  3292   size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  3119   MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  3293   size_t before_inc = after_inc - delta_bytes;
       
  3294 
  3120   if (PrintGCDetails && Verbose) {
  3295   if (PrintGCDetails && Verbose) {
  3121     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
  3296     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
  3122       " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
  3297         " to " SIZE_FORMAT, before_inc, after_inc);
  3123   }
  3298   }
  3124 
  3299 
  3125   result = allocate(word_size, mdtype);
  3300   return allocate(word_size, mdtype);
  3126 
       
  3127   return result;
       
  3128 }
  3301 }
  3129 
  3302 
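Editorial sketch: deriving before_inc from the return value of inc_capacity_until_GC(), rather than reading the capacity separately, keeps the before/after pair consistent under concurrent expansion (a separate read could observe another thread's increment). Standard C++ atomics stand in here for the HotSpot atomics this change switches to:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> capacity_until_gc{0};

    // fetch_add returns the old value, so add delta to report the new one.
    size_t inc_capacity_until_gc(size_t delta_bytes) {
      return capacity_until_gc.fetch_add(delta_bytes) + delta_bytes;
    }

    void expand(size_t delta_bytes) {
      size_t after  = inc_capacity_until_gc(delta_bytes);
      size_t before = after - delta_bytes;  // consistent with 'after' even if
      (void)before;                         // other threads expanded in between
    }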
  3130 // Space allocated in the Metaspace.  This may
  3303 // Space allocated in the Metaspace.  This may
  3131 // be across several metadata virtual spaces.
  3304 // be across several metadata virtual spaces.
  3132 char* Metaspace::bottom() const {
  3305 char* Metaspace::bottom() const {
  3204       vsm()->deallocate(ptr, word_size);
  3377       vsm()->deallocate(ptr, word_size);
  3205     }
  3378     }
  3206   }
  3379   }
  3207 }
  3380 }
  3208 
  3381 
       
  3382 
  3209 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
  3383 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
  3210                               bool read_only, MetaspaceObj::Type type, TRAPS) {
  3384                               bool read_only, MetaspaceObj::Type type, TRAPS) {
  3211   if (HAS_PENDING_EXCEPTION) {
  3385   if (HAS_PENDING_EXCEPTION) {
  3212     assert(false, "Should not allocate with exception pending");
  3386     assert(false, "Should not allocate with exception pending");
  3213     return NULL;  // caller does a CHECK_NULL too
  3387     return NULL;  // caller does a CHECK_NULL too
  3214   }
  3388   }
  3215 
  3389 
  3216   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
       
  3217 
       
  3218   // SSS: Should we align the allocations and make sure the sizes are aligned.
       
  3219   MetaWord* result = NULL;
       
  3220 
       
  3221   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
  3390   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
  3222         "ClassLoaderData::the_null_class_loader_data() should have been used.");
  3391         "ClassLoaderData::the_null_class_loader_data() should have been used.");
       
  3392 
  3223   // Allocate in metaspaces without taking out a lock, because it deadlocks
  3393   // Allocate in metaspaces without taking out a lock, because it deadlocks
  3224   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  3394   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  3225   // to revisit this for application class data sharing.
  3395   // to revisit this for application class data sharing.
  3226   if (DumpSharedSpaces) {
  3396   if (DumpSharedSpaces) {
  3227     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
  3397     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
  3228     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
  3398     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
  3229     result = space->allocate(word_size, NonClassType);
  3399     MetaWord* result = space->allocate(word_size, NonClassType);
  3230     if (result == NULL) {
  3400     if (result == NULL) {
  3231       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
  3401       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
  3232     } else {
  3402     } else {
  3233       space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
  3403       space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
  3234     }
  3404     }
  3235     return Metablock::initialize(result, word_size);
  3405     return Metablock::initialize(result, word_size);
  3236   }
  3406   }
  3237 
  3407 
  3238   result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
  3408   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
       
  3409 
       
  3410   // Try to allocate metadata.
       
  3411   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
  3239 
  3412 
  3240   if (result == NULL) {
  3413   if (result == NULL) {
  3241     // Try to clean out some memory and retry.
  3414     // Allocation failed.
  3242     result =
  3415     if (is_init_completed()) {
  3243       Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
  3416       // Only start a GC if the bootstrapping has completed.
  3244         loader_data, word_size, mdtype);
  3417 
  3245 
  3418       // Try to clean out some memory and retry.
  3246     // If result is still null, we are out of memory.
  3419       result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
  3247     if (result == NULL) {
  3420           loader_data, word_size, mdtype);
  3248       if (Verbose && TraceMetadataChunkAllocation) {
  3421     }
  3249         gclog_or_tty->print_cr("Metaspace allocation failed for size "
  3422   }
  3250           SIZE_FORMAT, word_size);
  3423 
  3251         if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
  3424   if (result == NULL) {
  3252         MetaspaceAux::dump(gclog_or_tty);
  3425     report_metadata_oome(loader_data, word_size, mdtype, THREAD);
  3253       }
  3426     // Will not reach here.
  3254       // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  3427     return NULL;
  3255       const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
  3428   }
  3256                                                                      "Metadata space";
  3429 
  3257       report_java_out_of_memory(space_string);
       
  3258 
       
  3259       if (JvmtiExport::should_post_resource_exhausted()) {
       
  3260         JvmtiExport::post_resource_exhausted(
       
  3261             JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
       
  3262             space_string);
       
  3263       }
       
  3264       if (is_class_space_allocation(mdtype)) {
       
  3265         THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
       
  3266       } else {
       
  3267         THROW_OOP_0(Universe::out_of_memory_error_metaspace());
       
  3268       }
       
  3269     }
       
  3270   }
       
  3271   return Metablock::initialize(result, word_size);
  3430   return Metablock::initialize(result, word_size);
       
  3431 }
       
  3432 
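Editorial sketch: the new failure path is allocate; if that fails and bootstrapping has finished, let the collector try to reclaim metadata and retry; if the result is still null, hand off to a single reporting routine that throws. Condensed into a compilable sketch; every function here is an illustrative stub, not the HotSpot API:

    #include <cstddef>

    void* raw_allocate(size_t)          { return nullptr; }  // stub
    bool  bootstrapping_done()          { return true; }     // stub
    void* gc_and_retry(size_t)          { return nullptr; }  // stub
    void* report_oome_and_throw(size_t) { return nullptr; }  // stub; the real
                                                             // routine never returns

    void* allocate_with_retry(size_t word_size) {
      void* result = raw_allocate(word_size);
      if (result == nullptr && bootstrapping_done()) {
        // Only start a GC once initialization is complete, as above.
        result = gc_and_retry(word_size);
      }
      if (result == nullptr) {
        return report_oome_and_throw(word_size);
      }
      return result;
    }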
       
  3433 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
       
  3434   // The caller's allocation attempt failed; we are out of memory.
       
  3435   if (Verbose && TraceMetadataChunkAllocation) {
       
  3436     gclog_or_tty->print_cr("Metaspace allocation failed for size "
       
  3437         SIZE_FORMAT, word_size);
       
  3438     if (loader_data->metaspace_or_null() != NULL) {
       
  3439       loader_data->dump(gclog_or_tty);
       
  3440     }
       
  3441     MetaspaceAux::dump(gclog_or_tty);
       
  3442   }
       
  3443 
       
  3444   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
       
  3445   const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
       
  3446                                                                  "Metadata space";
       
  3447   report_java_out_of_memory(space_string);
       
  3448 
       
  3449   if (JvmtiExport::should_post_resource_exhausted()) {
       
  3450     JvmtiExport::post_resource_exhausted(
       
  3451         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
       
  3452         space_string);
       
  3453   }
       
  3454 
       
  3455   if (!is_init_completed()) {
       
  3456     vm_exit_during_initialization("OutOfMemoryError", space_string);
       
  3457   }
       
  3458 
       
  3459   if (is_class_space_allocation(mdtype)) {
       
  3460     THROW_OOP(Universe::out_of_memory_error_class_metaspace());
       
  3461   } else {
       
  3462     THROW_OOP(Universe::out_of_memory_error_metaspace());
       
  3463   }
  3272 }
  3464 }
  3273 
  3465 
  3274 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  3466 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  3275   assert(DumpSharedSpaces, "sanity");
  3467   assert(DumpSharedSpaces, "sanity");
  3276 
  3468