src/hotspot/share/gc/z/zCollectedHeap.cpp
/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/mutexLocker.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
  return (ZCollectedHeap*)heap;
}

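// Note: C++ initializes members in declaration order, not initializer-list
// order. Assuming the declarations in zCollectedHeap.hpp match the order
// below, _initialize runs after the barrier set is constructed but before
// _heap and the GC threads are created.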
ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _collector_policy(policy),
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZGCName;
}

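// _heap is a member of ZCollectedHeap and was constructed along with it, so
// at this point we only check whether its initialization succeeded and then
// record the reserved address range.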
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
                             (HeapWord*)ZAddressReservedEnd());

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _stat->stop();
}

CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _collector_policy;
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_scavengable(oop obj) {
  return false;
}

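// Check the reserved range first, then ask the heap whether the address is
// actually in use.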
bool ZCollectedHeap::is_in(const void* p) const {
  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}

bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
  return is_in(p);
}

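// TLAB sizes are given in HeapWords, while ZHeap allocates in bytes. Note
// that min_size is unused: the requested size is either allocated in full or
// not at all. An address of 0 signals failure, in which case *actual_size is
// left untouched.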
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

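// Out-of-TLAB object allocation. Note that the gc_overhead_limit_was_exceeded
// out-parameter is never set here, as ZGC does not use the GC overhead limit.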
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

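// Metadata allocation failed. Escalate in stages: start an asynchronous GC
// and try to expand metaspace; then run a synchronous GC that also clears
// soft references and retry the allocation; then try expanding once more
// before giving up and reporting out-of-memory.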
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // Neither the heap dumper nor the heap inspector really needs a GC to
  // happen, but the result of their heap iterations might be less accurate
  // in that case, since they might include objects that would otherwise have
  // been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

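// ZGC exposes a single memory manager and a single memory pool to the
// serviceability framework (each GrowableArray below has length one).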
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl);
}

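// Block queries: ZHeap works with byte addresses and byte sizes, while the
// CollectedHeap interface uses HeapWord* and word counts, so block_size
// converts between the two.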
HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
  const size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
  return ZUtils::bytes_to_words(size_in_bytes);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::unregister_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

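// VirtualSpaceSummary takes (start, committed_end, reserved_end). Committed
// corresponds to the current capacity and reserved to the max capacity, both
// expressed as HeapWord offsets from the start of the reserved region.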
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr("     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr("     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr("     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr("     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr("     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr("     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr("     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr("     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr("     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr("     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr("     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr("     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr("     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}