// src/hotspot/share/gc/shared/collectedHeap.inline.hpp
/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP

#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

// Inline allocation implementations.

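// Sets up a freshly allocated object: install the mark word first, then
// publish the klass pointer. A non-NULL klass is the signal that the
// object is parsable, so for concurrent collectors it must become
// visible only after all other initialization (see the release store
// below).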
void CollectedHeap::post_allocation_setup_common(Klass* klass,
                                                 HeapWord* obj_ptr) {
  post_allocation_setup_no_klass_install(klass, obj_ptr);
  oop obj = (oop)obj_ptr;
#if ! INCLUDE_ALL_GCS
  obj->set_klass(klass);
#else
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-NULL, for
  // concurrent collectors.
  obj->release_set_klass(klass);
#endif
}

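// Installs only the mark word, leaving the klass to be set later. With
// biased locking, the klass supplies its prototype header (which may
// carry the bias pattern); a NULL klass can occur during bootstrapping,
// in which case the default prototype mark word is used.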
void CollectedHeap::post_allocation_setup_no_klass_install(Klass* klass,
                                                           HeapWord* obj_ptr) {
  oop obj = (oop)obj_ptr;

  assert(obj != NULL, "NULL object pointer");
  if (UseBiasedLocking && (klass != NULL)) {
    obj->set_mark(klass->prototype_header());
  } else {
    // May be bootstrapping
    obj->set_mark(markOopDesc::prototype());
  }
}

// Support for JVMTI and DTrace
inline void post_allocation_notify(Klass* klass, oop obj, int size) {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();

  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj);

  if (DTraceAllocProbes) {
    // support for DTrace object alloc event (no-op most of the time)
    if (klass != NULL && klass->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj, size);
    }
  }
}

void CollectedHeap::post_allocation_setup_obj(Klass* klass,
                                              HeapWord* obj_ptr,
                                              int size) {
  post_allocation_setup_common(klass, obj_ptr);
  oop obj = (oop)obj_ptr;
  assert(Universe::is_bootstrapping() ||
         !obj->is_array(), "must not be an array");
  // notify JVMTI and DTrace
  post_allocation_notify(klass, obj, size);
}

void CollectedHeap::post_allocation_setup_class(Klass* klass,
                                                HeapWord* obj_ptr,
                                                int size) {
  // Set oop_size field before setting the _klass field because a
  // non-NULL _klass field indicates that the object is parsable by
  // concurrent GC.
  oop new_cls = (oop)obj_ptr;
  assert(size > 0, "oop_size must be positive.");
  java_lang_Class::set_oop_size(new_cls, size);
  post_allocation_setup_common(klass, obj_ptr);
  assert(Universe::is_bootstrapping() ||
         !new_cls->is_array(), "must not be an array");
  // notify JVMTI and DTrace
  post_allocation_notify(klass, new_cls, size);
}

void CollectedHeap::post_allocation_setup_array(Klass* klass,
                                                HeapWord* obj_ptr,
                                                int length) {
  // Set array length before setting the _klass field because a
  // non-NULL klass field indicates that the object is parsable by
  // concurrent GC.
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj_ptr)->set_length(length);
  post_allocation_setup_common(klass, obj_ptr);
  oop new_obj = (oop)obj_ptr;
  assert(new_obj->is_array(), "must be an array");
  // notify JVMTI and DTrace (must be after length is set for DTrace)
  post_allocation_notify(klass, new_obj, new_obj->size());
}

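// Raw slow-path allocation: returns uninitialized memory for 'size' heap
// words. Tries the current thread's TLAB first, then falls back to the
// heap's shared mem_allocate() (which may trigger a collection). If no
// memory can be made available, an OutOfMemoryError is thrown,
// distinguishing plain heap exhaustion from the GC-overhead-limit case.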
HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if it is from a TLAB, so clear here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  HeapWord* result = NULL;
  if (UseTLAB) {
    result = allocate_from_tlab(klass, THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  bool gc_overhead_limit_was_exceeded = false;
  result = Universe::heap()->mem_allocate(size,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    THREAD->incr_allocated_bytes(size * HeapWordSize);

    AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize);

    return result;
  }

  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}

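// As above, but additionally zero-initializes the object body.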
HeapWord* CollectedHeap::common_mem_allocate_init(Klass* klass, size_t size, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

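// TLAB fast path: lock-free, bump-the-pointer allocation out of the
// thread-local allocation buffer. If the current buffer cannot satisfy
// the request, allocate_from_tlab_slow() takes over; it may retire and
// refill the TLAB, or return NULL so the shared slow path is used.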
HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, Thread* thread, size_t size) {
  assert(UseTLAB, "should use UseTLAB");

  HeapWord* obj = thread->tlab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_tlab_slow(klass, thread, size);
}

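// Zeroes the body of a newly allocated object. The first header_size()
// words (mark word and klass) are skipped, and set_klass_gap(0) clears
// the padding word that exists when compressed class pointers are in
// use (it is a no-op otherwise).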
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
  assert(obj != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(size >= hs, "unexpected object size");
  ((oop)obj)->set_klass_gap(0);
  Copy::fill_to_aligned_words(obj + hs, size - hs);
}

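// Public allocation entry points: allocate (zeroed) memory, then run the
// matching post-allocation setup. TRAPS/CHECK_NULL propagate a pending
// exception such as OutOfMemoryError back to the caller.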
oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_obj(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_class(klass, obj, size); // set oop_size
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

oop CollectedHeap::array_allocate(Klass* klass,
                                  int size,
                                  int length,
                                  TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

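// Like array_allocate(), but the element area is deliberately left
// unzeroed; callers must guarantee that every element is written before
// the array becomes visible to Java code.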
oop CollectedHeap::array_allocate_nozero(Klass* klass,
                                         int size,
                                         int length,
                                         TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  ((oop)obj)->set_klass_gap(0);
  post_allocation_setup_array(klass, obj, length);
#ifndef PRODUCT
  const size_t hs = oopDesc::header_size() + 1;
  Universe::heap()->check_for_non_bad_heap_word_value(obj + hs, size - hs);
#endif
  return (oop)obj;
}

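// Aligns addr up to alignment_in_bytes, plugging the gap in front of the
// aligned address with a filler object so the heap remains walkable. If
// the gap would be smaller than the minimum filler object, a whole extra
// alignment unit is used instead. Returns NULL if the aligned address
// would lie at or beyond end.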
inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
                                                         HeapWord* end,
                                                         unsigned short alignment_in_bytes) {
  if (alignment_in_bytes <= ObjectAlignmentInBytes) {
    return addr;
  }

  assert(is_aligned(addr, HeapWordSize),
         "Address " PTR_FORMAT " is not properly aligned.", p2i(addr));
  assert(is_aligned(alignment_in_bytes, HeapWordSize),
         "Alignment size %u is incorrect.", alignment_in_bytes);

  HeapWord* new_addr = align_up(addr, alignment_in_bytes);
  size_t padding = pointer_delta(new_addr, addr);

  if (padding == 0) {
    return addr;
  }

  if (padding < CollectedHeap::min_fill_size()) {
    padding += alignment_in_bytes / HeapWordSize;
    assert(padding >= CollectedHeap::min_fill_size(),
           "alignment_in_bytes %u is expected to be larger "
           "than the minimum object size", alignment_in_bytes);
    new_addr = addr + padding;
  }

  assert(new_addr > addr, "Unexpected arithmetic overflow "
         PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr));
  if (new_addr < end) {
    CollectedHeap::fill_with_object(addr, padding);
    return new_addr;
  } else {
    return NULL;
  }
}

#ifndef PRODUCT

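// Debug-only allocation stress support: with -XX:+PromotionFailureALot,
// report an artificial promotion failure on every
// PromotionFailureALotCount-th query, once PromotionFailureALotInterval
// collections have passed since the counters were last reset.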
inline bool
CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
#endif  // #ifndef PRODUCT

#endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP