hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp
/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Inline allocation implementations.
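//
// The allocation routines below share a common structure: raw storage is
// obtained from the current thread's TLAB or from the shared heap
// (common_mem_allocate_* / common_permanent_mem_allocate_*), the body of the
// new object is zero-filled (init_obj), and the object header is then set up
// (post_allocation_setup_*: mark word, klass pointer, and the JVMTI/DTrace
// allocation notifications).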
       
void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
                                                 HeapWord* obj,
                                                 size_t size) {
  post_allocation_setup_no_klass_install(klass, obj, size);
  post_allocation_install_obj_klass(klass, oop(obj), (int) size);
}

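// Set up the header of a newly allocated object without installing its klass
// pointer.  The mark word is initialized from the klass's biased-locking
// prototype header when biased locking is in use and a klass is available;
// otherwise the default prototype mark is used.  The low-memory detector is
// also given a chance to run here.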
       
void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                           HeapWord* objPtr,
                                                           size_t size) {

  oop obj = (oop)objPtr;

  assert(obj != NULL, "NULL object pointer");
  if (UseBiasedLocking && (klass() != NULL)) {
    obj->set_mark(klass->prototype_header());
  } else {
    // May be bootstrapping
    obj->set_mark(markOopDesc::prototype());
  }

  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();
}

void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
                                                   oop obj,
                                                   int size) {
  // These asserts are kind of complicated because of klassKlass
  // and the beginning of the world.
  assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
  assert(klass() == NULL || klass()->is_klass(), "not a klass");
  assert(klass() == NULL || klass()->klass_part() != NULL, "not a klass");
  assert(obj != NULL, "NULL object pointer");
  obj->set_klass(klass());
  assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
         "missing blueprint");

  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj);

  if (DTraceAllocProbes) {
    // support for DTrace object alloc event (no-op most of the time)
    if (klass() != NULL && klass()->klass_part()->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj);
    }
  }
}

void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
                                              HeapWord* obj,
                                              size_t size) {
  post_allocation_setup_common(klass, obj, size);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
}

void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                HeapWord* obj,
                                                size_t size,
                                                int length) {
  // Set array length before posting jvmti object alloc event
  // in post_allocation_setup_common()
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj)->set_length(length);
  post_allocation_setup_common(klass, obj, size);
  assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
}

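// Allocate raw, uninitialized storage for an ordinary Java object.  The TLAB
// of the current thread is tried first; if that fails, the request goes to
// the shared heap via Universe::heap()->mem_allocate().  On failure the
// out-of-memory condition is reported (enabling the heap-dump and
// OnOutOfMemoryError hooks), the JVMTI resource-exhausted event is posted,
// and either a "Java heap space" or a "GC overhead limit exceeded"
// OutOfMemoryError is thrown.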
       
HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if allocating from a TLAB, so clear them here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  // We may want to update this; is_noref objects might not be allocated in TLABs.
  HeapWord* result = NULL;
  if (UseTLAB) {
    result = CollectedHeap::allocate_from_tlab(THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  bool gc_overhead_limit_was_exceeded;
  result = Universe::heap()->mem_allocate(size,
                                          is_noref,
                                          false,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    return result;
  }

  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}

HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, bool is_noref, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(size, is_noref, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

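// Allocate raw, uninitialized storage in the permanent generation.  This
// mirrors common_mem_allocate_noinit() above, but there is no TLAB fast path
// and failure results in a "PermGen space" OutOfMemoryError.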
       
// Need to investigate: do we really want to throw an OOM exception here?
HeapWord* CollectedHeap::common_permanent_mem_allocate_noinit(size_t size, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_NULL too
  }

#ifdef ASSERT
  if (CIFireOOMAt > 0 && THREAD->is_Compiler_thread() &&
      ++_fire_out_of_memory_count >= CIFireOOMAt) {
    // For testing of OOM handling in the CI, throw an OOM and see how it is
    // handled.  Historically, improper handling of these has resulted in
    // crashes which we really don't want to have in the CI.
    THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
  }
#endif

  HeapWord* result = Universe::heap()->permanent_mem_allocate(size);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    return result;
  }
  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  report_java_out_of_memory("PermGen space");

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        "PermGen space");
  }

  THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
}

HeapWord* CollectedHeap::common_permanent_mem_allocate_init(size_t size, TRAPS) {
  HeapWord* obj = common_permanent_mem_allocate_noinit(size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

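// TLAB fast path: bump-pointer allocation from the current thread's TLAB.
// If the TLAB cannot satisfy the request, the out-of-line slow path
// (allocate_from_tlab_slow) is taken, which may return NULL.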
       
HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) {
  assert(UseTLAB, "should use UseTLAB");

  HeapWord* obj = thread->tlab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_tlab_slow(thread, size);
}

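// Zero-fill the body of a freshly allocated object, i.e. everything past the
// header words.  The header itself is filled in afterwards by the
// post_allocation_setup_* routines.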
       
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
  assert(obj != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(size >= hs, "unexpected object size");
  Copy::fill_to_aligned_words(obj + hs, size - hs);
}

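// The *_allocate routines below are the public allocation entry points.  They
// share the same shape: allocate and zero raw storage, then install the
// object header (and, for arrays, the length) via post_allocation_setup_*.
// large_typearray_allocate differs only in passing is_noref = true to the
// underlying allocator.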
       
oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
  post_allocation_setup_obj(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

oop CollectedHeap::array_allocate(KlassHandle klass,
                                  int size,
                                  int length,
                                  TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
  post_allocation_setup_array(klass, obj, size, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

oop CollectedHeap::large_typearray_allocate(KlassHandle klass,
                                            int size,
                                            int length,
                                            TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL);
  post_allocation_setup_array(klass, obj, size, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

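// The permanent_* entry points allocate in the permanent generation.
// permanent_obj_allocate_no_klass_install defers installing the klass
// pointer so that objects whose klass is not yet available (e.g. during
// bootstrapping) can be allocated; permanent_obj_allocate installs the klass
// immediately via post_allocation_install_obj_klass.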
       
oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) {
  oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
  post_allocation_install_obj_klass(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
                                                              size));
  return obj;
}

oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                           int size,
                                                           TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_no_klass_install(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

oop CollectedHeap::permanent_array_allocate(KlassHandle klass,
                                            int size,
                                            int length,
                                            TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, size, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Returns "TRUE" if "p" is a method oop in the
// current heap with high probability. NOTE: The main
// current consumers of this interface are Forte::
// and ThreadProfiler::. In these cases, the
// interpreter frame from which "p" came may be
// under construction when sampled asynchronously, so
// the clients want to check that it represents a
// valid method before using it. Nonetheless, since
// the clients do not typically lock out GC, the
// predicate is_valid_method() is not stable, so
// it is possible that by the time "p" is used, it
// is no longer valid.
inline bool CollectedHeap::is_valid_method(oop p) const {
  return
    p != NULL &&

    // Check whether it is aligned at a HeapWord boundary.
    Space::is_aligned(p) &&

    // Check whether "p" is in the allocated part of the
    // permanent generation -- this needs to be checked before
    // p->klass() below to avoid a SEGV (but see below
    // for a potential window of vulnerability).
    is_permanent((void*)p) &&

    // See if GC is active; however, there is still an
    // apparently unavoidable window after this call
    // and before the client of this interface uses "p".
    // If the client chooses not to lock out GC, then
    // it's a risk the client must accept.
    !is_gc_active() &&

    // Check that p is a methodOop.
    p->klass() == Universe::methodKlassObj();
}

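// Non-product support for the PromotionFailureALot flags: once
// PromotionFailureALotInterval collections have elapsed since the last reset,
// every PromotionFailureALotCount-th call to promotion_should_fail() returns
// true, so that the promotion-failure handling paths can be exercised
// deliberately.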
       
#ifndef PRODUCT

inline bool
CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
#endif  // #ifndef PRODUCT