src/hotspot/share/gc/shared/collectedHeap.cpp
changeset 50882 80abf702eed8
parent 50578 e2a7f431f65c
child 51625 c265860d5d45
comparison of 50881:a21cad3fa448 with 50882:80abf702eed8
[...]
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/gcWhen.hpp"
+#include "gc/shared/memAllocator.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "logging/log.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceMirrorKlass.hpp"
[...]
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/heapDumper.hpp"
 #include "utilities/align.hpp"
+#include "utilities/copy.hpp"
 
 class ClassLoaderData;
 
 #ifdef ASSERT
 int CollectedHeap::_fire_out_of_memory_count = 0;
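The added memAllocator.hpp include is the headline of this change: the allocation slow paths deleted further down reappear behind a small allocator hierarchy. A sketch of the call shapes, reconstructed only from the uses visible later in this diff (the header itself is not shown here):

    // Reconstructed from the call sites below; not copied from memAllocator.hpp.
    //   ObjAllocator      a(klass, word_size, THREAD);                   a.allocate();
    //   ObjArrayAllocator a(klass, word_size, length, do_zero, THREAD);  a.allocate();
    //   ClassAllocator    a(klass, word_size, THREAD);                   a.allocate();
    // The filler paths instead construct an allocator without THREAD and call
    // allocator.initialize(start) on memory that is already reserved.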
[...]
     }
   } while (true);  // Until a GC is done
 }
 
 #ifndef PRODUCT
-void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
-  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
-    for (size_t slot = 0; slot < size; slot += 1) {
-      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
-             "Found badHeapWordValue in post-allocation check");
-    }
-  }
-}
-
 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
     for (size_t slot = 0; slot < size; slot += 1) {
       assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
              "Found non badHeapWordValue in pre-allocation check");
     }
   }
 }
 #endif // PRODUCT
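The deleted post-allocation check leaves with the rest of the allocation code; only the pre-allocation check stays behind. For orientation, a minimal sketch of the zapping convention it verifies, assuming only what the retained function shows (the helper name below is hypothetical):

    // Hypothetical helper illustrating the convention: with ZapUnusedHeapArea
    // set, unused heap words are filled with the badHeapWordVal pattern, so
    // check_for_non_bad_heap_word_value can assert a range is still unused.
    static void zap_words_sketch(HeapWord* addr, size_t size) {
      for (size_t slot = 0; slot < size; slot += 1) {
        *(intptr_t*)(addr + slot) = (intptr_t)badHeapWordVal;
      }
    }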
-
-#ifdef ASSERT
-void CollectedHeap::check_for_valid_allocation_state() {
-  Thread *thread = Thread::current();
-  // How to choose between a pending exception and a potential
-  // OutOfMemoryError?  Don't allow pending exceptions.
-  // This is a VM policy failure, so how do we exhaustively test it?
-  assert(!thread->has_pending_exception(),
-         "shouldn't be allocating with pending exception");
-  if (StrictSafepointChecks) {
-    assert(thread->allow_allocation(),
-           "Allocation done by thread for which allocation is blocked "
-           "by No_Allocation_Verifier!");
-    // Allocation of an oop can always invoke a safepoint,
-    // hence, the true argument
-    thread->check_for_valid_safepoint_state(true);
-  }
-}
-#endif
-
-HeapWord* CollectedHeap::obj_allocate_raw(Klass* klass, size_t size,
-                                          bool* gc_overhead_limit_was_exceeded, TRAPS) {
-  if (UseTLAB) {
-    HeapWord* result = allocate_from_tlab(klass, size, THREAD);
-    if (result != NULL) {
-      return result;
-    }
-  }
-
-  return allocate_outside_tlab(klass, size, gc_overhead_limit_was_exceeded, THREAD);
-}
-
-HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS) {
-  HeapWord* obj = NULL;
-
-  // In assertion mode, check that there was a sampling collector present
-  // in the stack. This enforces checking that no path is without a sampling
-  // collector.
-  // Only check if the sampler could actually sample something in this call path.
-  assert(!JvmtiExport::should_post_sampled_object_alloc()
-         || !JvmtiSampledObjectAllocEventCollector::object_alloc_is_safe_to_sample()
-         || THREAD->heap_sampler().sampling_collector_present(),
-         "Sampling collector not present.");
-
-  if (ThreadHeapSampler::enabled()) {
-    // Try to allocate the sampled object from TLAB, it is possible a sample
-    // point was put and the TLAB still has space.
-    obj = THREAD->tlab().allocate_sampled_object(size);
-
-    if (obj != NULL) {
-      return obj;
-    }
-  }
-
-  ThreadLocalAllocBuffer& tlab = THREAD->tlab();
-
-  // Retain tlab and allocate object in shared space if
-  // the amount free in the tlab is too large to discard.
-  if (tlab.free() > tlab.refill_waste_limit()) {
-    tlab.record_slow_allocation(size);
-    return NULL;
-  }
-
-  // Discard tlab and allocate a new one.
-  // To minimize fragmentation, the last TLAB may be smaller than the rest.
-  size_t new_tlab_size = tlab.compute_size(size);
-
-  tlab.clear_before_allocation();
-
-  if (new_tlab_size == 0) {
-    return NULL;
-  }
-
-  // Allocate a new TLAB requesting new_tlab_size. Any size
-  // between minimal and new_tlab_size is accepted.
-  size_t actual_tlab_size = 0;
-  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);
-  obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
-  if (obj == NULL) {
-    assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
-           min_tlab_size, new_tlab_size, actual_tlab_size);
-    return NULL;
-  }
-  assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
-         p2i(obj), min_tlab_size, new_tlab_size);
-
-  AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, THREAD);
-
-  if (ZeroTLAB) {
-    // ..and clear it.
-    Copy::zero_to_words(obj, actual_tlab_size);
-  } else {
-    // ...and zap just allocated object.
-#ifdef ASSERT
-    // Skip mangling the space corresponding to the object header to
-    // ensure that the returned space is not considered parsable by
-    // any concurrent GC thread.
-    size_t hdr_size = oopDesc::header_size();
-    Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
-#endif // ASSERT
-  }
-
-  // Send the thread information about this allocation in case a sample is
-  // requested.
-  if (ThreadHeapSampler::enabled()) {
-    size_t tlab_bytes_since_last_sample = THREAD->tlab().bytes_since_last_sample_point();
-    THREAD->heap_sampler().check_for_sampling(obj, size, tlab_bytes_since_last_sample);
-  }
-
-  tlab.fill(obj, obj + size, actual_tlab_size);
-  return obj;
-}
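Everything from check_for_valid_allocation_state through allocate_from_tlab_slow leaves this file in one block. A condensed view of the removed TLAB slow path, written purely from the deleted lines above, for reviewers diffing it against its new home:

    // Shape of the removed allocate_from_tlab_slow, condensed:
    //   1. sampling on?  try tlab().allocate_sampled_object(size) first
    //   2. tlab.free() > tlab.refill_waste_limit()?  record_slow_allocation(size),
    //      keep the TLAB, return NULL (caller allocates in shared space)
    //   3. otherwise retire the TLAB (clear_before_allocation), size a new one via
    //      tlab.compute_size(size), get it from allocate_new_tlab(min, desired, &actual),
    //      zero it (ZeroTLAB) or zap all but the header (ASSERT builds), publish a
    //      sample point if enabled, then tlab.fill(obj, obj + size, actual)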
 
 size_t CollectedHeap::max_tlab_size() const {
   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
   // This restriction could be removed by enabling filling with multiple arrays.
   // If we compute that the reasonable way as
[...]
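A back-of-envelope reading of the int[Integer.MAX_VALUE] cap mentioned in the comment above, assuming a 64-bit VM (HeapWordSize == 8):

    // (2^31 - 1) jints * 4 bytes/jint  ~= 8 GiB of payload
    // 8 GiB / 8 bytes/word             ~= 2^30 HeapWords, plus the array header,
    // so max_tlab_size() is bounded near 1G words regardless of heap size.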
 
   const size_t payload_size = words - filler_array_hdr_size();
   const size_t len = payload_size * HeapWordSize / sizeof(jint);
   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
 
-  // Set the length first for concurrent GC.
-  ((arrayOop)start)->set_length((int)len);
-  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
+  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
+  allocator.initialize(start);
   DEBUG_ONLY(zap_filler_array(start, words, zap);)
 }
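A worked instance of the length computation in this hunk, with 64-bit values assumed (HeapWordSize == 8, sizeof(jint) == 4; the header size is a stand-in, not the real value of filler_array_hdr_size()):

    // words = 100, filler_array_hdr_size() = 4   (header size is hypothetical)
    // payload_size = 100 - 4 = 96 words
    // len          = 96 * 8 / 4 = 192 jint elements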
 
 void
 CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
[...]
 
   if (words >= filler_array_min_size()) {
     fill_with_array(start, words, zap);
   } else if (words > 0) {
     assert(words == min_fill_size(), "unaligned size");
-    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
+    ObjAllocator allocator(SystemDictionary::Object_klass(), words);
+    allocator.initialize(start);
   }
 }
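Both filler hunks converge on the same two-step pattern: construct an allocator without a thread and stamp the pre-reserved range, replacing the old set_length plus post_allocation_setup_common sequence. In sketch form:

    // The pattern both filler hunks now share:
    //   SomeAllocator allocator(klass, words /* plus length, do_zero for arrays */);
    //   allocator.initialize(start);   // install the header on reserved memory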
 
 void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
 {
[...]
 HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
   guarantee(false, "thread-local allocation buffers not supported");
   return NULL;
+}
+
+oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
+  ObjAllocator allocator(klass, size, THREAD);
+  return allocator.allocate();
+}
+
+oop CollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
+  ObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
+  return allocator.allocate();
+}
+
+oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
+  ClassAllocator allocator(klass, size, THREAD);
+  return allocator.allocate();
+}
 }
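The three inserted entry points give callers one door per shape (object, array, class mirror). A hypothetical caller, assuming InstanceKlass::size_helper() returns the instance size in words as it does elsewhere in HotSpot (the diff itself shows no callers):

    // Hypothetical caller sketch; not part of the changeset.
    oop new_instance_sketch(InstanceKlass* ik, TRAPS) {
      int word_size = ik->size_helper();  // instance size in words (assumption)
      return Universe::heap()->obj_allocate(ik, word_size, THREAD);
    }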
 
 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
   // The second disjunct in the assertion below makes a concession
   // for the start-up verification done while the VM is being
[...]