hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
changeset 34302 c932a347d579
parent 34297 b7ee28694686
parent 34301 080f957bd40f
child 34310 32e3c906b613
@@ -412,20 +412,25 @@
   }
 
   return new_obj;
 }
 
+size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
+  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
+  return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
+}
+
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
 
   uint first = G1_NO_HRM_INDEX;
-  uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
+  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
 
   if (obj_regions == 1) {
     // Only one region to allocate, try to use a fast path by directly allocating
     // from the free lists. Do not try to expand here, we will potentially do that
     // later.
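The new humongous_obj_size_in_regions() centralizes the round-up-and-divide arithmetic that humongous_obj_allocate() previously inlined. A minimal standalone sketch of that arithmetic, assuming power-of-two region sizes; GRAIN_WORDS is an illustrative stand-in for HeapRegion::GrainWords, not a value taken from this changeset:

#include <cassert>
#include <cstddef>
#include <cstdio>

static const size_t GRAIN_WORDS = 1u << 20;  // hypothetical region size in words

// Round word_size up to the next multiple of the (power-of-two) region size,
// then divide to get the number of regions the object will span. This mirrors
// align_size_up_() followed by division, as in humongous_obj_size_in_regions().
static size_t obj_size_in_regions(size_t word_size) {
  size_t aligned = (word_size + GRAIN_WORDS - 1) & ~(GRAIN_WORDS - 1);
  return aligned / GRAIN_WORDS;
}

int main() {
  assert(obj_size_in_regions(1) == 1);                // tiny object: still one region
  assert(obj_size_in_regions(GRAIN_WORDS) == 1);      // exactly one region
  assert(obj_size_in_regions(GRAIN_WORDS + 1) == 2);  // spills into a second region
  printf("3.5 regions' worth of words -> %zu regions\n",
         obj_size_in_regions(GRAIN_WORDS * 3 + GRAIN_WORDS / 2));  // prints 4
  return 0;
}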
@@ -1021,10 +1026,12 @@
       // Given that humongous objects are not allocated in young
       // regions, we'll first try to do the allocation without doing a
       // collection hoping that there's enough space in the heap.
       result = humongous_obj_allocate(word_size, AllocationContext::current());
       if (result != NULL) {
+        size_t size_in_regions = humongous_obj_size_in_regions(word_size);
+        g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
         return result;
       }
 
       if (GC_locker::is_active_and_needs_gc()) {
         should_try_gc = false;
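The two inserted lines charge the policy for the full region footprint of a successful humongous allocation, i.e. whole regions rather than the object's exact size, since the tail of the last region cannot be reused for other objects. A hedged worked example of that footprint calculation, with an assumed 8 MB region size and a made-up object size:

#include <cstdio>

static const size_t GRAIN_BYTES = 8u << 20;  // assume 8 MB regions (illustrative)

int main() {
  size_t obj_bytes = 17u << 20;                                   // a 17 MB array
  size_t regions   = (obj_bytes + GRAIN_BYTES - 1) / GRAIN_BYTES; // spans 3 regions
  size_t charged   = regions * GRAIN_BYTES;                       // 24 MB charged to old gen
  printf("object: %zu MB, charged to old gen: %zu MB\n",
         obj_bytes >> 20, charged >> 20);
  return 0;
}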
@@ -5208,10 +5215,12 @@
   DerivedPointerTable::update_pointers();
 #endif
 }
 
 void G1CollectedHeap::record_obj_copy_mem_stats() {
+  g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
+
   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
                                                create_g1_evac_summary(&_old_evac_stats));
 }
 
 void G1CollectedHeap::free_region(HeapRegion* hr,
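The added call converts the old-generation evacuation statistics from HeapWords to bytes before charging the policy: the evacuation stats count words, while the policy counter counts bytes. A tiny sketch of that conversion; kHeapWordSize stands in for HotSpot's HeapWordSize on a 64-bit VM, and the stats value is made up:

#include <cstdio>

static const size_t kHeapWordSize = 8;  // bytes per HeapWord on 64-bit

int main() {
  size_t old_evac_allocated_words = 4096;  // hypothetical _old_evac_stats.allocated()
  size_t bytes = old_evac_allocated_words * kHeapWordSize;
  printf("%zu words evacuated to old -> %zu bytes charged\n",
         old_evac_allocated_words, bytes);
  return 0;
}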
@@ -5592,10 +5601,18 @@
       cur->uninstall_surv_rate_group();
       if (cur->is_young()) {
         cur->set_young_index_in_cset(-1);
       }
       cur->set_evacuation_failed(false);
+      // When moving a young gen region to old gen, we "allocate" that whole region
+      // there. This is in addition to any already evacuated objects. Notify the
+      // policy about that.
+      // Old gen regions do not cause an additional allocation: both the objects
+      // still in the region and the ones already moved are accounted for elsewhere.
+      if (cur->is_young()) {
+        policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
+      }
       // The region is now considered to be old.
       cur->set_old();
       // Do some allocation statistics accounting. Regions that failed evacuation
       // are always made old, so there is no need to update anything in the young
       // gen statistics, but we need to update old gen statistics.
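The inserted branch implements the comment above it: a young region that failed evacuation is retained in place and retagged as old, so the policy is charged one whole region; an old region that failed is already accounted for elsewhere. A toy model of just that branch; Region and Policy are illustrative stand-ins, not HotSpot types:

#include <cstdio>

static const size_t GRAIN_BYTES = 8u << 20;  // assume 8 MB regions (illustrative)

struct Policy {
  size_t old_bytes = 0;
  void add_bytes_allocated_in_old_since_last_gc(size_t b) { old_bytes += b; }
};

struct Region { bool young; };

// Mirrors the added branch: only a young region transitioning to old
// charges a whole-region allocation to the policy.
static void handle_evacuation_failure(Region* r, Policy* p) {
  if (r->young) {
    p->add_bytes_allocated_in_old_since_last_gc(GRAIN_BYTES);
  }
  r->young = false;  // the region is now considered old
}

int main() {
  Policy p;
  Region young_r{true}, old_r{false};
  handle_evacuation_failure(&young_r, &p);  // charges 8 MB
  handle_evacuation_failure(&old_r, &p);    // charges nothing
  printf("old-gen bytes charged: %zu MB\n", p.old_bytes >> 20);
  return 0;
}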