hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
changeset 31346 a70d45c06136
parent 31344 2316eb7a0358
child 31397 c9cc3289b80c
@@ -403,11 +403,11 @@
 
 // Returns true if the reference points to an object that
 // can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
   HeapRegion* hr = heap_region_containing(p);
-  return !hr->is_humongous();
+  return !hr->is_pinned();
 }
 
 // Private methods.
 
 HeapRegion*
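
The is_scavengable() hunk above replaces the humongous test with the broader pinned test. Judging by the new assert near the end of this changeset ("pinned but not old (archive) or humongous"), the predicate presumably reduces to the following sketch (the real definition lives in the HeapRegion/HeapRegionType headers, which this changeset does not show):

    // Assumed shape of the predicate relied on above: a pinned region is one
    // whose objects G1 must never move, now covering both humongous regions
    // and the new archive regions.
    bool HeapRegion::is_pinned() const {
      return is_humongous() || is_archive();
    }
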
@@ -906,10 +906,211 @@
 
   ShouldNotReachHere();
   return NULL;
 }
 
+void G1CollectedHeap::begin_archive_alloc_range() {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  if (_archive_allocator == NULL) {
+    _archive_allocator = G1ArchiveAllocator::create_allocator(this);
+  }
+}
+
+bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
+  // Allocations in archive regions cannot be of a size that would be considered
+  // humongous even for a minimum-sized region, because G1 region sizes/boundaries
+  // may be different at archive-restore time.
+  return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
+}
+
+HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
+  if (is_archive_alloc_too_large(word_size)) {
+    return NULL;
+  }
+  return _archive_allocator->archive_mem_allocate(word_size);
+}
+
+void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
+                                              size_t end_alignment_in_bytes) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
+
+  // Call complete_archive to do the real work, filling in the MemRegion
+  // array with the archive regions.
+  _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
+  delete _archive_allocator;
+  _archive_allocator = NULL;
+}
+
+bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  for (size_t i = 0; i < count; i++) {
+    if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MutexLockerEx x(Heap_lock);
+
+  MemRegion reserved = _hrm.reserved();
+  HeapWord* prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+
+  // Temporarily disable pretouching of heap pages. This interface is used
+  // when mmap'ing archived heap data in, so pre-touching is wasted.
+  FlagSetting fs(AlwaysPreTouch, false);
+
+  // Enable archive object checking in G1MarkSweep. We have to let it know
+  // about each archive range, so that objects in those ranges aren't marked.
+  G1MarkSweep::enable_archive_object_check();
+
+  // For each specified MemRegion range, allocate the corresponding G1
+  // regions and mark them as archive regions. We expect the ranges in
+  // ascending starting address order, without overlap.
+  for (size_t i = 0; i < count; i++) {
+    MemRegion curr_range = ranges[i];
+    HeapWord* start_address = curr_range.start();
+    size_t word_size = curr_range.word_size();
+    HeapWord* last_address = curr_range.last();
+    size_t commits = 0;
+
+    guarantee(reserved.contains(start_address) && reserved.contains(last_address),
+              err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+              p2i(start_address), p2i(last_address)));
+    guarantee(start_address > prev_last_addr,
+              err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+              p2i(start_address), p2i(prev_last_addr)));
+    prev_last_addr = last_address;
+
+    // Check for ranges that start in the same G1 region in which the previous
+    // range ended, and adjust the start address so we don't try to allocate
+    // the same region again. If the current range is entirely within that
+    // region, skip it, just adjusting the recorded top.
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
+      start_address = start_region->end();
+      if (start_address > last_address) {
+        _allocator->increase_used(word_size * HeapWordSize);
+        start_region->set_top(last_address + 1);
+        continue;
+      }
+      start_region->set_top(start_address);
+      curr_range = MemRegion(start_address, last_address + 1);
+      start_region = _hrm.addr_to_region(start_address);
+    }
+
+    // Perform the actual region allocation, exiting if it fails.
+    // Then note how much new space we have allocated.
+    if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
+      return false;
+    }
+    _allocator->increase_used(word_size * HeapWordSize);
+    if (commits != 0) {
+      ergo_verbose1(ErgoHeapSizing,
+                    "attempt heap expansion",
+                    ergo_format_reason("allocate archive regions")
+                    ergo_format_byte("total size"),
+                    HeapRegion::GrainWords * HeapWordSize * commits);
+    }
+
+    // Mark each G1 region touched by the range as archive, add it to the old set,
+    // and set the allocation context and top.
+    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    prev_last_region = last_region;
+
+    while (curr_region != NULL) {
+      assert(curr_region->is_empty() && !curr_region->is_pinned(),
+             err_msg("Region already in use (index %u)", curr_region->hrm_index()));
+      _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
+      curr_region->set_allocation_context(AllocationContext::system());
+      curr_region->set_archive();
+      _old_set.add(curr_region);
+      if (curr_region != last_region) {
+        curr_region->set_top(curr_region->end());
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region->set_top(last_address + 1);
+        curr_region = NULL;
+      }
+    }
+
+    // Notify mark-sweep of the archive range.
+    G1MarkSweep::mark_range_archive(curr_range);
+  }
+  return true;
+}
+
+void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  HeapWord *prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+
+  // For each MemRegion, create filler objects, if needed, in the G1 regions
+  // that contain the address range. The address range actually within the
+  // MemRegion will not be modified. That is assumed to have been initialized
+  // elsewhere, probably via an mmap of archived heap data.
+  MutexLockerEx x(Heap_lock);
+  for (size_t i = 0; i < count; i++) {
+    HeapWord* start_address = ranges[i].start();
+    HeapWord* last_address = ranges[i].last();
+
+    assert(reserved.contains(start_address) && reserved.contains(last_address),
+           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                   p2i(start_address), p2i(last_address)));
+    assert(start_address > prev_last_addr,
+           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                   p2i(start_address), p2i(prev_last_addr)));
+
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapWord* bottom_address = start_region->bottom();
+
+    // Check for a range beginning in the same region in which the
+    // previous one ended.
+    if (start_region == prev_last_region) {
+      bottom_address = prev_last_addr + 1;
+    }
+
+    // Verify that the regions were all marked as archive regions by
+    // alloc_archive_regions.
+    HeapRegion* curr_region = start_region;
+    while (curr_region != NULL) {
+      guarantee(curr_region->is_archive(),
+                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+      if (curr_region != last_region) {
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region = NULL;
+      }
+    }
+
+    prev_last_addr = last_address;
+    prev_last_region = last_region;
+
+    // Fill the memory below the allocated range with dummy object(s),
+    // if the region bottom does not match the range start, or if the previous
+    // range ended within the same G1 region, and there is a gap.
+    if (start_address != bottom_address) {
+      size_t fill_size = pointer_delta(start_address, bottom_address);
+      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
+      _allocator->increase_used(fill_size * HeapWordSize);
+    }
+  }
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
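
The functions added above form a bracketed allocation protocol for dump time: begin_archive_alloc_range(), any number of archive_mem_allocate() calls, then end_archive_alloc_range() to collect the covered MemRegions and tear the allocator down. A hedged sketch of that sequence (the caller is hypothetical; only the G1CollectedHeap side is defined in this changeset):

    // Illustrative dump-time caller. Per the asserts above, this must run
    // in the VM thread at a safepoint.
    void archive_some_data(G1CollectedHeap* g1h, GrowableArray<MemRegion>* ranges) {
      g1h->begin_archive_alloc_range();             // lazily creates _archive_allocator
      HeapWord* p = g1h->archive_mem_allocate(64);  // NULL if 64 words would be humongous
      if (p != NULL) {
        // ... copy/initialize the archived object(s) at p ...
      }
      // Fills 'ranges' with the archive MemRegions, then deletes the
      // allocator. The alignment argument here is illustrative only.
      g1h->end_archive_alloc_range(ranges, HeapWordSize);
    }

At restore time the expected order is check_archive_addresses(), then alloc_archive_regions(), then, once the archive data has been mmap'ed into place, fill_archive_regions() to pad any gap below each range with filler objects.
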
@@ -1130,10 +1331,12 @@
       } else {
         _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
       }
     } else if (hr->is_continues_humongous()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+    } else if (hr->is_archive()) {
+      _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
     } else if (hr->is_old()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
     } else {
       ShouldNotReachHere();
     }
@@ -1721,10 +1924,11 @@
   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
+  _archive_allocator(NULL),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _survivor_plab_stats(YoungPLABSize, PLABWeight),
   _old_plab_stats(OldPLABSize, PLABWeight),
@@ -1746,11 +1950,15 @@
                           /* are_GC_task_threads */true,
                           /* are_ConcurrentGC_threads */false);
   _workers->initialize_workers();
 
   _allocator = G1Allocator::create_allocator(this);
-  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
+  _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
+
+  // Override the default _filler_array_max_size so that no humongous filler
+  // objects are created.
+  _filler_array_max_size = _humongous_object_threshold_in_words;
 
   uint n_queues = ParallelGCThreads;
   _task_queues = new RefToScanQueueSet(n_queues);
 
   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
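
The constructor now routes the old GrainWords / 2 computation through a named helper, which lets is_archive_alloc_too_large() above apply the same rule to the minimum region size. The header is not part of this changeset, but the helper presumably keeps the established half-region rule:

    // Assumed definition, matching the replaced inline computation:
    // an allocation is humongous once it reaches half a region.
    static size_t humongous_threshold_for(size_t region_size) {
      return region_size / 2;
    }

Capping _filler_array_max_size at the same threshold means fill_with_objects() never creates a humongous filler, which matters for the gap filling done in fill_archive_regions().
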
@@ -2161,11 +2369,15 @@
 }
 
 
 // Computes the sum of the storage used by the various regions.
 size_t G1CollectedHeap::used() const {
-  return _allocator->used();
+  size_t result = _allocator->used();
+  if (_archive_allocator != NULL) {
+    result += _archive_allocator->used();
+  }
+  return result;
 }
 
 size_t G1CollectedHeap::used_unlocked() const {
   return _allocator->used_unlocked();
 }
@@ -2574,11 +2786,11 @@
   }
 }
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
   HeapRegion* result = _hrm.next_region_in_heap(from);
-  while (result != NULL && result->is_humongous()) {
+  while (result != NULL && result->is_pinned()) {
     result = _hrm.next_region_in_heap(result);
   }
   return result;
 }
 
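
next_compaction_region() now skips every pinned region rather than only humongous ones, so full-GC compaction slides objects past archive regions without touching them. A sketch of the kind of loop this predicate serves (illustrative helper; the actual consumer is presumably the mark-sweep prepare/compact phase):

    // Illustrative walk over compaction target regions. Each region returned
    // by next_compaction_region() satisfies !is_pinned(), i.e. it is neither
    // humongous nor archive, so its contents may be moved.
    void walk_compaction_regions(G1CollectedHeap* g1h, const HeapRegion* cur) {
      for (HeapRegion* r = g1h->next_compaction_region(cur);
           r != NULL;
           r = g1h->next_compaction_region(r)) {
        // ... plan forwarding addresses / slide live objects within r ...
      }
    }
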
@@ -2882,10 +3094,35 @@
     }
   }
   size_t live_bytes() { return _live_bytes; }
 };
 
+class VerifyArchiveOopClosure: public OopClosure {
+public:
+  VerifyArchiveOopClosure(HeapRegion *hr) { }
+  void do_oop(narrowOop *p) { do_oop_work(p); }
+  void do_oop(      oop *p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T *p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
+              err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
+                      p2i(p), p2i(obj)));
+  }
+};
+
+class VerifyArchiveRegionClosure: public ObjectClosure {
+public:
+  VerifyArchiveRegionClosure(HeapRegion *hr) { }
+  // Verify that all object pointers are to archive regions.
+  void do_object(oop o) {
+    VerifyArchiveOopClosure checkOop(NULL);
+    assert(o != NULL, "Should not be here for NULL oops");
+    o->oop_iterate_no_header(&checkOop);
+  }
+};
+
 class VerifyRegionClosure: public HeapRegionClosure {
 private:
   bool             _par;
   VerifyOption     _vo;
   bool             _failures;
@@ -2901,10 +3138,17 @@
   bool failures() {
     return _failures;
   }
 
   bool doHeapRegion(HeapRegion* r) {
+    // For archive regions, verify there are no heap pointers to
+    // non-pinned regions. For all others, verify liveness info.
+    if (r->is_archive()) {
+      VerifyArchiveRegionClosure verify_oop_pointers(r);
+      r->object_iterate(&verify_oop_pointers);
+      return true;
+    }
     if (!r->is_continues_humongous()) {
       bool failures = false;
       r->verify(_vo, &failures);
       if (failures) {
         _failures = true;
@@ -3085,22 +3329,25 @@
                                        const HeapRegion* hr,
                                        const VerifyOption vo) const {
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
   default:                            ShouldNotReachHere();
   }
   return false; // keep some compilers happy
 }
 
 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                        const VerifyOption vo) const {
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  case VerifyOption_G1UseMarkWord: {
+    HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
+    return !obj->is_gc_marked() && !hr->is_archive();
+  }
   default:                            ShouldNotReachHere();
   }
   return false; // keep some compilers happy
 }
 
@@ -3129,11 +3376,11 @@
 
   // Print the per-region information.
   st->cr();
   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
                "HS=humongous(starts), HC=humongous(continues), "
-               "CS=collection set, F=free, TS=gc time stamp, "
+               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
                "PTAMS=previous top-at-mark-start, "
                "NTAMS=next top-at-mark-start)");
   PrintRegionClosure blk(st);
   heap_region_iterate(&blk);
 }
@@ -3850,10 +4097,13 @@
 
         _young_list->reset_auxilary_lists();
 
         if (evacuation_failed()) {
           _allocator->set_used(recalculate_used());
+          if (_archive_allocator != NULL) {
+            _archive_allocator->clear_used();
+          }
           for (uint i = 0; i < ParallelGCThreads; i++) {
             if (_evacuation_failed_info_array[i].has_failed()) {
               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
             }
           }
@@ -6171,17 +6421,22 @@
       _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");
 
       if (r->is_humongous()) {
-        // We ignore humongous regions, we left the humongous set unchanged
+        // We ignore humongous regions. We left the humongous set unchanged.
       } else {
         // Objects that were compacted would have ended up on regions
-        // that were previously old or free.
+        // that were previously old or free.  Archive regions (which are
+        // old) will not have been touched.
        assert(r->is_free() || r->is_old(), "invariant");
-        // We now consider them old, so register as such.
-        r->set_old();
+        // We now consider them old, so register as such. Leave
+        // archive regions set that way, however, while still adding
+        // them to the old set.
+        if (!r->is_archive()) {
+          r->set_old();
+        }
         _old_set->add(r);
       }
       _total_used += r->used();
     }
 
@@ -6203,10 +6458,13 @@
   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
     _allocator->set_used(cl.total_used());
+    if (_archive_allocator != NULL) {
+      _archive_allocator->clear_used();
+    }
   }
   assert(_allocator->used_unlocked() == recalculate_used(),
          err_msg("inconsistent _allocator->used_unlocked(), "
                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
                  _allocator->used_unlocked(), recalculate_used()));
@@ -6303,10 +6561,29 @@
     _old_set.add(alloc_region);
   }
   _hr_printer.retire(alloc_region);
 }
 
+HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
+  bool expanded = false;
+  uint index = _hrm.find_highest_free(&expanded);
+
+  if (index != G1_NO_HRM_INDEX) {
+    if (expanded) {
+      ergo_verbose1(ErgoHeapSizing,
+                    "attempt heap expansion",
+                    ergo_format_reason("requested address range outside heap bounds")
+                    ergo_format_byte("region size"),
+                    HeapRegion::GrainWords * HeapWordSize);
+    }
+    _hrm.allocate_free_regions_starting_at(index, 1);
+    return region_at(index);
+  }
+  return NULL;
+}
+
+
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
   HeapRegionSet*   _old_set;
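
alloc_highest_free_region() above is new plumbing: it claims the free region with the highest index, expanding the heap if the region at the top of the reserved space is not yet committed. A hedged usage sketch (the caller is hypothetical; placing archive data at the top of the heap is the plausible client):

    // Illustrative caller: reserve the topmost free region for special use.
    HeapRegion* take_top_region(G1CollectedHeap* g1h) {
      HeapRegion* r = g1h->alloc_highest_free_region();
      // If non-NULL, r has been removed from the free list (the heap may
      // have been expanded to get it); the caller must initialize it and
      // account for it, e.g. as an archive region.
      return r;
    }
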
@@ -6339,10 +6616,13 @@
       _free_count.increment(1u, hr->capacity());
     } else if (hr->is_old()) {
       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
       _old_count.increment(1u, hr->capacity());
     } else {
+      // There are no other valid region types. Check for one invalid
+      // one we can identify: pinned without old or humongous set.
+      assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
       ShouldNotReachHere();
     }
     return false;
   }
 