hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
changeset 7923 fc200fcd4e05
parent 7908 d1ab39295cee
child 8072 f223f43cd62f
diff between revisions 7922:e97540c35e38 and 7923:fc200fcd4e05
@@ -26,11 +26,10 @@
 #include "code/icBuffer.hpp"
 #include "gc_implementation/g1/bufferingOopClosure.hpp"
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
-#include "gc_implementation/g1/concurrentZFThread.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
@@ -423,14 +422,12 @@
   return hr;
 }
 
 void G1CollectedHeap::stop_conc_gc_threads() {
   _cg1r->stop();
-  _czft->stop();
   _cmThread->stop();
 }
-
 
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
 
@@ -479,58 +476,142 @@
 
 G1CollectedHeap* G1CollectedHeap::_g1h;
 
 // Private methods.
 
-// Finds a HeapRegion that can be used to allocate a given size of block.
-
-
-HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
-                                                 bool do_expand,
-                                                 bool zero_filled) {
-  ConcurrentZFThread::note_region_alloc();
-  HeapRegion* res = alloc_free_region_from_lists(zero_filled);
+HeapRegion*
+G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) {
+  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
+    if (!_secondary_free_list.is_empty()) {
+      if (G1ConcRegionFreeingVerbose) {
+        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                               "secondary_free_list has "SIZE_FORMAT" entries",
+                               _secondary_free_list.length());
+      }
+      // It looks as if there are free regions available on the
+      // secondary_free_list. Let's move them to the free_list and try
+      // again to allocate from it.
+      append_secondary_free_list();
+
+      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
+             "empty we should have moved at least one entry to the free_list");
+      HeapRegion* res = _free_list.remove_head();
+      if (G1ConcRegionFreeingVerbose) {
+        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                               "allocated "HR_FORMAT" from secondary_free_list",
+                               HR_FORMAT_PARAMS(res));
+      }
+      return res;
+    }
+
+    // Wait here until we get notified either when (a) there are no
+    // more free regions coming or (b) some regions have been moved on
+    // the secondary_free_list.
+    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
+  }
+
+  if (G1ConcRegionFreeingVerbose) {
+    gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                           "could not allocate from secondary_free_list");
+  }
+  return NULL;
+}
+
+HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
+                                             bool do_expand) {
+  assert(!isHumongous(word_size) ||
+                                  word_size <= (size_t) HeapRegion::GrainWords,
+         "the only time we use this to allocate a humongous region is "
+         "when we are allocating a single humongous region");
+
+  HeapRegion* res;
+  if (G1StressConcRegionFreeing) {
+    if (!_secondary_free_list.is_empty()) {
+      if (G1ConcRegionFreeingVerbose) {
+        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                               "forced to look at the secondary_free_list");
+      }
+      res = new_region_try_secondary_free_list(word_size);
+      if (res != NULL) {
+        return res;
+      }
+    }
+  }
+  res = _free_list.remove_head_or_null();
+  if (res == NULL) {
+    if (G1ConcRegionFreeingVerbose) {
+      gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                             "res == NULL, trying the secondary_free_list");
+    }
+    res = new_region_try_secondary_free_list(word_size);
+  }
   if (res == NULL && do_expand) {
     expand(word_size * HeapWordSize);
-    res = alloc_free_region_from_lists(zero_filled);
-    assert(res == NULL ||
-           (!res->isHumongous() &&
-            (!zero_filled ||
-             res->zero_fill_state() == HeapRegion::Allocated)),
-           "Alloc Regions must be zero filled (and non-H)");
+    res = _free_list.remove_head_or_null();
   }
   if (res != NULL) {
-    if (res->is_empty()) {
-      _free_regions--;
-    }
-    assert(!res->isHumongous() &&
-           (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated),
-           err_msg("Non-young alloc Regions must be zero filled (and non-H):"
-                   " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d",
-                   res->isHumongous(), zero_filled, res->zero_fill_state()));
-    assert(!res->is_on_unclean_list(),
-           "Alloc Regions must not be on the unclean list");
     if (G1PrintHeapRegions) {
-      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
-                             "top "PTR_FORMAT,
-                             res->hrs_index(), res->bottom(), res->end(), res->top());
+      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
+                             "top "PTR_FORMAT, res->hrs_index(),
+                             res->bottom(), res->end(), res->top());
     }
   }
   return res;
 }
 
-HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
-                                                         size_t word_size,
-                                                         bool zero_filled) {
+HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
+                                                 size_t word_size) {
   HeapRegion* alloc_region = NULL;
   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
-    alloc_region = newAllocRegion_work(word_size, true, zero_filled);
+    alloc_region = new_region_work(word_size, true /* do_expand */);
     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
       alloc_region->set_survivor();
     }
     ++_gc_alloc_region_counts[purpose];
   } else {
     g1_policy()->note_alloc_region_limit_reached(purpose);
   }
   return alloc_region;
+}
+
+int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
+                                                       size_t word_size) {
+  int first = -1;
+  if (num_regions == 1) {
+    // Only one region to allocate, no need to go through the slower
+    // path. The caller will attempt the expansion if this fails, so
+    // let's not try to expand here too.
+    HeapRegion* hr = new_region_work(word_size, false /* do_expand */);
+    if (hr != NULL) {
+      first = hr->hrs_index();
+    } else {
+      first = -1;
+    }
+  } else {
+    // We can't allocate humongous regions while cleanupComplete() is
+    // running, since some of the regions we find to be empty might not
+    // yet be added to the free list and it is not straightforward to
+    // know which list they are on so that we can remove them. Note
+    // that we only need to do this if we need to allocate more than
+    // one region to satisfy the current humongous allocation
+    // request. If we are only allocating one region we use the common
+    // region allocation code (see above).
+    wait_while_free_regions_coming();
+    append_secondary_free_list_if_not_empty();
+
+    if (free_regions() >= num_regions) {
+      first = _hrs->find_contiguous(num_regions);
+      if (first != -1) {
+        for (int i = first; i < first + (int) num_regions; ++i) {
+          HeapRegion* hr = _hrs->at(i);
+          assert(hr->is_empty(), "sanity");
+          assert(is_on_free_list(hr), "sanity");
+          hr->set_pending_removal(true);
+        }
+        _free_list.remove_all_pending(num_regions);
+      }
+    }
+  }
+  return first;
 }
 
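The new_region_try_secondary_free_list() added above is a monitor handoff between the concurrent cleanup code (which publishes freed regions on the secondary list while free_regions_coming() is true) and allocating threads (which drain that list into the master free list, or block on SecondaryFreeList_lock until more regions arrive or none are coming). Below is a minimal self-contained sketch of the same protocol with standard C++ primitives; RegionAllocator, publish(), and the list types are illustrative stand-ins, not HotSpot APIs.

    // Standalone sketch of the secondary-free-list handoff used above.
    // All names are hypothetical; this is illustrative, not HotSpot code.
    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct Region { int index; };

    class RegionAllocator {
      std::mutex _lock;                     // plays the role of SecondaryFreeList_lock
      std::condition_variable _cv;
      std::deque<Region*> _free_list;       // "master" free list
      std::deque<Region*> _secondary_list;  // filled concurrently by cleanup
      bool _free_regions_coming = false;    // true while cleanup is still publishing

    public:
      // Called by the cleanup code: publish a region and wake up allocators.
      void publish(Region* r, bool more_coming) {
        std::lock_guard<std::mutex> g(_lock);
        _secondary_list.push_back(r);
        _free_regions_coming = more_coming;
        _cv.notify_all();
      }

      // Mirrors new_region_try_secondary_free_list(): drain or wait.
      Region* try_secondary() {
        std::unique_lock<std::mutex> g(_lock);
        while (!_secondary_list.empty() || _free_regions_coming) {
          if (!_secondary_list.empty()) {
            // Move everything to the master list, then allocate from it.
            _free_list.insert(_free_list.end(),
                              _secondary_list.begin(), _secondary_list.end());
            _secondary_list.clear();
            Region* res = _free_list.front();
            _free_list.pop_front();
            return res;
          }
          // Nothing here yet, but cleanup promises more: block until
          // (a) regions are published or (b) no more regions are coming.
          _cv.wait(g);
        }
        return nullptr;  // cleanup finished without publishing anything usable
      }
    };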
@@ -537,81 +618,146 @@
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
-  assert_heap_locked_or_at_safepoint();
-  assert(regions_accounted_for(), "Region leakage!");
-
-  // We can't allocate humongous regions while cleanupComplete is
-  // running, since some of the regions we find to be empty might not
-  // yet be added to the unclean list. If we're already at a
-  // safepoint, this call is unnecessary, not to mention wrong.
-  if (!SafepointSynchronize::is_at_safepoint()) {
-    wait_for_cleanup_complete();
-  }
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
+
+  verify_region_sets_optional();
 
   size_t num_regions =
          round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
-
-  // Special case if < one region???
-
-  // Remember the ft size.
   size_t x_size = expansion_regions();
-
-  HeapWord* res = NULL;
-  bool eliminated_allocated_from_lists = false;
-
-  // Can the allocation potentially fit in the free regions?
-  if (free_regions() >= num_regions) {
-    res = _hrs->obj_allocate(word_size);
-  }
-  if (res == NULL) {
-    // Try expansion.
-    size_t fs = _hrs->free_suffix();
+  size_t fs = _hrs->free_suffix();
+  int first = humongous_obj_allocate_find_first(num_regions, word_size);
+  if (first == -1) {
+    // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
       expand((num_regions - fs) * HeapRegion::GrainBytes);
-      res = _hrs->obj_allocate(word_size);
-      assert(res != NULL, "This should have worked.");
-    } else {
-      // Expansion won't help.  Are there enough free regions if we get rid
-      // of reservations?
-      size_t avail = free_regions();
-      if (avail >= num_regions) {
-        res = _hrs->obj_allocate(word_size);
-        if (res != NULL) {
-          remove_allocated_regions_from_lists();
-          eliminated_allocated_from_lists = true;
-        }
-      }
-    }
-  }
-  if (res != NULL) {
-    // Increment by the number of regions allocated.
-    // FIXME: Assumes regions all of size GrainBytes.
-#ifndef PRODUCT
-    mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
-                                           HeapRegion::GrainWords));
-#endif
-    if (!eliminated_allocated_from_lists)
-      remove_allocated_regions_from_lists();
-    _summary_bytes_used += word_size * HeapWordSize;
-    _free_regions -= num_regions;
-    _num_humongous_regions += (int) num_regions;
-  }
-  assert(regions_accounted_for(), "Region Leakage");
-  return res;
+      first = humongous_obj_allocate_find_first(num_regions, word_size);
+      assert(first != -1, "this should have worked");
+    }
+  }
+
+  if (first != -1) {
+    // Index of last region in the series + 1.
+    int last = first + (int) num_regions;
+
+    // We need to initialize the region(s) we just discovered. This is
+    // a bit tricky given that it can happen concurrently with
+    // refinement threads refining cards on these regions and
+    // potentially wanting to refine the BOT as they are scanning
+    // those cards (this can happen shortly after a cleanup; see CR
+    // 6991377). So we have to set up the region(s) carefully and in
+    // a specific order.
+
+    // The word size sum of all the regions we will allocate.
+    size_t word_size_sum = num_regions * HeapRegion::GrainWords;
+    assert(word_size <= word_size_sum, "sanity");
+
+    // This will be the "starts humongous" region.
+    HeapRegion* first_hr = _hrs->at(first);
+    // The header of the new object will be placed at the bottom of
+    // the first region.
+    HeapWord* new_obj = first_hr->bottom();
+    // This will be the new end of the first region in the series that
+    // should also match the end of the last region in the series.
+    HeapWord* new_end = new_obj + word_size_sum;
+    // This will be the new top of the first region that will reflect
+    // this allocation.
+    HeapWord* new_top = new_obj + word_size;
+
+    // First, we need to zero the header of the space that we will be
+    // allocating. When we update top further down, some refinement
+    // threads might try to scan the region. By zeroing the header we
+    // ensure that any thread that will try to scan the region will
+    // come across the zero klass word and bail out.
+    //
+    // NOTE: It would not have been correct to have used
+    // CollectedHeap::fill_with_object() and make the space look like
+    // an int array. The thread that is doing the allocation will
+    // later update the object header to a potentially different array
+    // type and, for a very short period of time, the klass and length
+    // fields will be inconsistent. This could cause a refinement
+    // thread to calculate the object size incorrectly.
+    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+    // We will set up the first region as "starts humongous". This
+    // will also update the BOT covering all the regions to reflect
+    // that there is a single object that starts at the bottom of the
+    // first region.
+    first_hr->set_startsHumongous(new_top, new_end);
+
+    // Then, if there are any, we will set up the "continues
+    // humongous" regions.
+    HeapRegion* hr = NULL;
+    for (int i = first + 1; i < last; ++i) {
+      hr = _hrs->at(i);
+      hr->set_continuesHumongous(first_hr);
+    }
+    // If we have "continues humongous" regions (hr != NULL), then the
+    // end of the last one should match new_end.
+    assert(hr == NULL || hr->end() == new_end, "sanity");
+
+    // Up to this point no concurrent thread would have been able to
+    // do any scanning on any region in this series. All the top
+    // fields still point to bottom, so the intersection between
+    // [bottom,top] and [card_start,card_end] will be empty. Before we
+    // update the top fields, we'll do a storestore to make sure that
+    // no thread sees the update to top before the zeroing of the
+    // object header and the BOT initialization.
+    OrderAccess::storestore();
+
+    // Now that the BOT and the object header have been initialized,
+    // we can update top of the "starts humongous" region.
+    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+           "new_top should be in this region");
+    first_hr->set_top(new_top);
+
+    // Now, we will update the top fields of the "continues humongous"
+    // regions. The reason we need to do this is that, otherwise,
+    // these regions would look empty and this will confuse parts of
+    // G1. For example, the code that looks for a consecutive number
+    // of empty regions will consider them empty and try to
+    // re-allocate them. We can extend is_empty() to also include
+    // !continuesHumongous(), but it is easier to just update the top
+    // fields here. The way we set top for all regions (i.e., top ==
+    // end for all regions but the last one, top == new_top for the
+    // last one) is actually used when we will free up the humongous
+    // region in free_humongous_region().
+    hr = NULL;
+    for (int i = first + 1; i < last; ++i) {
+      hr = _hrs->at(i);
+      if ((i + 1) == last) {
+        // last continues humongous region
+        assert(hr->bottom() < new_top && new_top <= hr->end(),
+               "new_top should fall on this region");
+        hr->set_top(new_top);
+      } else {
+        // not last one
+        assert(new_top > hr->end(), "new_top should be above this region");
+        hr->set_top(hr->end());
+      }
+    }
+    // If we have continues humongous regions (hr != NULL), then the
+    // end of the last one should match new_end and its top should
+    // match new_top.
+    assert(hr == NULL ||
+           (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+    assert(first_hr->used() == word_size * HeapWordSize, "invariant");
+    _summary_bytes_used += first_hr->used();
+    _humongous_set.add(first_hr);
+
+    return new_obj;
+  }
+
+  verify_region_sets_optional();
+  return NULL;
 }
 
 void
 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
-  // The cleanup operation might update _summary_bytes_used
-  // concurrently with this method. So, right now, if we don't wait
-  // for it to complete, updates to _summary_bytes_used might get
-  // lost. This will be resolved in the near future when the operation
-  // of the free region list is revamped as part of CR 6977804.
-  wait_for_cleanup_complete();
-
   // Other threads might still be trying to allocate using CASes out
   // of the region we are retiring, as they can do so without holding
   // the Heap_lock. So we first have to make sure that no one else can
   // allocate in it by doing a maximal allocation. Even if our CAS
   // attempt fails a few times, we'll succeed sooner or later given
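The long comment in the hunk above spells out an initialize-then-publish ordering: zero the klass word, set up the BOT and the humongous metadata, issue OrderAccess::storestore(), and only then update top, so a concurrent refinement thread can never observe the published top without also seeing the initialized header. A compact sketch of the same ordering with C++11 atomics follows; the Region type and field layout are hypothetical, not HotSpot's.

    // Sketch of the zero-header / barrier / publish-top ordering used in
    // humongous_obj_allocate() above. Illustrative only; Region and its
    // fields are hypothetical stand-ins, not HotSpot types.
    #include <atomic>
    #include <cstddef>

    struct Region {
      std::atomic<size_t> klass_word{0};  // 0 means "not yet parseable"
      std::atomic<size_t> top{0};         // how far scanners may scan

      // Writer (allocating thread): initialize, then publish.
      void allocate(size_t klass, size_t new_top) {
        klass_word.store(klass, std::memory_order_relaxed);
        // Equivalent of OrderAccess::storestore(): no write after this
        // fence can become visible before the writes above it.
        std::atomic_thread_fence(std::memory_order_release);
        top.store(new_top, std::memory_order_relaxed);
      }

      // Reader (refinement thread): only scan what has been published.
      bool try_scan() {
        size_t t = top.load(std::memory_order_acquire);
        if (t == 0) return false;  // nothing published yet; bail out
        size_t k = klass_word.load(std::memory_order_relaxed);
        // Because top was published after klass_word, k is visible here.
        return k != 0;
      }
    };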
@@ -652,40 +798,27 @@
 HeapWord*
 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
                                                        bool at_safepoint,
                                                        bool do_dirtying,
                                                        bool can_expand) {
-  assert_heap_locked_or_at_safepoint();
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   assert(_cur_alloc_region == NULL,
          "replace_cur_alloc_region_and_allocate() should only be called "
          "after retiring the previous current alloc region");
   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
          "at_safepoint and is_at_safepoint() should be a tautology");
   assert(!can_expand || g1_policy()->can_expand_young_list(),
          "we should not call this method with can_expand == true if "
          "we are not allowed to expand the young gen");
 
   if (can_expand || !g1_policy()->is_young_list_full()) {
-    if (!at_safepoint) {
-      // The cleanup operation might update _summary_bytes_used
-      // concurrently with this method. So, right now, if we don't
-      // wait for it to complete, updates to _summary_bytes_used might
-      // get lost. This will be resolved in the near future when the
-      // operation of the free region list is revamped as part of
-      // CR 6977804. If we're already at a safepoint, this call is
-      // unnecessary, not to mention wrong.
-      wait_for_cleanup_complete();
-    }
-
-    HeapRegion* new_cur_alloc_region = newAllocRegion(word_size,
-                                                      false /* zero_filled */);
+    HeapRegion* new_cur_alloc_region = new_alloc_region(word_size);
     if (new_cur_alloc_region != NULL) {
       assert(new_cur_alloc_region->is_empty(),
              "the newly-allocated region should be empty, "
             "as right now we only allocate new regions out of the free list");
       g1_policy()->update_region_num(true /* next_is_young */);
-      _summary_bytes_used -= new_cur_alloc_region->used();
       set_region_short_lived_locked(new_cur_alloc_region);
 
       assert(!new_cur_alloc_region->isHumongous(),
              "Catch a regression of this bug.");
 
@@ -731,11 +864,11 @@
     }
   }
 
   assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
          "alloc region, it should still be NULL");
-  assert_heap_locked_or_at_safepoint();
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   return NULL;
 }
 
 // See the comment in the .hpp file about the locking protocol and
 // assumptions of this method (and other related ones).
@@ -743,44 +876,24 @@
 G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   assert_heap_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
          "used for humongous allocations");
 
+  // We should only reach here when we were unable to allocate
+  // otherwise. So, we should have no active current alloc region.
+  assert(_cur_alloc_region == NULL, "current alloc region should be NULL");
+
   // We will loop while succeeded is false, which means that we tried
   // to do a collection, but the VM op did not succeed. So, when we
   // exit the loop, either one of the allocation attempts was
   // successful, or we succeeded in doing the VM op but which was
   // unable to allocate after the collection.
   for (int try_count = 1; /* we'll return or break */; try_count += 1) {
     bool succeeded = true;
 
     // Every time we go round the loop we should be holding the Heap_lock.
     assert_heap_locked();
-
-    {
-      // We may have concurrent cleanup working at the time. Wait for
-      // it to complete. In the future we would probably want to make
-      // the concurrent cleanup truly concurrent by decoupling it from
-      // the allocation. This will happen in the near future as part
-      // of CR 6977804 which will revamp the operation of the free
-      // region list. The fact that wait_for_cleanup_complete() will
-      // do a wait() means that we'll give up the Heap_lock. So, it's
-      // possible that when we exit wait_for_cleanup_complete() we
-      // might be able to allocate successfully (since somebody else
-      // might have done a collection meanwhile). So, we'll attempt to
-      // allocate again, just in case. When we make cleanup truly
-      // concurrent with allocation, we should remove this allocation
-      // attempt as it's redundant (we only reach here after an
-      // allocation attempt has been unsuccessful).
-      wait_for_cleanup_complete();
-
-      HeapWord* result = attempt_allocation_locked(word_size);
-      if (result != NULL) {
-        assert_heap_not_locked();
-        return result;
-      }
-    }
 
     if (GC_locker::is_active_and_needs_gc()) {
       // We are locked out of GC because of the GC locker. We can
       // allocate a new region only if we can expand the young gen.
 
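The loop that the comments above describe alternates an allocation attempt with a GC VM operation that may itself fail to run; the loop exits only when an allocation succeeds or a completed collection still cannot satisfy the request. Below is a toy self-contained sketch of this retry shape; the bump-pointer TinyHeap is a hypothetical stand-in, not the HotSpot allocation path.

    // Sketch of the retry loop described above: alternate between an
    // allocation attempt and a collection request until the allocation
    // succeeds or a completed collection still leaves no room.
    #include <cstddef>
    #include <cstdio>

    class TinyHeap {
      static const size_t kCapacity = 1024;  // words
      size_t _used = 0;

    public:
      void* try_allocate(size_t words) {
        if (_used + words > kCapacity) return NULL;
        _used += words;
        return &_used;  // placeholder address; real code returns heap memory
      }

      // Models the GC VM op: returns false if it could not run this time
      // (e.g. it was pre-empted); on success, frees everything and
      // retries the allocation once.
      bool try_collect(void** result, size_t words, bool can_run) {
        if (!can_run) return false;
        _used = 0;                      // "collect"
        *result = try_allocate(words);  // may still be NULL if words > capacity
        return true;
      }

      void* attempt_allocation_slow(size_t words) {
        bool can_run_gc = false;  // first VM op attempt "fails", as in the comment
        for (int try_count = 1; /* we return inside */; try_count += 1) {
          void* result = try_allocate(words);
          if (result != NULL) return result;

          void* gc_result = NULL;
          if (try_collect(&gc_result, words, can_run_gc)) {
            return gc_result;  // GC ran: either satisfied or genuinely full
          }
          can_run_gc = true;   // the VM op did not succeed; loop and retry
        }
      }
    };

    int main() {
      TinyHeap heap;
      heap.try_allocate(1000);
      void* p = heap.attempt_allocation_slow(200);
      std::printf("allocation %s\n", p != NULL ? "succeeded" : "failed");
      return 0;
    }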
@@ -892,11 +1005,11 @@
                                               bool at_safepoint) {
   // This is the method that will allocate a humongous object. All
   // allocation paths that attempt to allocate a humongous object
   // should eventually reach here. Currently, the only paths are from
   // mem_allocate() and attempt_allocation_at_safepoint().
-  assert_heap_locked_or_at_safepoint();
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   assert(isHumongous(word_size), "attempt_allocation_humongous() "
          "should only be used for humongous allocations");
   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
          "at_safepoint and is_at_safepoint() should be a tautology");
 
@@ -969,17 +1082,17 @@
       warning("G1CollectedHeap::attempt_allocation_humongous "
               "retries %d times", try_count);
     }
   }
 
-  assert_heap_locked_or_at_safepoint();
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   return NULL;
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                            bool expect_null_cur_alloc_region) {
-  assert_at_safepoint();
+  assert_at_safepoint(true /* should_be_vm_thread */);
   assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
          err_msg("the current alloc region was unexpectedly found "
                  "to be non-NULL, cur alloc region: "PTR_FORMAT" "
                  "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT,
                  _cur_alloc_region, expect_null_cur_alloc_region, word_size));
@@ -1129,26 +1242,22 @@
 
   ShouldNotReachHere();
 }
 
 void G1CollectedHeap::abandon_cur_alloc_region() {
-  if (_cur_alloc_region != NULL) {
-    // We're finished with the _cur_alloc_region.
-    if (_cur_alloc_region->is_empty()) {
-      _free_regions++;
-      free_region(_cur_alloc_region);
-    } else {
-      // As we're builing (at least the young portion) of the collection
-      // set incrementally we'll add the current allocation region to
-      // the collection set here.
-      if (_cur_alloc_region->is_young()) {
-        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
-      }
-      _summary_bytes_used += _cur_alloc_region->used();
-    }
-    _cur_alloc_region = NULL;
-  }
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  HeapRegion* cur_alloc_region = _cur_alloc_region;
+  if (cur_alloc_region != NULL) {
+    assert(!cur_alloc_region->is_empty(),
+           "the current alloc region can never be empty");
+    assert(cur_alloc_region->is_young(),
+           "the current alloc region should be young");
+
+    retire_cur_alloc_region_common(cur_alloc_region);
+  }
+  assert(_cur_alloc_region == NULL, "post-condition");
 }
 
 void G1CollectedHeap::abandon_gc_alloc_regions() {
   // first, make sure that the GC alloc region list is empty (it should!)
   assert(_gc_alloc_region_list == NULL, "invariant");
@@ -1225,10 +1334,12 @@
 };
 
 bool G1CollectedHeap::do_collection(bool explicit_gc,
                                     bool clear_all_soft_refs,
                                     size_t word_size) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
   if (GC_locker::check_active_before_gc()) {
     return false;
   }
 
   SvcGCMarker sgcm(SvcGCMarker::FULL);
  1237   if (PrintHeapAtGC) {
  1348   if (PrintHeapAtGC) {
  1238     Universe::print_heap_before_gc();
  1349     Universe::print_heap_before_gc();
  1239   }
  1350   }
  1240 
  1351 
  1241   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  1352   verify_region_sets_optional();
  1242   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
       
  1243 
  1353 
  1244   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
  1354   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
  1245                            collector_policy()->should_clear_all_soft_refs();
  1355                            collector_policy()->should_clear_all_soft_refs();
  1246 
  1356 
  1247   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
  1357   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
@@ -1260,10 +1370,13 @@
     TraceMemoryManagerStats tms(true /* fullGC */);
 
     double start = os::elapsedTime();
     g1_policy()->record_full_collection_start();
 
+    wait_while_free_regions_coming();
+    append_secondary_free_list_if_not_empty();
+
     gc_prologue(true);
     increment_total_collections(true /* full gc */);
 
     size_t g1h_prev_used = used();
     assert(used() == recalculate_used(), "Should be equal");
@@ -1272,11 +1385,10 @@
       HandleMark hm;  // Discard invalid handles created during verification
       prepare_for_verify();
       gclog_or_tty->print(" VerifyBeforeGC:");
       Universe::verify(true);
     }
-    assert(regions_accounted_for(), "Region leakage!");
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
     // We want to discover references, but not process them yet.
     // This mode is disabled in
@@ -1296,11 +1408,10 @@
     abandon_cur_alloc_region();
     abandon_gc_alloc_regions();
     assert(_cur_alloc_region == NULL, "Invariant.");
     g1_rem_set()->cleanupHRRS();
     tear_down_region_lists();
-    set_used_regions_to_need_zero_fill();
 
     // We may have added regions to the current incremental collection
     // set between the last GC or pause and now. We need to clear the
     // incremental collection set and then start rebuilding it afresh
     // after this full GC.
@@ -1331,13 +1442,11 @@
     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
       G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
     }
-    // Because freeing humongous regions may have added some unclean
-    // regions, it is necessary to tear down again before rebuilding.
-    tear_down_region_lists();
+    assert(free_regions() == 0, "we should not have added any free regions");
     rebuild_region_lists();
 
     _summary_bytes_used = recalculate_used();
 
     ref_processor()->enqueue_discovered_references();
@@ -1415,11 +1524,10 @@
 
     // Discard all rset updates
     JavaThread::dirty_card_queue_set().abandon_logs();
     assert(!G1DeferredRSUpdate
            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
-    assert(regions_accounted_for(), "Region leakage!");
   }
 
   if (g1_policy()->in_young_gc_mode()) {
     _young_list->reset_sampled_info();
     // At this point there should be no regions in the
@@ -1428,10 +1536,12 @@
             "young list should be empty at this point");
   }
 
   // Update the number of full collections that have been completed.
   increment_full_collections_completed(false /* concurrent */);
+
+  verify_region_sets_optional();
 
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
 
@@ -1569,14 +1679,11 @@
 
 
 HeapWord*
 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                            bool* succeeded) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "satisfy_failed_allocation() should only be called at a safepoint");
-  assert(Thread::current()->is_VM_thread(),
-         "satisfy_failed_allocation() should only be called by the VM thread");
+  assert_at_safepoint(true /* should_be_vm_thread */);
 
   *succeeded = true;
   // Let's attempt the allocation first.
   HeapWord* result = attempt_allocation_at_safepoint(word_size,
                                      false /* expect_null_cur_alloc_region */);
@@ -1644,55 +1751,24 @@
 // to support an allocation of the given "word_size".  If
 // successful, perform the allocation and return the address of the
 // allocated block, or else "NULL".
 
 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "expand_and_allocate() should only be called at a safepoint");
-  assert(Thread::current()->is_VM_thread(),
-         "expand_and_allocate() should only be called by the VM thread");
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  verify_region_sets_optional();
 
   size_t expand_bytes = word_size * HeapWordSize;
   if (expand_bytes < MinHeapDeltaBytes) {
     expand_bytes = MinHeapDeltaBytes;
   }
   expand(expand_bytes);
-  assert(regions_accounted_for(), "Region leakage!");
+
+  verify_region_sets_optional();
 
   return attempt_allocation_at_safepoint(word_size,
                                      false /* expect_null_cur_alloc_region */);
-}
-
-size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
-  size_t pre_used = 0;
-  size_t cleared_h_regions = 0;
-  size_t freed_regions = 0;
-  UncleanRegionList local_list;
-  free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
-                                    freed_regions, &local_list);
-
-  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
-                          &local_list);
-  return pre_used;
-}
-
-void
-G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
-                                                   size_t& pre_used,
-                                                   size_t& cleared_h,
-                                                   size_t& freed_regions,
-                                                   UncleanRegionList* list,
-                                                   bool par) {
-  assert(!hr->continuesHumongous(), "should have filtered these out");
-  size_t res = 0;
-  if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
-      !hr->is_young()) {
-    if (G1PolicyVerbose > 0)
-      gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
-                                                                               " during cleanup", hr, hr->used());
-    free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
-  }
 }
 
 // FIXME: both this and shrink could probably be more efficient by
 // doing one "VirtualSpace::expand_by" call rather than several.
 void G1CollectedHeap::expand(size_t expand_bytes) {
@@ -1723,31 +1799,20 @@
       // Now update max_committed if necessary.
       _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
 
       // Add it to the HeapRegionSeq.
       _hrs->insert(hr);
-      // Set the zero-fill state, according to whether it's already
-      // zeroed.
-      {
-        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-        if (is_zeroed) {
-          hr->set_zero_fill_complete();
-          put_free_region_on_list_locked(hr);
-        } else {
-          hr->set_zero_fill_needed();
-          put_region_on_unclean_list_locked(hr);
-        }
-      }
-      _free_regions++;
+      _free_list.add_as_tail(hr);
       // And we used up an expansion region to create it.
       _expansion_regions--;
       // Tell the cardtable about it.
       Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
       // And the offset table as well.
       _bot_shared->resize(_g1_committed.word_size());
     }
   }
+
   if (Verbose && PrintGC) {
     size_t new_mem_size = _g1_storage.committed_size();
     gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
                            old_mem_size/K, aligned_expand_bytes/K,
                            new_mem_size/K);
@@ -1768,11 +1833,10 @@
   if (mr.byte_size() > 0)
     _g1_storage.shrink_by(mr.byte_size());
   assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
 
   _g1_committed.set_end(mr.start());
-  _free_regions -= num_regions_deleted;
   _expansion_regions += num_regions_deleted;
 
   // Tell the cardtable about it.
   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
 
@@ -1788,14 +1852,21 @@
                            new_mem_size/K);
   }
 }
 
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
+  verify_region_sets_optional();
+
   release_gc_alloc_regions(true /* totally */);
+  // Instead of tearing down / rebuilding the free lists here, we
+  // could instead use the remove_all_pending() method on free_list to
+  // remove only the ones that we need to remove.
   tear_down_region_lists();  // We will rebuild them in a moment.
   shrink_helper(shrink_bytes);
   rebuild_region_lists();
+
+  verify_region_sets_optional();
 }
 
 // Public methods.
 
 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
@@ -1810,22 +1881,21 @@
   _into_cset_dirty_card_queue_set(false),
   _is_alive_closure(this),
   _ref_processor(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
-  _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
   _evac_failure_scan_stack(NULL) ,
   _mark_in_progress(false),
-  _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
+  _cg1r(NULL), _summary_bytes_used(0),
   _cur_alloc_region(NULL),
   _refine_cte_cl(NULL),
-  _free_region_list(NULL), _free_region_list_size(0),
-  _free_regions(0),
   _full_collection(false),
-  _unclean_region_list(),
-  _unclean_regions_coming(false),
+  _free_list("Master Free List"),
+  _secondary_free_list("Secondary Free List"),
+  _humongous_set("Master Humongous Set"),
+  _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
   _full_collections_completed(0),
   _in_cset_fast_test(NULL),
  1942   _reserved.set_start((HeapWord*)heap_rs.base());
  2012   _reserved.set_start((HeapWord*)heap_rs.base());
  1943   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  2013   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  1944 
  2014 
  1945   _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
  2015   _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
  1946 
  2016 
  1947   _num_humongous_regions = 0;
       
  1948 
       
  1949   // Create the gen rem set (and barrier set) for the entire reserved region.
  2017   // Create the gen rem set (and barrier set) for the entire reserved region.
  1950   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  2018   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  1951   set_barrier_set(rem_set()->bs());
  2019   set_barrier_set(rem_set()->bs());
  1952   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  2020   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  1953     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  2021     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  1988   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  2056   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  1989   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  2057   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  1990   guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
  2058   guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
  1991             "too many cards per region");
  2059             "too many cards per region");
  1992 
  2060 
       
  2061   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
       
  2062 
  1993   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  2063   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  1994                                              heap_word_size(init_byte_size));
  2064                                              heap_word_size(init_byte_size));
  1995 
  2065 
  1996   _g1h = this;
  2066   _g1h = this;
  1997 
  2067 
  2011 
  2081 
  2012   // Create the ConcurrentMark data structure and thread.
  2082   // Create the ConcurrentMark data structure and thread.
  2013   // (Must do this late, so that "max_regions" is defined.)
  2083   // (Must do this late, so that "max_regions" is defined.)
  2014   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  2084   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  2015   _cmThread = _cm->cmThread();
  2085   _cmThread = _cm->cmThread();
  2016 
       
  2017   // ...and the concurrent zero-fill thread, if necessary.
       
  2018   if (G1ConcZeroFill) {
       
  2019     _czft = new ConcurrentZFThread();
       
  2020   }
       
  2021 
  2086 
  2022   // Initialize the from_card cache structure of HeapRegionRemSet.
  2087   // Initialize the from_card cache structure of HeapRegionRemSet.
  2023   HeapRegionRemSet::init_heap(max_regions());
  2088   HeapRegionRemSet::init_heap(max_regions());
  2024 
  2089 
  2025   // Now expand into the initial heap size.
  2090   // Now expand into the initial heap size.
  2190   return blk.result();
  2255   return blk.result();
  2191 }
  2256 }
  2192 #endif // PRODUCT
  2257 #endif // PRODUCT
  2193 
  2258 
  2194 size_t G1CollectedHeap::unsafe_max_alloc() {
  2259 size_t G1CollectedHeap::unsafe_max_alloc() {
  2195   if (_free_regions > 0) return HeapRegion::GrainBytes;
  2260   if (free_regions() > 0) return HeapRegion::GrainBytes;
  2196   // otherwise, is there space in the current allocation region?
  2261   // otherwise, is there space in the current allocation region?
  2197 
  2262 
  2198   // We need to store the current allocation region in a local variable
  2263   // We need to store the current allocation region in a local variable
  2199   // here. The problem is that this method doesn't take any locks and
  2264   // here. The problem is that this method doesn't take any locks and
  2200   // there may be other threads which overwrite the current allocation
  2265   // there may be other threads which overwrite the current allocation
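// ---- Editorial sketch, not part of the changeset -----------------------
// The comment above describes a lock-free snapshot idiom: read the shared
// region pointer once into a local, then test and use only the local,
// because another thread may replace the shared pointer at any moment.
// A minimal, self-contained analogue; the names (Region, cur_alloc_region)
// are hypothetical stand-ins, and std::atomic is used here only to make
// the single read explicit.

#include <atomic>
#include <cstddef>

struct Region {
  std::size_t free() const { return 0; }  // stub for HeapRegion::free()
};

static std::atomic<Region*> cur_alloc_region{nullptr};

std::size_t unsafe_max_alloc_sketch() {
  Region* hr = cur_alloc_region.load();   // one snapshot of the pointer
  if (hr != nullptr) {
    return hr->free();  // possibly stale, but never a replaced pointer
  }
  return 0;
}
// ------------------------------------------------------------------------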
  2270   // waiting in VM_G1IncCollectionPause::doit_epilogue().
  2335   // waiting in VM_G1IncCollectionPause::doit_epilogue().
  2271   FullGCCount_lock->notify_all();
  2336   FullGCCount_lock->notify_all();
  2272 }
  2337 }
  2273 
  2338 
  2274 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  2339 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  2275   assert(Thread::current()->is_VM_thread(), "Precondition#1");
  2340   assert_at_safepoint(true /* should_be_vm_thread */);
  2276   assert(Heap_lock->is_locked(), "Precondition#2");
       
  2277   GCCauseSetter gcs(this, cause);
  2341   GCCauseSetter gcs(this, cause);
  2278   switch (cause) {
  2342   switch (cause) {
  2279     case GCCause::_heap_inspection:
  2343     case GCCause::_heap_inspection:
  2280     case GCCause::_heap_dump: {
  2344     case GCCause::_heap_dump: {
  2281       HandleMark hm;
  2345       HandleMark hm;
  2293 
  2357 
  2294   unsigned int gc_count_before;
  2358   unsigned int gc_count_before;
  2295   unsigned int full_gc_count_before;
  2359   unsigned int full_gc_count_before;
  2296   {
  2360   {
  2297     MutexLocker ml(Heap_lock);
  2361     MutexLocker ml(Heap_lock);
  2298 
       
  2299     // Don't want to do a GC until cleanup is completed. This
       
  2300     // limitation will be removed in the near future when the
       
  2301     // operation of the free region list is revamped as part of
       
  2302     // CR 6977804.
       
  2303     wait_for_cleanup_complete();
       
  2304 
  2362 
  2305     // Read the GC count while holding the Heap_lock
  2363     // Read the GC count while holding the Heap_lock
  2306     gc_count_before = SharedHeap::heap()->total_collections();
  2364     gc_count_before = SharedHeap::heap()->total_collections();
  2307     full_gc_count_before = SharedHeap::heap()->total_full_collections();
  2365     full_gc_count_before = SharedHeap::heap()->total_full_collections();
  2308   }
  2366   }
  2678     return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
  2736     return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
  2679                 max_tlab_size);
  2737                 max_tlab_size);
  2680   }
  2738   }
  2681 }
  2739 }
  2682 
  2740 
  2683 bool G1CollectedHeap::allocs_are_zero_filled() {
       
  2684   return false;
       
  2685 }
       
  2686 
       
  2687 size_t G1CollectedHeap::large_typearray_limit() {
  2741 size_t G1CollectedHeap::large_typearray_limit() {
  2688   // FIXME
  2742   // FIXME
  2689   return HeapRegion::GrainBytes/HeapWordSize;
  2743   return HeapRegion::GrainBytes/HeapWordSize;
  2690 }
  2744 }
  2691 
  2745 
  2695 
  2749 
  2696 jlong G1CollectedHeap::millis_since_last_gc() {
  2750 jlong G1CollectedHeap::millis_since_last_gc() {
  2697   // assert(false, "NYI");
  2751   // assert(false, "NYI");
  2698   return 0;
  2752   return 0;
  2699 }
  2753 }
  2700 
       
  2701 
  2754 
  2702 void G1CollectedHeap::prepare_for_verify() {
  2755 void G1CollectedHeap::prepare_for_verify() {
  2703   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2756   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2704     ensure_parsability(false);
  2757     ensure_parsability(false);
  2705   }
  2758   }
  2907                          &rootsCl,
  2960                          &rootsCl,
  2908                          &blobsCl,
  2961                          &blobsCl,
  2909                          &rootsCl);
  2962                          &rootsCl);
  2910     bool failures = rootsCl.failures();
  2963     bool failures = rootsCl.failures();
  2911     rem_set()->invalidate(perm_gen()->used_region(), false);
  2964     rem_set()->invalidate(perm_gen()->used_region(), false);
  2912     if (!silent) { gclog_or_tty->print("heapRegions "); }
  2965     if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
       
  2966     verify_region_sets();
       
  2967     if (!silent) { gclog_or_tty->print("HeapRegions "); }
  2913     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  2968     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  2914       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2969       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2915              "sanity check");
  2970              "sanity check");
  2916 
  2971 
  2917       G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
  2972       G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
  2935       _hrs->iterate(&blk);
  2990       _hrs->iterate(&blk);
  2936       if (blk.failures()) {
  2991       if (blk.failures()) {
  2937         failures = true;
  2992         failures = true;
  2938       }
  2993       }
  2939     }
  2994     }
  2940     if (!silent) gclog_or_tty->print("remset ");
  2995     if (!silent) gclog_or_tty->print("RemSet ");
  2941     rem_set()->verify();
  2996     rem_set()->verify();
  2942 
  2997 
  2943     if (failures) {
  2998     if (failures) {
  2944       gclog_or_tty->print_cr("Heap:");
  2999       gclog_or_tty->print_cr("Heap:");
  2945       print_on(gclog_or_tty, true /* extended */);
  3000       print_on(gclog_or_tty, true /* extended */);
  3006 
  3061 
  3007 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  3062 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  3008   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3063   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3009     workers()->print_worker_threads_on(st);
  3064     workers()->print_worker_threads_on(st);
  3010   }
  3065   }
  3011 
       
  3012   _cmThread->print_on(st);
  3066   _cmThread->print_on(st);
  3013   st->cr();
  3067   st->cr();
  3014 
       
  3015   _cm->print_worker_threads_on(st);
  3068   _cm->print_worker_threads_on(st);
  3016 
       
  3017   _cg1r->print_worker_threads_on(st);
  3069   _cg1r->print_worker_threads_on(st);
  3018 
       
  3019   _czft->print_on(st);
       
  3020   st->cr();
  3070   st->cr();
  3021 }
  3071 }
  3022 
  3072 
  3023 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  3073 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  3024   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3074   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3025     workers()->threads_do(tc);
  3075     workers()->threads_do(tc);
  3026   }
  3076   }
  3027   tc->do_thread(_cmThread);
  3077   tc->do_thread(_cmThread);
  3028   _cg1r->threads_do(tc);
  3078   _cg1r->threads_do(tc);
  3029   tc->do_thread(_czft);
       
  3030 }
  3079 }
  3031 
  3080 
  3032 void G1CollectedHeap::print_tracing_info() const {
  3081 void G1CollectedHeap::print_tracing_info() const {
  3033   // We'll overload this to mean "trace GC pause statistics."
  3082   // We'll overload this to mean "trace GC pause statistics."
  3034   if (TraceGen0Time || TraceGen1Time) {
  3083   if (TraceGen0Time || TraceGen1Time) {
  3040     g1_rem_set()->print_summary_info();
  3089     g1_rem_set()->print_summary_info();
  3041   }
  3090   }
  3042   if (G1SummarizeConcMark) {
  3091   if (G1SummarizeConcMark) {
  3043     concurrent_mark()->print_summary_info();
  3092     concurrent_mark()->print_summary_info();
  3044   }
  3093   }
  3045   if (G1SummarizeZFStats) {
       
  3046     ConcurrentZFThread::print_summary_info();
       
  3047   }
       
  3048   g1_policy()->print_yg_surv_rate_info();
  3094   g1_policy()->print_yg_surv_rate_info();
  3049 
       
  3050   SpecializationStats::print();
  3095   SpecializationStats::print();
  3051 }
  3096 }
  3052 
       
  3053 
  3097 
  3054 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
  3098 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
  3055   HeapRegion* hr = heap_region_containing(addr);
  3099   HeapRegion* hr = heap_region_containing(addr);
  3056   if (hr == NULL) {
  3100   if (hr == NULL) {
  3057     return 0;
  3101     return 0;
  3247 }
  3291 }
  3248 #endif // TASKQUEUE_STATS
  3292 #endif // TASKQUEUE_STATS
  3249 
  3293 
  3250 bool
  3294 bool
  3251 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  3295 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       
  3296   assert_at_safepoint(true /* should_be_vm_thread */);
       
  3297   guarantee(!is_gc_active(), "collection is not reentrant");
       
  3298 
  3252   if (GC_locker::check_active_before_gc()) {
  3299   if (GC_locker::check_active_before_gc()) {
  3253     return false;
  3300     return false;
  3254   }
  3301   }
  3255 
  3302 
  3256   SvcGCMarker sgcm(SvcGCMarker::MINOR);
  3303   SvcGCMarker sgcm(SvcGCMarker::MINOR);
  3257   ResourceMark rm;
  3304   ResourceMark rm;
  3258 
  3305 
  3259   if (PrintHeapAtGC) {
  3306   if (PrintHeapAtGC) {
  3260     Universe::print_heap_before_gc();
  3307     Universe::print_heap_before_gc();
  3261   }
  3308   }
       
  3309 
       
  3310   verify_region_sets_optional();
  3262 
  3311 
  3263   {
  3312   {
  3264     // This call will decide whether this pause is an initial-mark
  3313     // This call will decide whether this pause is an initial-mark
  3265     // pause. If it is, during_initial_mark_pause() will return true
  3314     // pause. If it is, during_initial_mark_pause() will return true
  3266     // for the duration of this pause.
  3315     // for the duration of this pause.
  3288     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  3337     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  3289     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
  3338     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
  3290 
  3339 
  3291     TraceMemoryManagerStats tms(false /* fullGC */);
  3340     TraceMemoryManagerStats tms(false /* fullGC */);
  3292 
  3341 
  3293     assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  3342     // If there are any free regions available on the secondary_free_list
  3294     assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
  3343     // make sure we append them to the free_list. However, we don't
  3295     guarantee(!is_gc_active(), "collection is not reentrant");
  3344     // have to wait for the rest of the cleanup operation to
  3296     assert(regions_accounted_for(), "Region leakage!");
  3345     // finish. If it's still going on that's OK. If we run out of
       
  3346     // regions, the region allocation code will check the
       
  3347     // secondary_free_list and potentially wait if more free regions
       
  3348     // are coming (see new_region_try_secondary_free_list()).
       
  3349     if (!G1StressConcRegionFreeing) {
       
  3350       append_secondary_free_list_if_not_empty();
       
  3351     }
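// ---- Editorial sketch, not part of the changeset -----------------------
// append_secondary_free_list_if_not_empty() lets the pause pick up any
// regions that concurrent cleanup has already freed without waiting for
// cleanup to finish; stragglers are fetched later through
// new_region_try_secondary_free_list(). A self-contained analogue of that
// hand-off, with hypothetical stand-ins for the region set types:

#include <list>
#include <mutex>

struct Region { int index; };

static std::mutex secondary_lock;               // SecondaryFreeList_lock analogue
static std::list<Region*> free_list;            // master free list
static std::list<Region*> secondary_free_list;  // filled by concurrent cleanup

void append_secondary_if_not_empty() {
  std::lock_guard<std::mutex> x(secondary_lock);
  if (secondary_free_list.empty()) return;
  // Splice the whole secondary list onto the master list in O(1);
  // cleanup may keep producing afterwards, which is fine.
  free_list.splice(free_list.end(), secondary_free_list);
}
// ------------------------------------------------------------------------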
  3297 
  3352 
  3298     increment_gc_time_stamp();
  3353     increment_gc_time_stamp();
  3299 
  3354 
  3300     if (g1_policy()->in_young_gc_mode()) {
  3355     if (g1_policy()->in_young_gc_mode()) {
  3301       assert(check_young_list_well_formed(),
  3356       assert(check_young_list_well_formed(),
  3370       // Record the number of elements currently on the mark stack, so we
  3425       // Record the number of elements currently on the mark stack, so we
  3371       // only iterate over these.  (Since evacuation may add to the mark
  3426       // only iterate over these.  (Since evacuation may add to the mark
  3372       // stack, doing more exposes race conditions.)  If no mark is in
  3427       // stack, doing more exposes race conditions.)  If no mark is in
  3373       // progress, this will be zero.
  3428       // progress, this will be zero.
  3374       _cm->set_oops_do_bound();
  3429       _cm->set_oops_do_bound();
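// ---- Editorial sketch, not part of the changeset -----------------------
// Recording the mark-stack size before evacuation bounds the later
// iteration: entries pushed concurrently during evacuation are simply not
// visited by this pass, which avoids the race the comment describes. A
// minimal analogue (MarkStack and its members are hypothetical):

#include <cstddef>
#include <vector>

struct MarkStack {
  std::vector<void*> elems;
  std::size_t oops_do_bound;                 // analogue of set_oops_do_bound()

  void set_bound() { oops_do_bound = elems.size(); }

  template <typename Closure>
  void iterate_up_to_bound(Closure cl) {
    // Visit only the first oops_do_bound entries; anything pushed after
    // set_bound() is left for a later pass.
    for (std::size_t i = 0; i < oops_do_bound; ++i) {
      cl(elems[i]);
    }
  }
};
// ------------------------------------------------------------------------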
  3375 
       
  3376       assert(regions_accounted_for(), "Region leakage.");
       
  3377 
  3430 
  3378       if (mark_in_progress())
  3431       if (mark_in_progress())
  3379         concurrent_mark()->newCSet();
  3432         concurrent_mark()->newCSet();
  3380 
  3433 
  3381 #if YOUNG_LIST_VERBOSE
  3434 #if YOUNG_LIST_VERBOSE
  3468       double end_time_sec = os::elapsedTime();
  3521       double end_time_sec = os::elapsedTime();
  3469       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  3522       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  3470       g1_policy()->record_pause_time_ms(pause_time_ms);
  3523       g1_policy()->record_pause_time_ms(pause_time_ms);
  3471       g1_policy()->record_collection_pause_end();
  3524       g1_policy()->record_collection_pause_end();
  3472 
  3525 
  3473       assert(regions_accounted_for(), "Region leakage.");
       
  3474 
       
  3475       MemoryService::track_memory_usage();
  3526       MemoryService::track_memory_usage();
  3476 
  3527 
  3477       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  3528       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  3478         HandleMark hm;  // Discard invalid handles created during verification
  3529         HandleMark hm;  // Discard invalid handles created during verification
  3479         gclog_or_tty->print(" VerifyAfterGC:");
  3530         gclog_or_tty->print(" VerifyAfterGC:");
  3500 #endif
  3551 #endif
  3501 
  3552 
  3502       gc_epilogue(false);
  3553       gc_epilogue(false);
  3503     }
  3554     }
  3504 
  3555 
  3505     assert(verify_region_lists(), "Bad region lists.");
       
  3506 
       
  3507     if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
  3556     if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
  3508       gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
  3557       gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
  3509       print_tracing_info();
  3558       print_tracing_info();
  3510       vm_exit(-1);
  3559       vm_exit(-1);
  3511     }
  3560     }
  3512   }
  3561   }
       
  3562 
       
  3563   verify_region_sets_optional();
  3513 
  3564 
  3514   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  3565   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  3515   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  3566   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  3516 
  3567 
  3517   if (PrintHeapAtGC) {
  3568   if (PrintHeapAtGC) {
  3615   }
  3666   }
  3616 }
  3667 }
  3617 
  3668 
  3618 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
  3669 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
  3619   assert(Thread::current()->is_VM_thread() ||
  3670   assert(Thread::current()->is_VM_thread() ||
  3620          par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
  3671          FreeList_lock->owned_by_self(), "Precondition");
  3621   assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
  3672   assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
  3622          "Precondition.");
  3673          "Precondition.");
  3623   hr->set_is_gc_alloc_region(true);
  3674   hr->set_is_gc_alloc_region(true);
  3624   hr->set_next_gc_alloc_region(_gc_alloc_region_list);
  3675   hr->set_next_gc_alloc_region(_gc_alloc_region_list);
  3625   _gc_alloc_region_list = hr;
  3676   _gc_alloc_region_list = hr;
  3637   }
  3688   }
  3638 };
  3689 };
  3639 #endif // G1_DEBUG
  3690 #endif // G1_DEBUG
  3640 
  3691 
  3641 void G1CollectedHeap::forget_alloc_region_list() {
  3692 void G1CollectedHeap::forget_alloc_region_list() {
  3642   assert(Thread::current()->is_VM_thread(), "Precondition");
  3693   assert_at_safepoint(true /* should_be_vm_thread */);
  3643   while (_gc_alloc_region_list != NULL) {
  3694   while (_gc_alloc_region_list != NULL) {
  3644     HeapRegion* r = _gc_alloc_region_list;
  3695     HeapRegion* r = _gc_alloc_region_list;
  3645     assert(r->is_gc_alloc_region(), "Invariant.");
  3696     assert(r->is_gc_alloc_region(), "Invariant.");
  3646     // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
  3697     // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
  3647     // newly allocated data in order to be able to apply deferred updates
  3698     // newly allocated data in order to be able to apply deferred updates
  3657         r->set_not_young();
  3708         r->set_not_young();
  3658       } else {
  3709       } else {
  3659         _young_list->add_survivor_region(r);
  3710         _young_list->add_survivor_region(r);
  3660       }
  3711       }
  3661     }
  3712     }
  3662     if (r->is_empty()) {
       
  3663       ++_free_regions;
       
  3664     }
       
  3665   }
  3713   }
  3666 #ifdef G1_DEBUG
  3714 #ifdef G1_DEBUG
  3667   FindGCAllocRegion fa;
  3715   FindGCAllocRegion fa;
  3668   heap_region_iterate(&fa);
  3716   heap_region_iterate(&fa);
  3669 #endif // G1_DEBUG
  3717 #endif // G1_DEBUG
  3712       }
  3760       }
  3713     }
  3761     }
  3714 
  3762 
  3715     if (alloc_region == NULL) {
  3763     if (alloc_region == NULL) {
  3716       // we will get a new GC alloc region
  3764       // we will get a new GC alloc region
  3717       alloc_region = newAllocRegionWithExpansion(ap, 0);
  3765       alloc_region = new_gc_alloc_region(ap, 0);
  3718     } else {
  3766     } else {
  3719       // the region was retained from the last collection
  3767       // the region was retained from the last collection
  3720       ++_gc_alloc_region_counts[ap];
  3768       ++_gc_alloc_region_counts[ap];
  3721       if (G1PrintHeapRegions) {
  3769       if (G1PrintHeapRegions) {
  3722         gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
  3770         gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
  3767     if (r != NULL) {
  3815     if (r != NULL) {
  3768       // we retain nothing on _gc_alloc_regions between GCs
  3816       // we retain nothing on _gc_alloc_regions between GCs
  3769       set_gc_alloc_region(ap, NULL);
  3817       set_gc_alloc_region(ap, NULL);
  3770 
  3818 
  3771       if (r->is_empty()) {
  3819       if (r->is_empty()) {
  3772         // we didn't actually allocate anything in it; let's just put
  3820         // We didn't actually allocate anything in it; let's just put
  3773         // it on the free list
  3821         // it back on the free list.
  3774         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  3822         _free_list.add_as_tail(r);
  3775         r->set_zero_fill_complete();
       
  3776         put_free_region_on_list_locked(r);
       
  3777       } else if (_retain_gc_alloc_region[ap] && !totally) {
  3823       } else if (_retain_gc_alloc_region[ap] && !totally) {
  3778         // retain it so that we can use it at the beginning of the next GC
  3824         // retain it so that we can use it at the beginning of the next GC
  3779         _retained_gc_alloc_regions[ap] = r;
  3825         _retained_gc_alloc_regions[ap] = r;
  3780       }
  3826       }
  3781     }
  3827     }
  4126   // let the caller handle alloc failure
  4172   // let the caller handle alloc failure
  4127   if (alloc_region == NULL) return NULL;
  4173   if (alloc_region == NULL) return NULL;
  4128 
  4174 
  4129   HeapWord* block = alloc_region->par_allocate(word_size);
  4175   HeapWord* block = alloc_region->par_allocate(word_size);
  4130   if (block == NULL) {
  4176   if (block == NULL) {
  4131     MutexLockerEx x(par_alloc_during_gc_lock(),
       
  4132                     Mutex::_no_safepoint_check_flag);
       
  4133     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
  4177     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
  4134   }
  4178   }
  4135   return block;
  4179   return block;
  4136 }
  4180 }
  4137 
  4181 
  4155                                          bool           par,
  4199                                          bool           par,
  4156                                          size_t         word_size) {
  4200                                          size_t         word_size) {
  4157   assert(!isHumongous(word_size),
  4201   assert(!isHumongous(word_size),
  4158          err_msg("we should not be seeing humongous allocation requests "
  4202          err_msg("we should not be seeing humongous allocation requests "
  4159                  "during GC, word_size = "SIZE_FORMAT, word_size));
  4203                  "during GC, word_size = "SIZE_FORMAT, word_size));
       
  4204 
       
  4205   // We need to make sure we serialize calls to this method. Given
       
  4206   // that the FreeList_lock guards accesses to the free_list anyway,
       
  4207   // and we need to potentially remove a region from it, we'll use it
       
  4208   // to protect the whole call.
       
  4209   MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  4160 
  4210 
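// ---- Editorial sketch, not part of the changeset -----------------------
// The slow path above serializes on FreeList_lock and then, as the code
// below notes, re-checks whether another thread already installed a new
// GC alloc region while this one was waiting. A minimal analogue of the
// lock-then-recheck idiom; Region, current_region and allocate_slow are
// hypothetical stand-ins:

#include <cstddef>
#include <mutex>

struct Region {
  void* par_allocate(std::size_t /*word_size*/) { return nullptr; }  // stub
};

static std::mutex free_list_lock;         // stand-in for FreeList_lock
static Region* current_region = nullptr;  // stand-in for _gc_alloc_regions[ap]

void* allocate_slow(Region* region_seen, std::size_t word_size) {
  std::lock_guard<std::mutex> x(free_list_lock);
  if (region_seen != current_region && current_region != nullptr) {
    // Another thread won the race and installed a fresh region while we
    // waited for the lock: retry the allocation there instead.
    return current_region->par_allocate(word_size);
  }
  // ... otherwise retire region_seen, take a region off the free list
  //     (already guarded by free_list_lock) and install it ...
  return nullptr;  // the caller handles allocation failure
}
// ------------------------------------------------------------------------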
  4161   HeapWord* block = NULL;
  4211   HeapWord* block = NULL;
  4162   // In the parallel case, a previous thread to obtain the lock may have
  4212   // In the parallel case, a previous thread to obtain the lock may have
  4163   // already assigned a new gc_alloc_region.
  4213   // already assigned a new gc_alloc_region.
  4164   if (alloc_region != _gc_alloc_regions[purpose]) {
  4214   if (alloc_region != _gc_alloc_regions[purpose]) {
  4201       return NULL;
  4251       return NULL;
  4202     }
  4252     }
  4203   }
  4253   }
  4204 
  4254 
  4205   // Now allocate a new region for allocation.
  4255   // Now allocate a new region for allocation.
  4206   alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
  4256   alloc_region = new_gc_alloc_region(purpose, word_size);
  4207 
  4257 
  4208   // let the caller handle alloc failure
  4258   // let the caller handle alloc failure
  4209   if (alloc_region != NULL) {
  4259   if (alloc_region != NULL) {
  4210 
  4260 
  4211     assert(check_gc_alloc_regions(), "alloc regions messed up");
  4261     assert(check_gc_alloc_regions(), "alloc regions messed up");
  4212     assert(alloc_region->saved_mark_at_top(),
  4262     assert(alloc_region->saved_mark_at_top(),
  4213            "Mark should have been saved already.");
  4263            "Mark should have been saved already.");
  4214     // We used to assert that the region was zero-filled here, but no
       
  4215     // longer.
       
  4216 
       
   4217     // This must be done last: once it's installed, other threads may
   4264     // This must be done last: once it's installed, other threads may
   4218     // allocate in it (without holding the lock).
   4265     // allocate in it (without holding the lock).
  4219     set_gc_alloc_region(purpose, alloc_region);
  4266     set_gc_alloc_region(purpose, alloc_region);
  4220 
  4267 
  4221     if (par) {
  4268     if (par) {
  4876     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  4923     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  4877   }
  4924   }
  4878   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  4925   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  4879 }
  4926 }
  4880 
  4927 
  4881 void G1CollectedHeap::free_region(HeapRegion* hr) {
  4928 void G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr,
  4882   size_t pre_used = 0;
  4929                                      size_t* pre_used,
  4883   size_t cleared_h_regions = 0;
  4930                                      FreeRegionList* free_list,
  4884   size_t freed_regions = 0;
  4931                                      HumongousRegionSet* humongous_proxy_set,
  4885   UncleanRegionList local_list;
  4932                                      bool par) {
  4886 
  4933   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
  4887   HeapWord* start = hr->bottom();
  4934     if (hr->isHumongous()) {
  4888   HeapWord* end   = hr->prev_top_at_mark_start();
  4935       assert(hr->startsHumongous(), "we should only see starts humongous");
  4889   size_t used_bytes = hr->used();
  4936       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
  4890   size_t live_bytes = hr->max_live_bytes();
  4937     } else {
  4891   if (used_bytes > 0) {
  4938       free_region(hr, pre_used, free_list, par);
  4892     guarantee( live_bytes <= used_bytes, "invariant" );
  4939     }
  4893   } else {
  4940   }
  4894     guarantee( live_bytes == 0, "invariant" );
  4941 }
  4895   }
  4942 
  4896 
  4943 void G1CollectedHeap::free_region(HeapRegion* hr,
  4897   size_t garbage_bytes = used_bytes - live_bytes;
  4944                                   size_t* pre_used,
  4898   if (garbage_bytes > 0)
  4945                                   FreeRegionList* free_list,
  4899     g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
       
  4900 
       
  4901   free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
       
  4902                    &local_list);
       
  4903   finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
       
  4904                           &local_list);
       
  4905 }
       
  4906 
       
  4907 void
       
  4908 G1CollectedHeap::free_region_work(HeapRegion* hr,
       
  4909                                   size_t& pre_used,
       
  4910                                   size_t& cleared_h_regions,
       
  4911                                   size_t& freed_regions,
       
  4912                                   UncleanRegionList* list,
       
  4913                                   bool par) {
  4946                                   bool par) {
  4914   pre_used += hr->used();
  4947   assert(!hr->isHumongous(), "this is only for non-humongous regions");
  4915   if (hr->isHumongous()) {
  4948   assert(!hr->is_empty(), "the region should not be empty");
  4916     assert(hr->startsHumongous(),
  4949   assert(free_list != NULL, "pre-condition");
  4917            "Only the start of a humongous region should be freed.");
  4950 
  4918     int ind = _hrs->find(hr);
  4951   *pre_used += hr->used();
  4919     assert(ind != -1, "Should have an index.");
  4952   hr->hr_clear(par, true /* clear_space */);
  4920     // Clear the start region.
  4953   free_list->add_as_tail(hr);
  4921     hr->hr_clear(par, true /*clear_space*/);
  4954 }
  4922     list->insert_before_head(hr);
  4955 
  4923     cleared_h_regions++;
  4956 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
  4924     freed_regions++;
  4957                                      size_t* pre_used,
  4925     // Clear any continued regions.
  4958                                      FreeRegionList* free_list,
  4926     ind++;
  4959                                      HumongousRegionSet* humongous_proxy_set,
  4927     while ((size_t)ind < n_regions()) {
  4960                                      bool par) {
  4928       HeapRegion* hrc = _hrs->at(ind);
  4961   assert(hr->startsHumongous(), "this is only for starts humongous regions");
  4929       if (!hrc->continuesHumongous()) break;
  4962   assert(free_list != NULL, "pre-condition");
  4930       // Otherwise, does continue the H region.
  4963   assert(humongous_proxy_set != NULL, "pre-condition");
  4931       assert(hrc->humongous_start_region() == hr, "Huh?");
  4964 
  4932       hrc->hr_clear(par, true /*clear_space*/);
  4965   size_t hr_used = hr->used();
  4933       cleared_h_regions++;
  4966   size_t hr_capacity = hr->capacity();
  4934       freed_regions++;
  4967   size_t hr_pre_used = 0;
  4935       list->insert_before_head(hrc);
  4968   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
  4936       ind++;
  4969   hr->set_notHumongous();
  4937     }
  4970   free_region(hr, &hr_pre_used, free_list, par);
  4938   } else {
  4971 
  4939     hr->hr_clear(par, true /*clear_space*/);
  4972   int i = hr->hrs_index() + 1;
  4940     list->insert_before_head(hr);
  4973   size_t num = 1;
  4941     freed_regions++;
  4974   while ((size_t) i < n_regions()) {
  4942     // If we're using clear2, this should not be enabled.
  4975     HeapRegion* curr_hr = _hrs->at(i);
  4943     // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
  4976     if (!curr_hr->continuesHumongous()) {
  4944   }
  4977       break;
  4945 }
  4978     }
  4946 
  4979     curr_hr->set_notHumongous();
  4947 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
  4980     free_region(curr_hr, &hr_pre_used, free_list, par);
  4948                                               size_t cleared_h_regions,
  4981     num += 1;
  4949                                               size_t freed_regions,
  4982     i += 1;
  4950                                               UncleanRegionList* list) {
  4983   }
  4951   if (list != NULL && list->sz() > 0) {
  4984   assert(hr_pre_used == hr_used,
  4952     prepend_region_list_on_unclean_list(list);
  4985          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
  4953   }
  4986                  "should be the same", hr_pre_used, hr_used));
  4954   // Acquire a lock, if we're parallel, to update possibly-shared
  4987   *pre_used += hr_pre_used;
  4955   // variables.
  4988 }
  4956   Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
  4989 
  4957   {
  4990 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
       
  4991                                        FreeRegionList* free_list,
       
  4992                                        HumongousRegionSet* humongous_proxy_set,
       
  4993                                        bool par) {
       
  4994   if (pre_used > 0) {
       
  4995     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
  4958     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  4996     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
       
  4997     assert(_summary_bytes_used >= pre_used,
       
  4998            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
       
  4999                    "should be >= pre_used: "SIZE_FORMAT,
       
  5000                    _summary_bytes_used, pre_used));
  4959     _summary_bytes_used -= pre_used;
  5001     _summary_bytes_used -= pre_used;
  4960     _num_humongous_regions -= (int) cleared_h_regions;
  5002   }
  4961     _free_regions += freed_regions;
  5003   if (free_list != NULL && !free_list->is_empty()) {
  4962   }
  5004     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  4963 }
  5005     _free_list.add_as_tail(free_list);
  4964 
  5006   }
       
  5007   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
       
  5008     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
       
  5009     _humongous_set.update_from_proxy(humongous_proxy_set);
       
  5010   }
       
  5011 }
  4965 
  5012 
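// ---- Editorial sketch, not part of the changeset -----------------------
// free_region() and update_sets_after_freeing_regions() above follow an
// accumulate-locally, merge-once pattern: each (possibly parallel) caller
// collects freed bytes and regions into locals, then merges them into the
// shared totals under the appropriate locks in a single step, taking the
// byte-count lock only when actually running in parallel. A minimal,
// self-contained analogue with hypothetical stand-in types:

#include <cstddef>
#include <list>
#include <mutex>

struct Region { std::size_t used; };
typedef std::list<Region*> RegionList;

static std::mutex free_list_lock;    // stand-in for FreeList_lock
static std::mutex rare_event_lock;   // stand-in for ParGCRareEvent_lock
static RegionList master_free_list;
static std::size_t summary_bytes_used = 0;

// Worker-local step: no locking, mirrors free_region(hr, &pre_used, ...).
void free_region_local(Region* hr, std::size_t* pre_used, RegionList* local) {
  *pre_used += hr->used;
  hr->used = 0;                      // analogue of hr_clear()
  local->push_back(hr);
}

// Merge step, mirrors update_sets_after_freeing_regions(): each shared
// structure is updated under its own lock; serial callers skip the
// byte-count lock entirely (par == false).
void merge_local(std::size_t pre_used, RegionList* local, bool par) {
  if (pre_used > 0) {
    if (par) {
      std::lock_guard<std::mutex> x(rare_event_lock);
      summary_bytes_used -= pre_used;
    } else {
      summary_bytes_used -= pre_used;
    }
  }
  if (!local->empty()) {
    std::lock_guard<std::mutex> x(free_list_lock);
    master_free_list.splice(master_free_list.end(), *local);
  }
}
// ------------------------------------------------------------------------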
  4966 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
  5013 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
  4967   while (list != NULL) {
  5014   while (list != NULL) {
  4968     guarantee( list->is_young(), "invariant" );
  5015     guarantee( list->is_young(), "invariant" );
  4969 
  5016 
  5083   }
  5130   }
  5084 #endif
  5131 #endif
  5085 }
  5132 }
  5086 
  5133 
  5087 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  5134 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
       
  5135   size_t pre_used = 0;
       
  5136   FreeRegionList local_free_list("Local List for CSet Freeing");
       
  5137 
  5088   double young_time_ms     = 0.0;
  5138   double young_time_ms     = 0.0;
  5089   double non_young_time_ms = 0.0;
  5139   double non_young_time_ms = 0.0;
  5090 
  5140 
   5091   // Since the collection set is a superset of the young list,
   5141   // Since the collection set is a superset of the young list,
  5092   // all we need to do to clear the young list is clear its
  5142   // all we need to do to clear the young list is clear its
  5101   HeapRegion* cur = cs_head;
  5151   HeapRegion* cur = cs_head;
  5102   int age_bound = -1;
  5152   int age_bound = -1;
  5103   size_t rs_lengths = 0;
  5153   size_t rs_lengths = 0;
  5104 
  5154 
  5105   while (cur != NULL) {
  5155   while (cur != NULL) {
       
  5156     assert(!is_on_free_list(cur), "sanity");
       
  5157 
  5106     if (non_young) {
  5158     if (non_young) {
  5107       if (cur->is_young()) {
  5159       if (cur->is_young()) {
  5108         double end_sec = os::elapsedTime();
  5160         double end_sec = os::elapsedTime();
  5109         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5161         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5110         non_young_time_ms += elapsed_ms;
  5162         non_young_time_ms += elapsed_ms;
  5111 
  5163 
  5112         start_sec = os::elapsedTime();
  5164         start_sec = os::elapsedTime();
  5113         non_young = false;
  5165         non_young = false;
  5114       }
  5166       }
  5115     } else {
  5167     } else {
  5116       if (!cur->is_on_free_list()) {
  5168       double end_sec = os::elapsedTime();
  5117         double end_sec = os::elapsedTime();
  5169       double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5118         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5170       young_time_ms += elapsed_ms;
  5119         young_time_ms += elapsed_ms;
  5171 
  5120 
  5172       start_sec = os::elapsedTime();
  5121         start_sec = os::elapsedTime();
  5173       non_young = true;
  5122         non_young = true;
       
  5123       }
       
  5124     }
  5174     }
  5125 
  5175 
  5126     rs_lengths += cur->rem_set()->occupied();
  5176     rs_lengths += cur->rem_set()->occupied();
  5127 
  5177 
  5128     HeapRegion* next = cur->next_in_collection_set();
  5178     HeapRegion* next = cur->next_in_collection_set();
  5151             (!cur->is_young() && cur->young_index_in_cset() == -1),
  5201             (!cur->is_young() && cur->young_index_in_cset() == -1),
  5152             "invariant" );
  5202             "invariant" );
  5153 
  5203 
  5154     if (!cur->evacuation_failed()) {
  5204     if (!cur->evacuation_failed()) {
   5155       // And the region is non-empty.
   5205       // And the region is non-empty.
  5156       assert(!cur->is_empty(),
  5206       assert(!cur->is_empty(), "Should not have empty regions in a CS.");
  5157              "Should not have empty regions in a CS.");
  5207       free_region(cur, &pre_used, &local_free_list, false /* par */);
  5158       free_region(cur);
       
  5159     } else {
  5208     } else {
  5160       cur->uninstall_surv_rate_group();
  5209       cur->uninstall_surv_rate_group();
  5161       if (cur->is_young())
  5210       if (cur->is_young())
  5162         cur->set_young_index_in_cset(-1);
  5211         cur->set_young_index_in_cset(-1);
  5163       cur->set_not_young();
  5212       cur->set_not_young();
  5174   if (non_young)
  5223   if (non_young)
  5175     non_young_time_ms += elapsed_ms;
  5224     non_young_time_ms += elapsed_ms;
  5176   else
  5225   else
  5177     young_time_ms += elapsed_ms;
  5226     young_time_ms += elapsed_ms;
  5178 
  5227 
       
  5228   update_sets_after_freeing_regions(pre_used, &local_free_list,
       
  5229                                     NULL /* humongous_proxy_set */,
       
  5230                                     false /* par */);
  5179   policy->record_young_free_cset_time_ms(young_time_ms);
  5231   policy->record_young_free_cset_time_ms(young_time_ms);
  5180   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
  5232   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
  5181 }
  5233 }
  5182 
  5234 
  5183 // This routine is similar to the above but does not record
  5235 // This routine is similar to the above but does not record
  5199     cur->set_young_index_in_cset(-1);
  5251     cur->set_young_index_in_cset(-1);
  5200     cur = next;
  5252     cur = next;
  5201   }
  5253   }
  5202 }
  5254 }
  5203 
  5255 
  5204 HeapRegion*
  5256 void G1CollectedHeap::set_free_regions_coming() {
  5205 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
  5257   if (G1ConcRegionFreeingVerbose) {
  5206   assert(ZF_mon->owned_by_self(), "Precondition");
  5258     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  5207   HeapRegion* res = pop_unclean_region_list_locked();
  5259                            "setting free regions coming");
  5208   if (res != NULL) {
  5260   }
  5209     assert(!res->continuesHumongous() &&
  5261 
  5210            res->zero_fill_state() != HeapRegion::Allocated,
  5262   assert(!free_regions_coming(), "pre-condition");
  5211            "Only free regions on unclean list.");
  5263   _free_regions_coming = true;
  5212     if (zero_filled) {
  5264 }
  5213       res->ensure_zero_filled_locked();
  5265 
  5214       res->set_zero_fill_allocated();
  5266 void G1CollectedHeap::reset_free_regions_coming() {
  5215     }
       
  5216   }
       
  5217   return res;
       
  5218 }
       
  5219 
       
  5220 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
       
  5221   MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
       
  5222   return alloc_region_from_unclean_list_locked(zero_filled);
       
  5223 }
       
  5224 
       
  5225 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
       
  5226   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       
  5227   put_region_on_unclean_list_locked(r);
       
  5228   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
       
  5229 }
       
  5230 
       
  5231 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
       
  5232   MutexLockerEx x(Cleanup_mon);
       
  5233   set_unclean_regions_coming_locked(b);
       
  5234 }
       
  5235 
       
  5236 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
       
  5237   assert(Cleanup_mon->owned_by_self(), "Precondition");
       
  5238   _unclean_regions_coming = b;
       
  5239   // Wake up mutator threads that might be waiting for completeCleanup to
       
  5240   // finish.
       
  5241   if (!b) Cleanup_mon->notify_all();
       
  5242 }
       
  5243 
       
  5244 void G1CollectedHeap::wait_for_cleanup_complete() {
       
  5245   assert_not_at_safepoint();
       
  5246   MutexLockerEx x(Cleanup_mon);
       
  5247   wait_for_cleanup_complete_locked();
       
  5248 }
       
  5249 
       
  5250 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
       
  5251   assert(Cleanup_mon->owned_by_self(), "precondition");
       
  5252   while (_unclean_regions_coming) {
       
  5253     Cleanup_mon->wait();
       
  5254   }
       
  5255 }
       
  5256 
       
  5257 void
       
  5258 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
       
  5259   assert(ZF_mon->owned_by_self(), "precondition.");
       
  5260 #ifdef ASSERT
       
  5261   if (r->is_gc_alloc_region()) {
       
  5262     ResourceMark rm;
       
  5263     stringStream region_str;
       
  5264     print_on(&region_str);
       
  5265     assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s",
       
  5266                                              region_str.as_string()));
       
  5267   }
       
  5268 #endif
       
  5269   _unclean_region_list.insert_before_head(r);
       
  5270 }
       
  5271 
       
  5272 void
       
  5273 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
       
  5274   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       
  5275   prepend_region_list_on_unclean_list_locked(list);
       
  5276   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
       
  5277 }
       
  5278 
       
  5279 void
       
  5280 G1CollectedHeap::
       
  5281 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
       
  5282   assert(ZF_mon->owned_by_self(), "precondition.");
       
  5283   _unclean_region_list.prepend_list(list);
       
  5284 }
       
  5285 
       
  5286 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
       
  5287   assert(ZF_mon->owned_by_self(), "precondition.");
       
  5288   HeapRegion* res = _unclean_region_list.pop();
       
  5289   if (res != NULL) {
       
  5290     // Inform ZF thread that there's a new unclean head.
       
  5291     if (_unclean_region_list.hd() != NULL && should_zf())
       
  5292       ZF_mon->notify_all();
       
  5293   }
       
  5294   return res;
       
  5295 }
       
  5296 
       
  5297 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
       
  5298   assert(ZF_mon->owned_by_self(), "precondition.");
       
  5299   return _unclean_region_list.hd();
       
  5300 }
       
  5301 
       
  5302 
       
  5303 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
       
  5304   assert(ZF_mon->owned_by_self(), "Precondition");
       
  5305   HeapRegion* r = peek_unclean_region_list_locked();
       
  5306   if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
       
  5307     // Result of below must be equal to "r", since we hold the lock.
       
  5308     (void)pop_unclean_region_list_locked();
       
  5309     put_free_region_on_list_locked(r);
       
  5310     return true;
       
  5311   } else {
       
  5312     return false;
       
  5313   }
       
  5314 }
       
  5315 
       
  5316 bool G1CollectedHeap::move_cleaned_region_to_free_list() {
       
  5317   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       
  5318   return move_cleaned_region_to_free_list_locked();
       
  5319 }
       
  5320 
       
  5321 
       
  5322 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
       
  5323   assert(ZF_mon->owned_by_self(), "precondition.");
       
  5324   assert(_free_region_list_size == free_region_list_length(), "Inv");
       
  5325   assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
       
  5326         "Regions on free list must be zero filled");
       
  5327   assert(!r->isHumongous(), "Must not be humongous.");
       
  5328   assert(r->is_empty(), "Better be empty");
       
  5329   assert(!r->is_on_free_list(),
       
  5330          "Better not already be on free list");
       
  5331   assert(!r->is_on_unclean_list(),
       
  5332          "Better not already be on unclean list");
       
  5333   r->set_on_free_list(true);
       
  5334   r->set_next_on_free_list(_free_region_list);
       
  5335   _free_region_list = r;
       
  5336   _free_region_list_size++;
       
  5337   assert(_free_region_list_size == free_region_list_length(), "Inv");
       
  5338 }
       
  5339 
       
  5340 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
       
  5341   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       
  5342   put_free_region_on_list_locked(r);
       
  5343 }
       
  5344 
       
  5345 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
       
  5346   assert(ZF_mon->owned_by_self(), "precondition.");
       
  5347   assert(_free_region_list_size == free_region_list_length(), "Inv");
       
  5348   HeapRegion* res = _free_region_list;
       
  5349   if (res != NULL) {
       
  5350     _free_region_list = res->next_from_free_list();
       
  5351     _free_region_list_size--;
       
  5352     res->set_on_free_list(false);
       
  5353     res->set_next_on_free_list(NULL);
       
  5354     assert(_free_region_list_size == free_region_list_length(), "Inv");
       
  5355   }
       
  5356   return res;
       
  5357 }
       
  5358 
       
  5359 
       
  5360 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
       
  5361   // By self, or on behalf of self.
       
  5362   assert(Heap_lock->is_locked(), "Precondition");
       
  5363   HeapRegion* res = NULL;
       
  5364   bool first = true;
       
  5365   while (res == NULL) {
       
  5366     if (zero_filled || !first) {
       
  5367       MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       
  5368       res = pop_free_region_list_locked();
       
  5369       if (res != NULL) {
       
  5370         assert(!res->zero_fill_is_allocated(),
       
  5371                "No allocated regions on free list.");
       
  5372         res->set_zero_fill_allocated();
       
  5373       } else if (!first) {
       
  5374         break;  // We tried both, time to return NULL.
       
  5375       }
       
  5376     }
       
  5377 
       
  5378     if (res == NULL) {
       
  5379       res = alloc_region_from_unclean_list(zero_filled);
       
  5380     }
       
  5381     assert(res == NULL ||
       
  5382            !zero_filled ||
       
  5383            res->zero_fill_is_allocated(),
       
  5384            "We must have allocated the region we're returning");
       
  5385     first = false;
       
  5386   }
       
  5387   return res;
       
  5388 }
       
  5389 
       
  5390 void G1CollectedHeap::remove_allocated_regions_from_lists() {
       
  5391   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       
  5392   {
  5267   {
  5393     HeapRegion* prev = NULL;
  5268     assert(free_regions_coming(), "pre-condition");
  5394     HeapRegion* cur = _unclean_region_list.hd();
  5269     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  5395     while (cur != NULL) {
  5270     _free_regions_coming = false;
  5396       HeapRegion* next = cur->next_from_unclean_list();
  5271     SecondaryFreeList_lock->notify_all();
  5397       if (cur->zero_fill_is_allocated()) {
  5272   }
  5398         // Remove from the list.
  5273 
  5399         if (prev == NULL) {
  5274   if (G1ConcRegionFreeingVerbose) {
  5400           (void)_unclean_region_list.pop();
  5275     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  5401         } else {
  5276                            "reset free regions coming");
  5402           _unclean_region_list.delete_after(prev);
  5277   }
  5403         }
  5278 }
  5404         cur->set_on_unclean_list(false);
  5279 
  5405         cur->set_next_on_unclean_list(NULL);
  5280 void G1CollectedHeap::wait_while_free_regions_coming() {
  5406       } else {
  5281   // Most of the time we won't have to wait, so let's do a quick test
  5407         prev = cur;
  5282   // first before we take the lock.
  5408       }
  5283   if (!free_regions_coming()) {
  5409       cur = next;
  5284     return;
  5410     }
  5285   }
  5411     assert(_unclean_region_list.sz() == unclean_region_list_length(),
  5286 
  5412            "Inv");
  5287   if (G1ConcRegionFreeingVerbose) {
       
  5288     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
       
  5289                            "waiting for free regions");
  5413   }
  5290   }
  5414 
  5291 
  5415   {
  5292   {
  5416     HeapRegion* prev = NULL;
  5293     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  5417     HeapRegion* cur = _free_region_list;
  5294     while (free_regions_coming()) {
  5418     while (cur != NULL) {
  5295       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  5419       HeapRegion* next = cur->next_from_free_list();
  5296     }
  5420       if (cur->zero_fill_is_allocated()) {
  5297   }
  5421         // Remove from the list.
  5298 
  5422         if (prev == NULL) {
  5299   if (G1ConcRegionFreeingVerbose) {
  5423           _free_region_list = cur->next_from_free_list();
  5300     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  5424         } else {
  5301                            "done waiting for free regions");
  5425           prev->set_next_on_free_list(cur->next_from_free_list());
  5302   }
  5426         }
       
  5427         cur->set_on_free_list(false);
       
  5428         cur->set_next_on_free_list(NULL);
       
  5429         _free_region_list_size--;
       
  5430       } else {
       
  5431         prev = cur;
       
  5432       }
       
  5433       cur = next;
       
  5434     }
       
  5435     assert(_free_region_list_size == free_region_list_length(), "Inv");
       
  5436   }
       
  5437 }
       
  5438 
       
  5439 bool G1CollectedHeap::verify_region_lists() {
       
  5440   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       
  5441   return verify_region_lists_locked();
       
  5442 }
       
  5443 
       
  5444 bool G1CollectedHeap::verify_region_lists_locked() {
       
  5445   HeapRegion* unclean = _unclean_region_list.hd();
       
  5446   while (unclean != NULL) {
       
  5447     guarantee(unclean->is_on_unclean_list(), "Well, it is!");
       
  5448     guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
       
  5449     guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
       
  5450               "Everything else is possible.");
       
  5451     unclean = unclean->next_from_unclean_list();
       
  5452   }
       
  5453   guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
       
  5454 
       
  5455   HeapRegion* free_r = _free_region_list;
       
  5456   while (free_r != NULL) {
       
  5457     assert(free_r->is_on_free_list(), "Well, it is!");
       
  5458     assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
       
  5459     switch (free_r->zero_fill_state()) {
       
  5460     case HeapRegion::NotZeroFilled:
       
  5461     case HeapRegion::ZeroFilling:
       
  5462       guarantee(false, "Should not be on free list.");
       
  5463       break;
       
  5464     default:
       
  5465       // Everything else is possible.
       
  5466       break;
       
  5467     }
       
  5468     free_r = free_r->next_from_free_list();
       
  5469   }
       
  5470   guarantee(_free_region_list_size == free_region_list_length(), "Inv");
       
  5471   // If we didn't do an assertion...
       
  5472   return true;
       
  5473 }
       
  5474 
       
  5475 size_t G1CollectedHeap::free_region_list_length() {
       
  5476   assert(ZF_mon->owned_by_self(), "precondition.");
       
  5477   size_t len = 0;
       
  5478   HeapRegion* cur = _free_region_list;
       
  5479   while (cur != NULL) {
       
  5480     len++;
       
  5481     cur = cur->next_from_free_list();
       
  5482   }
       
  5483   return len;
       
  5484 }
       
  5485 
       
  5486 size_t G1CollectedHeap::unclean_region_list_length() {
       
  5487   assert(ZF_mon->owned_by_self(), "precondition.");
       
  5488   return _unclean_region_list.length();
       
  5489 }
  5303 }
  5490 
  5304 
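// ---- Editorial sketch, not part of the changeset -----------------------
// wait_while_free_regions_coming() above does a cheap unlocked test of the
// flag before taking SecondaryFreeList_lock, since most callers find it
// clear, and reset_free_regions_coming() clears the flag under the lock
// and then notifies all waiters. A self-contained analogue using standard
// primitives (all names hypothetical):

#include <atomic>
#include <condition_variable>
#include <mutex>

static std::mutex secondary_lock;             // SecondaryFreeList_lock analogue
static std::condition_variable secondary_cv;  // the lock's wait/notify
static std::atomic<bool> regions_coming{false};

void wait_while_regions_coming() {
  if (!regions_coming.load()) return;         // quick test: skip the lock
  std::unique_lock<std::mutex> x(secondary_lock);
  secondary_cv.wait(x, [] { return !regions_coming.load(); });
}

void reset_regions_coming() {
  {
    std::lock_guard<std::mutex> x(secondary_lock);
    regions_coming.store(false);
  }
  secondary_cv.notify_all();                  // wake threads waiting above
}
// ------------------------------------------------------------------------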
  5491 size_t G1CollectedHeap::n_regions() {
  5305 size_t G1CollectedHeap::n_regions() {
  5492   return _hrs->length();
  5306   return _hrs->length();
  5493 }
  5307 }
  5494 
  5308 
  5495 size_t G1CollectedHeap::max_regions() {
  5309 size_t G1CollectedHeap::max_regions() {
  5496   return
  5310   return
  5497     (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
  5311     (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
  5498     HeapRegion::GrainBytes;
  5312     HeapRegion::GrainBytes;
  5499 }
       
  5500 
       
  5501 size_t G1CollectedHeap::free_regions() {
       
  5502   /* Possibly-expensive assert.
       
  5503   assert(_free_regions == count_free_regions(),
       
  5504          "_free_regions is off.");
       
  5505   */
       
  5506   return _free_regions;
       
  5507 }
       
  5508 
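        // The assert kept commented out above is an O(heap) walk
        // (count_free_regions() visits every region), which is why it is
        // not left enabled; it can be restored temporarily when the
        // _free_regions accounting is in doubt.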
       
  5509 bool G1CollectedHeap::should_zf() {
       
  5510   return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
       
  5511 }
       
  5512 
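        // should_zf() is the zero-fill thread's work test: keep
        // pre-zero-filling regions until the free-region list is stocked
        // with G1ConcZFMaxRegions regions.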
       
  5513 class RegionCounter: public HeapRegionClosure {
       
  5514   size_t _n;
       
  5515 public:
       
  5516   RegionCounter() : _n(0) {}
       
  5517   bool doHeapRegion(HeapRegion* r) {
       
  5518     if (r->is_empty()) {
       
  5519       assert(!r->isHumongous(), "H regions should not be empty.");
       
  5520       _n++;
       
  5521     }
       
  5522     return false;
       
  5523   }
       
  5524   int res() { return (int) _n; }
       
  5525 };
       
  5526 
       
  5527 size_t G1CollectedHeap::count_free_regions() {
       
  5528   RegionCounter rc;
       
  5529   heap_region_iterate(&rc);
       
  5530   size_t n = rc.res();
       
  5531   if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
       
  5532     n--;
       
  5533   return n;
       
  5534 }
       
  5535 
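        // The current allocation region is excluded above: it can still be
        // empty (nothing has been allocated in it yet) while being in use
        // for allocation, so counting it would overstate the free total.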
       
  5536 size_t G1CollectedHeap::count_free_regions_list() {
       
  5537   size_t n = 0;
       
  5538   size_t o = 0;
       
  5539   ZF_mon->lock_without_safepoint_check();
       
  5540   HeapRegion* cur = _free_region_list;
       
  5541   while (cur != NULL) {
       
  5542     cur = cur->next_from_free_list();
       
  5543     n++;
       
  5544   }
       
  5545   size_t m = unclean_region_list_length();
       
  5546   ZF_mon->unlock();
       
  5547   return n + m;
       
  5548 }
  5313 }
  5549 
  5314 
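        // For example (hypothetical numbers): with HeapRegion::GrainBytes
        // at 1M and a reservation of 64M + 1 bytes, align_size_up rounds
        // the reservation to 65M, so max_regions() returns 65.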
  5550 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  5315 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  5551   assert(heap_lock_held_for_gc(),
  5316   assert(heap_lock_held_for_gc(),
  5552               "the heap lock should already be held by or for this thread");
  5317               "the heap lock should already be held by or for this thread");
  5616       }
  5381       }
  5617     }
  5382     }
  5618   }
  5383   }
  5619 }
  5384 }
  5620 
  5385 
  5621 
       
  5622 // Done at the start of full GC.
  5386 // Done at the start of full GC.
  5623 void G1CollectedHeap::tear_down_region_lists() {
  5387 void G1CollectedHeap::tear_down_region_lists() {
  5624   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5388   _free_list.remove_all();
  5625   while (pop_unclean_region_list_locked() != NULL) ;
  5389 }
  5626   assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
       
  5627          "Postconditions of loop.");
       
  5628   while (pop_free_region_list_locked() != NULL) ;
       
  5629   assert(_free_region_list == NULL, "Postcondition of loop.");
       
  5630   if (_free_region_list_size != 0) {
       
   5631     gclog_or_tty->print_cr("Size is "SIZE_FORMAT".", _free_region_list_size);
       
  5632     print_on(gclog_or_tty, true /* extended */);
       
  5633   }
       
  5634   assert(_free_region_list_size == 0, "Postconditions of loop.");
       
  5635 }
       
  5636 
       
  5637 
  5390 
  5638 class RegionResetter: public HeapRegionClosure {
  5391 class RegionResetter: public HeapRegionClosure {
  5639   G1CollectedHeap* _g1;
  5392   G1CollectedHeap* _g1h;
  5640   int _n;
  5393   FreeRegionList _local_free_list;
       
  5394 
  5641 public:
  5395 public:
  5642   RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  5396   RegionResetter() : _g1h(G1CollectedHeap::heap()),
       
  5397                      _local_free_list("Local Free List for RegionResetter") { }
       
  5398 
  5643   bool doHeapRegion(HeapRegion* r) {
  5399   bool doHeapRegion(HeapRegion* r) {
  5644     if (r->continuesHumongous()) return false;
  5400     if (r->continuesHumongous()) return false;
  5645     if (r->top() > r->bottom()) {
  5401     if (r->top() > r->bottom()) {
  5646       if (r->top() < r->end()) {
  5402       if (r->top() < r->end()) {
  5647         Copy::fill_to_words(r->top(),
  5403         Copy::fill_to_words(r->top(),
  5648                           pointer_delta(r->end(), r->top()));
  5404                           pointer_delta(r->end(), r->top()));
  5649       }
  5405       }
  5650       r->set_zero_fill_allocated();
       
  5651     } else {
  5406     } else {
  5652       assert(r->is_empty(), "tautology");
  5407       assert(r->is_empty(), "tautology");
  5653       _n++;
  5408       _local_free_list.add_as_tail(r);
  5654       switch (r->zero_fill_state()) {
       
  5655         case HeapRegion::NotZeroFilled:
       
  5656         case HeapRegion::ZeroFilling:
       
  5657           _g1->put_region_on_unclean_list_locked(r);
       
  5658           break;
       
  5659         case HeapRegion::Allocated:
       
  5660           r->set_zero_fill_complete();
       
  5661           // no break; go on to put on free list.
       
  5662         case HeapRegion::ZeroFilled:
       
  5663           _g1->put_free_region_on_list_locked(r);
       
  5664           break;
       
  5665       }
       
  5666     }
  5409     }
  5667     return false;
  5410     return false;
  5668   }
  5411   }
  5669 
  5412 
  5670   int getFreeRegionCount() {return _n;}
  5413   void update_free_lists() {
       
  5414     _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL,
       
  5415                                             false /* par */);
       
  5416   }
  5671 };
  5417 };
  5672 
  5418 
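        // Both versions of RegionResetter follow the standard heap-walk
        // pattern: subclass HeapRegionClosure, implement doHeapRegion()
        // (returning true terminates the iteration early), and hand the
        // closure to heap_region_iterate(). A minimal sketch of the
        // pattern (hypothetical, for illustration only):
        //
        //   class EmptyRegionCounter : public HeapRegionClosure {
        //     size_t _n;
        //   public:
        //     EmptyRegionCounter() : _n(0) { }
        //     bool doHeapRegion(HeapRegion* r) {
        //       if (r->is_empty()) _n += 1;
        //       return false; // false == keep iterating
        //     }
        //     size_t count() { return _n; }
        //   };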
  5673 // Done at the end of full GC.
  5419 // Done at the end of full GC.
  5674 void G1CollectedHeap::rebuild_region_lists() {
  5420 void G1CollectedHeap::rebuild_region_lists() {
  5675   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       
  5676   // This needs to go at the end of the full GC.
  5421   // This needs to go at the end of the full GC.
  5677   RegionResetter rs;
  5422   RegionResetter rs;
  5678   heap_region_iterate(&rs);
  5423   heap_region_iterate(&rs);
  5679   _free_regions = rs.getFreeRegionCount();
  5424   rs.update_free_lists();
  5680   // Tell the ZF thread it may have work to do.
       
  5681   if (should_zf()) ZF_mon->notify_all();
       
  5682 }
       
  5683 
       
  5684 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
       
  5685   G1CollectedHeap* _g1;
       
  5686   int _n;
       
  5687 public:
       
  5688   UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
       
  5689   bool doHeapRegion(HeapRegion* r) {
       
  5690     if (r->continuesHumongous()) return false;
       
  5691     if (r->top() > r->bottom()) {
       
  5692       // There are assertions in "set_zero_fill_needed()" below that
       
  5693       // require top() == bottom(), so this is technically illegal.
       
  5694       // We'll skirt the law here, by making that true temporarily.
       
  5695       DEBUG_ONLY(HeapWord* save_top = r->top();
       
  5696                  r->set_top(r->bottom()));
       
  5697       r->set_zero_fill_needed();
       
  5698       DEBUG_ONLY(r->set_top(save_top));
       
  5699     }
       
  5700     return false;
       
  5701   }
       
  5702 };
       
  5703 
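        // DEBUG_ONLY(code) expands to its argument only in debug (ASSERT)
        // builds and to nothing in product builds, so the save/restore of
        // top() above exists exactly where the assertions in
        // set_zero_fill_needed() that it placates are compiled in.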
       
  5704 // Done at the start of full GC.
       
  5705 void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
       
  5706   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       
   5707   // This needs to go at the start of the full GC.
       
  5708   UsedRegionsNeedZeroFillSetter rs;
       
  5709   heap_region_iterate(&rs);
       
  5710 }
  5425 }
  5711 
  5426 
  5712 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  5427 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  5713   _refine_cte_cl->set_concurrent(concurrent);
  5428   _refine_cte_cl->set_concurrent(concurrent);
  5714 }
  5429 }
  5715 
  5430 
  5716 #ifndef PRODUCT
  5431 #ifdef ASSERT
  5717 
       
  5718 class PrintHeapRegionClosure: public HeapRegionClosure {
       
  5719 public:
       
  5720   bool doHeapRegion(HeapRegion *r) {
       
  5721     gclog_or_tty->print("Region: "PTR_FORMAT":", r);
       
  5722     if (r != NULL) {
       
  5723       if (r->is_on_free_list())
       
  5724         gclog_or_tty->print("Free ");
       
  5725       if (r->is_young())
       
  5726         gclog_or_tty->print("Young ");
       
  5727       if (r->isHumongous())
       
  5728         gclog_or_tty->print("Is Humongous ");
       
  5729       r->print();
       
  5730     }
       
  5731     return false;
       
  5732   }
       
  5733 };
       
  5734 
       
  5735 class SortHeapRegionClosure : public HeapRegionClosure {
       
   5736   size_t young_regions, free_regions, unclean_regions;
       
  5737   size_t hum_regions, count;
       
  5738   size_t unaccounted, cur_unclean, cur_alloc;
       
  5739   size_t total_free;
       
  5740   HeapRegion* cur;
       
  5741 public:
       
  5742   SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0),
       
  5743     free_regions(0), unclean_regions(0),
       
  5744     hum_regions(0),
       
   5745     count(0), unaccounted(0), cur_unclean(0),
        
   5746     cur_alloc(0), total_free(0)
       
  5747   {}
       
  5748   bool doHeapRegion(HeapRegion *r) {
       
  5749     count++;
       
  5750     if (r->is_on_free_list()) free_regions++;
       
  5751     else if (r->is_on_unclean_list()) unclean_regions++;
       
  5752     else if (r->isHumongous())  hum_regions++;
       
  5753     else if (r->is_young()) young_regions++;
       
  5754     else if (r == cur) cur_alloc++;
       
  5755     else unaccounted++;
       
  5756     return false;
       
  5757   }
       
  5758   void print() {
       
  5759     total_free = free_regions + unclean_regions;
       
   5760     gclog_or_tty->print(SIZE_FORMAT" regions\n", count);
        
   5761     gclog_or_tty->print(SIZE_FORMAT" free: free_list = "SIZE_FORMAT" unclean = "SIZE_FORMAT"\n",
        
   5762                         total_free, free_regions, unclean_regions);
        
   5763     gclog_or_tty->print(SIZE_FORMAT" humongous "SIZE_FORMAT" young\n",
        
   5764                         hum_regions, young_regions);
        
   5765     gclog_or_tty->print(SIZE_FORMAT" cur_alloc\n", cur_alloc);
        
   5766     gclog_or_tty->print("UHOH unaccounted = "SIZE_FORMAT"\n", unaccounted);
       
  5767   }
       
  5768 };
       
  5769 
       
  5770 void G1CollectedHeap::print_region_counts() {
       
  5771   SortHeapRegionClosure sc(_cur_alloc_region);
       
  5772   PrintHeapRegionClosure cl;
       
  5773   heap_region_iterate(&cl);
       
  5774   heap_region_iterate(&sc);
       
  5775   sc.print();
       
  5776   print_region_accounting_info();
       
   5777 }
       
  5778 
       
  5779 bool G1CollectedHeap::regions_accounted_for() {
       
  5780   // TODO: regions accounting for young/survivor/tenured
       
  5781   return true;
       
  5782 }
       
  5783 
       
  5784 bool G1CollectedHeap::print_region_accounting_info() {
       
   5785   gclog_or_tty->print_cr("Free regions: "SIZE_FORMAT" (count: "SIZE_FORMAT" count list "SIZE_FORMAT") (clean: "SIZE_FORMAT" unclean: "SIZE_FORMAT").",
       
  5786                          free_regions(),
       
  5787                          count_free_regions(), count_free_regions_list(),
       
  5788                          _free_region_list_size, _unclean_region_list.sz());
       
  5789   gclog_or_tty->print_cr("cur_alloc: %d.",
       
  5790                          (_cur_alloc_region == NULL ? 0 : 1));
       
  5791   gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
       
  5792 
       
  5793   // TODO: check regions accounting for young/survivor/tenured
       
  5794   return true;
       
  5795 }
       
  5796 
  5432 
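        // The region counters printed above are size_t values, so they are
        // formatted with the SIZE_FORMAT macro used elsewhere in this
        // file; a plain "%d" would truncate them on LP64 platforms.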
  5797 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  5433 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  5798   HeapRegion* hr = heap_region_containing(p);
  5434   HeapRegion* hr = heap_region_containing(p);
  5799   if (hr == NULL) {
  5435   if (hr == NULL) {
  5800     return is_in_permanent(p);
  5436     return is_in_permanent(p);
  5801   } else {
  5437   } else {
  5802     return hr->is_in(p);
  5438     return hr->is_in(p);
  5803   }
  5439   }
  5804 }
  5440 }
  5805 #endif // !PRODUCT
  5441 #endif // ASSERT
  5806 
  5442 
  5807 void G1CollectedHeap::g1_unimplemented() {
  5443 class VerifyRegionListsClosure : public HeapRegionClosure {
  5808   // Unimplemented();
  5444 private:
  5809 }
  5445   HumongousRegionSet* _humongous_set;
       
  5446   FreeRegionList*     _free_list;
       
  5447   size_t              _region_count;
       
  5448 
       
  5449 public:
       
  5450   VerifyRegionListsClosure(HumongousRegionSet* humongous_set,
       
  5451                            FreeRegionList* free_list) :
       
  5452     _humongous_set(humongous_set), _free_list(free_list),
       
  5453     _region_count(0) { }
       
  5454 
       
  5455   size_t region_count()      { return _region_count;      }
       
  5456 
       
  5457   bool doHeapRegion(HeapRegion* hr) {
       
  5458     _region_count += 1;
       
  5459 
       
  5460     if (hr->continuesHumongous()) {
       
  5461       return false;
       
  5462     }
       
  5463 
       
  5464     if (hr->is_young()) {
       
  5465       // TODO
       
  5466     } else if (hr->startsHumongous()) {
       
  5467       _humongous_set->verify_next_region(hr);
       
  5468     } else if (hr->is_empty()) {
       
  5469       _free_list->verify_next_region(hr);
       
  5470     }
       
  5471     return false;
       
  5472   }
       
  5473 };
       
  5474 
       
  5475 void G1CollectedHeap::verify_region_sets() {
       
  5476   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
       
  5477 
       
  5478   // First, check the explicit lists.
       
  5479   _free_list.verify();
       
  5480   {
       
  5481     // Given that a concurrent operation might be adding regions to
       
  5482     // the secondary free list we have to take the lock before
       
  5483     // verifying it.
       
  5484     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
       
  5485     _secondary_free_list.verify();
       
  5486   }
       
  5487   _humongous_set.verify();
       
  5488 
       
  5489   // If a concurrent region freeing operation is in progress it will
       
   5490   // be difficult to correctly attribute any free regions we come
       
  5491   // across to the correct free list given that they might belong to
       
  5492   // one of several (free_list, secondary_free_list, any local lists,
       
  5493   // etc.). So, if that's the case we will skip the rest of the
       
  5494   // verification operation. Alternatively, waiting for the concurrent
       
  5495   // operation to complete will have a non-trivial effect on the GC's
       
  5496   // operation (no concurrent operation will last longer than the
       
  5497   // interval between two calls to verification) and it might hide
       
  5498   // any issues that we would like to catch during testing.
       
  5499   if (free_regions_coming()) {
       
  5500     return;
       
  5501   }
       
  5502 
       
  5503   {
       
  5504     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
       
  5505     // Make sure we append the secondary_free_list on the free_list so
       
  5506     // that all free regions we will come across can be safely
       
  5507     // attributed to the free_list.
       
  5508     append_secondary_free_list();
       
  5509   }
       
  5510 
       
  5511   // Finally, make sure that the region accounting in the lists is
       
  5512   // consistent with what we see in the heap.
       
  5513   _humongous_set.verify_start();
       
  5514   _free_list.verify_start();
       
  5515 
       
  5516   VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
       
  5517   heap_region_iterate(&cl);
       
  5518 
       
  5519   _humongous_set.verify_end();
       
  5520   _free_list.verify_end();
       
  5521 }
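        // The verify_start() / verify_next_region() / verify_end() calls
        // above form a three-phase check: verify_start() arms a set for
        // verification, each verify_next_region() call checks the region
        // encountered during the heap walk against that set, and
        // verify_end() confirms that the walk accounted for every member.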