hotspot/src/share/vm/gc/g1/heapRegionManager.cpp
changeset 30764 fec48bf5a827
parent 30157 e36165b16dde
child 31346 a70d45c06136
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "memory/allocation.hpp"

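// Wires up the backing storage mappers and sizes the region table and
// availability map to cover the entire reserved heap. No regions are
// committed here; committing happens later, e.g. via expand_at().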
void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                   G1RegionToSpaceMapper* prev_bitmap,
                                   G1RegionToSpaceMapper* next_bitmap,
                                   G1RegionToSpaceMapper* bot,
                                   G1RegionToSpaceMapper* cardtable,
                                   G1RegionToSpaceMapper* card_counts) {
  _allocated_heapregions_length = 0;

  _heap_mapper = heap_storage;

  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;

  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);

  _available_map.resize(_regions.length(), false);
  _available_map.clear();
}

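// Returns true if the region with the given index is committed and
// available for use.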
bool HeapRegionManager::is_available(uint region) const {
  return _available_map.at(region);
}

#ifdef ASSERT
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif

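// Creates the HeapRegion instance covering the address range that
// corresponds to the given region index.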
HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  assert(reserved().contains(mr), "invariant");
  return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
}

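// Commits the heap storage for num_regions regions starting at index,
// together with the auxiliary data structures that track them (marking
// bitmaps, block offset table, card table and card counts).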
void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum number of regions");

  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions);

  // Also commit auxiliary data
  _prev_bitmap_mapper->commit_regions(index, num_regions);
  _next_bitmap_mapper->commit_regions(index, num_regions);

  _bot_mapper->commit_regions(index, num_regions);
  _cardtable_mapper->commit_regions(index, num_regions);

  _card_counts_mapper->commit_regions(index, num_regions);
}

void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
  guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
  guarantee(_num_committed >= num_regions, "pre-condition");

  // Print before uncommitting.
  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
    for (uint i = start; i < start + num_regions; i++) {
      HeapRegion* hr = at(i);
      G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
    }
  }

  _num_committed -= (uint)num_regions;

  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  _heap_mapper->uncommit_regions(start, num_regions);

  // Also uncommit auxiliary data
  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  _next_bitmap_mapper->uncommit_regions(start, num_regions);

  _bot_mapper->uncommit_regions(start, num_regions);
  _cardtable_mapper->uncommit_regions(start, num_regions);

  _card_counts_mapper->uncommit_regions(start, num_regions);
}

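// Commits the given range of regions, lazily creating HeapRegion objects
// for indices that have never been committed before, and adds every
// region in the range to the free list.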
void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
    }
  }

  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), err_msg("Just made region %u available but it is apparently not.", i));
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
    }
    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);

    hr->initialize(mr);
    insert_into_free_list(at(i));
  }
}

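// Note the mapping: the committed part of the auxiliary data is reported
// as "used" and the reserved part as "committed", because auxiliary
// space is consumed as soon as the corresponding regions are committed.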
MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
  size_t used_sz =
    _prev_bitmap_mapper->committed_size() +
    _next_bitmap_mapper->committed_size() +
    _bot_mapper->committed_size() +
    _cardtable_mapper->committed_size() +
    _card_counts_mapper->committed_size();

  size_t committed_sz =
    _prev_bitmap_mapper->reserved_size() +
    _next_bitmap_mapper->reserved_size() +
    _bot_mapper->reserved_size() +
    _cardtable_mapper->reserved_size() +
    _card_counts_mapper->reserved_size();

  return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}

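// Expands the heap by committing up to num_regions currently uncommitted
// regions, scanning from the bottom of the heap (expand_by) or from a
// given start index (expand_at). Returns the number of regions actually
// committed, which may be less than requested.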
uint HeapRegionManager::expand_by(uint num_regions) {
  return expand_at(0, num_regions);
}

uint HeapRegionManager::expand_at(uint start, uint num_regions) {
  if (num_regions == 0) {
    return 0;
  }

  uint cur = start;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  uint expanded = 0;

  while (expanded < num_regions &&
         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
    uint to_expand = MIN2(num_regions - expanded, num_last_found);
    make_regions_available(idx_last_found, to_expand);
    expanded += to_expand;
    cur = idx_last_found + num_last_found + 1;
  }

  verify_optional();
  return expanded;
}

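// Looks for a contiguous run of num regions to allocate into. If
// empty_only is true, only committed, empty regions qualify; otherwise
// uncommitted regions are acceptable as well. Returns the index of the
// first region of the run, or G1_NO_HRM_INDEX if no such run exists.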
uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
  uint found = 0;
  size_t length_found = 0;
  uint cur = 0;

  while (length_found < num && cur < max_length()) {
    HeapRegion* hr = _regions.get_by_index(cur);
    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
      // This region is a potential candidate for allocation into.
      length_found++;
    } else {
      // This region is not a candidate. The next region is the next possible one.
      found = cur + 1;
      length_found = 0;
    }
    cur++;
  }

  if (length_found == num) {
    for (uint i = found; i < (found + num); i++) {
      HeapRegion* hr = _regions.get_by_index(i);
      // sanity check
      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
                err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
                        " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
    }
    return found;
  } else {
    return G1_NO_HRM_INDEX;
  }
}

HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
  guarantee(r != NULL, "Start region must be a valid region");
  guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index()));
  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    if (is_available(i)) {
      return hr;
    }
  }
  return NULL;
}

void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
  uint len = max_length();

  for (uint i = 0; i < len; i++) {
    if (!is_available(i)) {
      continue;
    }
    guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
    bool res = blk->doHeapRegion(at(i));
    if (res) {
      blk->incomplete();
      return;
    }
  }
}

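// Finds the first run of uncommitted regions at or above start_idx.
// Stores the index of the first region of the run in *res_idx and
// returns the length of the run (zero if the end of the region map is
// reached without finding an uncommitted region).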
uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
  guarantee(res_idx != NULL, "checking");
  guarantee(start_idx <= (max_length() + 1), "checking");

  uint num_regions = 0;

  uint cur = start_idx;
  while (cur < max_length() && is_available(cur)) {
    cur++;
  }
  if (cur == max_length()) {
    return num_regions;
  }
  *res_idx = cur;
  while (cur < max_length() && !is_available(cur)) {
    cur++;
  }
  num_regions = cur - *res_idx;
#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
    assert(!is_available(i), "just checking");
  }
  assert(cur == max_length() || num_regions == 0 || is_available(cur),
         err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
#endif
  return num_regions;
}

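// Parallel iteration over all committed regions: each worker starts at a
// different offset derived from its worker_id to reduce contention while
// claiming, and skips regions that are unavailable or already claimed.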
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
  const uint start_index = hrclaimer->start_region_for_worker(worker_id);

  // Every worker will actually look at all regions, skipping over regions that
  // are currently not committed.
  // This also (potentially) iterates over regions newly allocated during GC. This
  // is no problem except for some extra work.
  const uint n_regions = hrclaimer->n_regions();
  for (uint count = 0; count < n_regions; count++) {
    const uint index = (start_index + count) % n_regions;
    assert(index < n_regions, "sanity");
    // Skip over unavailable regions
    if (!is_available(index)) {
      continue;
    }
    HeapRegion* r = _regions.get_by_index(index);
    // We'll ignore "continues humongous" regions (we'll process them
    // when we come across their corresponding "start humongous"
    // region) and regions already claimed.
    // However, if the iteration is specified as concurrent, the values for
    // is_starts_humongous and is_continues_humongous cannot be trusted,
    // and we should just blindly iterate over regions regardless of their
    // humongous status.
    if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
      continue;
    }
    // OK, try to claim it
    if (!hrclaimer->claim_region(index)) {
      continue;
    }
    // Success!
    // As mentioned above, special treatment of humongous regions can only be
    // done if we are iterating non-concurrently.
    if (!concurrent && r->is_starts_humongous()) {
      // If the region is "starts humongous", we'll process its
      // "continues humongous" regions before the region itself.
      // The order is important: calling the closure on the "starts
      // humongous" region might de-allocate and clear all its
      // "continues humongous" regions and, as a result, we might end
      // up processing them twice. So, we'll do them first (note: most
      // closures will ignore them anyway) and then we'll do the
      // "starts humongous" region.
      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
        HeapRegion* chr = _regions.get_by_index(ch_index);

        assert(chr->is_continues_humongous(), "Must be humongous region");
        assert(chr->humongous_start_region() == r,
               err_msg("Must work on humongous continuation of the original start region "
                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
        assert(!hrclaimer->is_region_claimed(ch_index),
               "Must not have been claimed yet because claiming of a humongous continuation first claims the start region");

        // Claim the region so no other worker tries to process the region. When a worker processes a
        // starts_humongous region it may also process the associated continues_humongous regions.
        // The continues_humongous regions can be changed to free regions. Unless this worker claims
        // all of these regions, other workers might try to claim and process these newly free regions.
        bool claim_result = hrclaimer->claim_region(ch_index);
        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");

        bool res2 = blk->doHeapRegion(chr);
        if (res2) {
          return;
        }

        // Right now, this holds (i.e., no closure that actually
        // does something with "continues humongous" regions
        // clears them). We might have to weaken it in the future,
        // but let's leave these two asserts here for extra safety.
        assert(chr->is_continues_humongous(), "should still be the case");
        assert(chr->humongous_start_region() == r, "sanity");
      }
    }

    bool res = blk->doHeapRegion(r);
    if (res) {
      return;
    }
  }
}

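// Uncommits up to num_regions_to_remove empty regions, scanning downwards
// from the highest allocated index. Returns the number of regions that
// were actually removed.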
uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_heapregions_length, "invariant");
  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  if (num_regions_to_remove == 0) {
    return 0;
  }

  uint removed = 0;
  uint cur = _allocated_heapregions_length - 1;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  while ((removed < num_regions_to_remove) &&
      (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);

    cur -= num_last_found;
    removed += to_remove;
  }

  verify_optional();

  return removed;
}

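// Finds the highest-indexed run of empty, committed regions at or below
// start_idx. Stores the index of the lowest region of the run in
// *res_idx and returns the length of the run (zero if none was found).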
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

  uint num_regions_found = 0;

  jlong cur = start_idx;
  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
    cur--;
  }
  if (cur == -1) {
    return num_regions_found;
  }
  jlong old_cur = cur;
  // cur indexes the first empty region
  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
    cur--;
  }
  *res_idx = cur + 1;
  num_regions_found = old_cur - cur;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
    assert(at(i)->is_empty(), "just checking");
  }
#endif
  return num_regions_found;
}

void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            err_msg("invariant: _length: %u _allocated_length: %u",
                    length(), _allocated_heapregions_length));
  guarantee(_allocated_heapregions_length <= max_length(),
            err_msg("invariant: _allocated_length: %u _max_length: %u",
                    _allocated_heapregions_length, max_length()));

  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, err_msg("invariant: i: %u", i));
    guarantee(!prev_committed || hr->bottom() == prev_end,
              err_msg("invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
                      i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
    guarantee(hr->hrm_index() == i,
              err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index()));
    // Asserts will fire if i is >= _length
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the time
    // this method may be called, we have only completed allocation of the regions,
    // but not put them into a region set.
    prev_committed = true;
    if (hr->is_starts_humongous()) {
      prev_end = hr->orig_end();
    } else {
      prev_end = hr->end();
    }
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
  }

  guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
  _free_list.verify();
}

#ifndef PRODUCT
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT

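// The claimer hands out regions to parallel workers: each entry of
// _claims records whether the corresponding region has been claimed.
// The memset below assumes the Unclaimed constant is zero, since memset
// writes the fill value byte-wise.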
HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
  assert(n_workers > 0, "Need at least one worker.");
  _claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
  memset(_claims, Unclaimed, sizeof(*_claims) * _n_regions);
}

HeapRegionClaimer::~HeapRegionClaimer() {
  if (_claims != NULL) {
    FREE_C_HEAP_ARRAY(uint, _claims);
  }
}

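// Spreads worker start positions evenly across the region index space so
// that workers begin claiming in disjoint parts of the heap.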
uint HeapRegionClaimer::start_region_for_worker(uint worker_id) const {
  assert(worker_id < _n_workers, "Invalid worker_id.");
  return _n_regions * worker_id / _n_workers;
}

bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
  assert(region_index < _n_regions, "Invalid index.");
  return _claims[region_index] == Claimed;
}

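// Atomically attempts to claim the region; returns true only for the
// caller that performs the Unclaimed -> Claimed transition.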
bool HeapRegionClaimer::claim_region(uint region_index) {
  assert(region_index < _n_regions, "Invalid index.");
  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
  return old_val == Unclaimed;
}