src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp
changeset 59060 fce1fa1bdc91
parent 54264 41af8d0546bc
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	59059:27a266d5fb13
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	59060:fce1fa1bdc91
@@ -22,10 +22,11 @@
  *
  */
 
 #include "precompiled.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1NUMA.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/virtualspace.hpp"
 #include "runtime/java.hpp"
@@ -42,11 +43,12 @@
                                              size_t commit_factor,
                                              MemoryType type) :
   _listener(NULL),
   _storage(rs, used_size, page_size),
   _region_granularity(region_granularity),
-  _commit_map(rs.size() * commit_factor / region_granularity, mtGC) {
+  _commit_map(rs.size() * commit_factor / region_granularity, mtGC),
+  _memory_type(type) {
   guarantee(is_power_of_2(page_size), "must be");
   guarantee(is_power_of_2(region_granularity), "must be");
 
   MemTracker::record_virtual_memory_type((address)rs.base(), type);
 }
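The initializer list now also stores the mapper's MemoryType in a _memory_type field (declared in the companion g1RegionToSpaceMapper.hpp change, which is not part of this file's diff), so the commit paths below can distinguish the Java heap mapping from G1's auxiliary structures that go through the same mapper. A minimal standalone model of that dispatch follows; the enum and struct are simplified stand-ins for illustration, not HotSpot's own types.

// Standalone sketch (not HotSpot code): why the mapper records its MemoryType.
// Only the Java heap mapping should trigger a NUMA placement request; mappings
// created with any other type stay untouched.
#include <cstdio>

enum MemoryType { mtJavaHeap, mtGC, mtOther };      // simplified stand-in enum

struct MapperModel {
  MemoryType _memory_type;                          // what the constructor now records
  void commit_regions(unsigned start_idx, unsigned num_regions) {
    // ...backing pages would be committed here...
    if (_memory_type == mtJavaHeap) {
      // only heap regions get a preferred-node request
      std::printf("request NUMA placement for regions [%u, %u)\n",
                  start_idx, start_idx + num_regions);
    }
  }
};

int main() {
  MapperModel heap{mtJavaHeap};
  MapperModel aux{mtGC};
  heap.commit_regions(0, 4);   // prints a placement request
  aux.commit_regions(0, 4);    // silent: auxiliary data is not NUMA-targeted
  return 0;
}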
@@ -70,14 +72,22 @@
 
     guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
   }
 
   virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
-    size_t const start_page = (size_t)start_idx * _pages_per_region;
-    bool zero_filled = _storage.commit(start_page, num_regions * _pages_per_region);
+    const size_t start_page = (size_t)start_idx * _pages_per_region;
+    const size_t size_in_pages = num_regions * _pages_per_region;
+    bool zero_filled = _storage.commit(start_page, size_in_pages);
+    if (_memory_type == mtJavaHeap) {
+      for (uint region_index = start_idx; region_index < start_idx + num_regions; region_index++ ) {
+        void* address = _storage.page_start(region_index * _pages_per_region);
+        size_t size_in_bytes = _storage.page_size() * _pages_per_region;
+        G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region_index);
+      }
+    }
     if (AlwaysPreTouch) {
-      _storage.pretouch(start_page, num_regions * _pages_per_region, pretouch_gang);
+      _storage.pretouch(start_page, size_in_pages, pretouch_gang);
     }
     _commit_map.set_range(start_idx, start_idx + num_regions);
     fire_on_commit(start_idx, num_regions, zero_filled);
   }
 
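In this mapper each region spans a whole number of commit pages, so the added block requests NUMA placement one region at a time: the address is the first page of the region and the length is pages-per-region worth of bytes. A short worked example of that arithmetic, using assumed 2 MB pages and 32 MB regions rather than values taken from the changeset:

// Worked example with assumed sizes: 2 MB pages, 32 MB regions -> 16 pages per
// region. Committing two regions starting at index 3 commits pages 48..79 and
// issues one 32 MB NUMA request per region.
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t page_size        = 2u * 1024 * 1024;        // assumption
  const std::size_t region_size      = 32u * 1024 * 1024;       // assumption
  const std::size_t pages_per_region = region_size / page_size; // 16

  const unsigned start_idx      = 3;
  const std::size_t num_regions = 2;

  const std::size_t start_page    = (std::size_t)start_idx * pages_per_region; // 48
  const std::size_t size_in_pages = num_regions * pages_per_region;            // 32
  const std::size_t request_bytes = page_size * pages_per_region;              // 32 MB per region

  std::printf("start_page=%zu size_in_pages=%zu bytes_per_numa_request=%zu\n",
              start_page, size_in_pages, request_bytes);
  return 0;
}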
@@ -124,30 +134,36 @@
 
     size_t first_committed = NoPage;
     size_t num_committed = 0;
 
     bool all_zero_filled = true;
+    G1NUMA* numa = G1NUMA::numa();
 
-    for (uint i = start_idx; i < start_idx + num_regions; i++) {
-      assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
-      size_t idx = region_idx_to_page_idx(i);
-      uint old_refcount = _refcounts.get_by_index(idx);
+    for (uint region_idx = start_idx; region_idx < start_idx + num_regions; region_idx++) {
+      assert(!_commit_map.at(region_idx), "Trying to commit storage at region %u that is already committed", region_idx);
+      size_t page_idx = region_idx_to_page_idx(region_idx);
+      uint old_refcount = _refcounts.get_by_index(page_idx);
 
       bool zero_filled = false;
       if (old_refcount == 0) {
         if (first_committed == NoPage) {
-          first_committed = idx;
+          first_committed = page_idx;
           num_committed = 1;
         } else {
           num_committed++;
         }
-        zero_filled = _storage.commit(idx, 1);
+        zero_filled = _storage.commit(page_idx, 1);
+        if (_memory_type == mtJavaHeap) {
+          void* address = _storage.page_start(page_idx);
+          size_t size_in_bytes = _storage.page_size();
+          numa->request_memory_on_node(address, size_in_bytes, region_idx);
+        }
       }
       all_zero_filled &= zero_filled;
 
-      _refcounts.set_by_index(idx, old_refcount + 1);
-      _commit_map.set_bit(i);
+      _refcounts.set_by_index(page_idx, old_refcount + 1);
+      _commit_map.set_bit(region_idx);
     }
     if (AlwaysPreTouch && num_committed > 0) {
       _storage.pretouch(first_committed, num_committed, pretouch_gang);
     }
     fire_on_commit(start_idx, num_regions, all_zero_filled);
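Here regions are smaller than a commit page, so several regions share one page and the per-page refcounts decide when a page is actually committed: only the transition from refcount 0 to 1 commits the page, and only that transition issues the page-sized NUMA request, attributed to the region being committed at that moment. A standalone model of the refcounting, with assumed sizes and without HotSpot types:

// Standalone sketch (not HotSpot code): regions half the size of a page share
// backing pages. The first region to land on a page commits it and asks for its
// NUMA node; later regions on the same page only bump the refcount.
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const std::size_t page_size        = 2u * 1024 * 1024;        // assumption
  const std::size_t region_size      = 1u * 1024 * 1024;        // assumption
  const std::size_t regions_per_page = page_size / region_size; // 2

  std::vector<unsigned> refcounts(4, 0);                        // refcount per backing page

  for (unsigned region_idx = 0; region_idx < 4; region_idx++) {
    const std::size_t page_idx = region_idx / regions_per_page; // region_idx_to_page_idx analogue
    if (refcounts[page_idx] == 0) {
      // regions 0 and 2 reach this branch: they are the first on pages 0 and 1
      std::printf("commit page %zu and request its NUMA node for region %u\n",
                  page_idx, region_idx);
    }
    refcounts[page_idx]++;                                      // page stays committed while refcount > 0
  }
  return 0;
}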