@@ -237 +237 @@
   // The secondary free list which contains regions that have been
   // freed up during the cleanup process. This will be appended to the
   // master free list when appropriate.
   SecondaryFreeRegionList _secondary_free_list;

+  // It keeps track of the old regions.
+  MasterOldRegionSet _old_set;
+
   // It keeps track of the humongous regions.
   MasterHumongousRegionSet _humongous_set;

   // The number of regions we could create by expansion.
   size_t _expansion_regions;

   // The block offset table for the G1 heap.
   G1BlockOffsetSharedArray* _bot_shared;

-  // Move all of the regions off the free lists, then rebuild those free
-  // lists, before and after full GC.
-  void tear_down_region_lists();
-  void rebuild_region_lists();
+  // Tears down the region sets / lists so that they are empty and the
+  // regions on the heap do not belong to a region set / list. The
+  // only exception is the humongous set which we leave unaltered. If
+  // free_list_only is true, it will only tear down the master free
+  // list. It is called before a Full GC (free_list_only == false) or
+  // before heap shrinking (free_list_only == true).
+  void tear_down_region_sets(bool free_list_only);
+
+  // Rebuilds the region sets / lists so that they are repopulated to
+  // reflect the contents of the heap. The only exception is the
+  // humongous set which was not torn down in the first place. If
+  // free_list_only is true, it will only rebuild the master free
+  // list. It is called after a Full GC (free_list_only == false) or
+  // after heap shrinking (free_list_only == true).
+  void rebuild_region_sets(bool free_list_only);

   // The sequence of all heap regions in the heap.
   HeapRegionSeq _hrs;

   // Alloc region used to satisfy mutator allocation requests.
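
Note on the tear_down_region_sets() / rebuild_region_sets() pair added above: per their comments, they bracket a Full GC (free_list_only == false) or a heap shrink (free_list_only == true). A minimal sketch of that call pattern, restating the comments as code (illustrative only; the surrounding collection/shrink code is not part of this excerpt):

  // Around a Full GC: every region set / list except the humongous set
  // is torn down, then rebuilt from the post-compaction heap.
  tear_down_region_sets(false /* free_list_only */);
  // ... perform the full collection / compaction ...
  rebuild_region_sets(false /* free_list_only */);

  // Around heap shrinking: only the master free list is affected.
  tear_down_region_sets(true /* free_list_only */);
  // ... uncommit regions at the end of the heap ...
  rebuild_region_sets(true /* free_list_only */);
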
@@ -1122 +1136 @@
       MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
       append_secondary_free_list();
     }
   }

+  void old_set_remove(HeapRegion* hr) {
+    _old_set.remove(hr);
+  }
+
   void set_free_regions_coming();
   void reset_free_regions_coming();
   bool free_regions_coming() { return _free_regions_coming; }
   void wait_while_free_regions_coming();

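
The new old_set_remove() above is a thin convenience wrapper around _old_set.remove(), used to keep the master old set in sync when a region leaves it. A minimal, hypothetical caller that is about to reclaim an old region hr might look like this (illustration only):

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->old_set_remove(hr);  // take hr out of the master old set before freeing it
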
@@ -1151 +1169 @@
   // free_humongous_region() depending on the type of the region that
   // is passed to it.
   void free_region_if_empty(HeapRegion* hr,
                             size_t* pre_used,
                             FreeRegionList* free_list,
+                            OldRegionSet* old_proxy_set,
                             HumongousRegionSet* humongous_proxy_set,
                             HRRSCleanupTask* hrrs_cleanup_task,
                             bool par);

   // It appends the free list to the master free list and updates the
   // master humongous list according to the contents of the proxy
   // list. It also adjusts the total used bytes according to pre_used
   // (if par is true, it will do so by taking the ParGCRareEvent_lock).
   void update_sets_after_freeing_regions(size_t pre_used,
                                          FreeRegionList* free_list,
+                                         OldRegionSet* old_proxy_set,
                                          HumongousRegionSet* humongous_proxy_set,
                                          bool par);

   // Returns "TRUE" iff "p" points into the allocated area of the heap.
   virtual bool is_in(const void* p) const;
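
To make the new old_proxy_set argument concrete: the comments above describe a pattern where a (possibly parallel) worker accumulates freed regions into local lists / proxy sets and then flushes them into the master sets once. A sketch of that pattern under the new signatures, with hypothetical local names and constructor arguments (illustrative only, not code from this change):

  size_t pre_used = 0;
  FreeRegionList local_free_list("Local Free List");
  OldRegionSet old_proxy_set("Local Old Proxy Set");
  HumongousRegionSet humongous_proxy_set("Local Humongous Proxy Set");
  HRRSCleanupTask hrrs_cleanup_task;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // For each candidate region hr this worker visits:
  g1h->free_region_if_empty(hr, &pre_used,
                            &local_free_list,
                            &old_proxy_set,
                            &humongous_proxy_set,
                            &hrrs_cleanup_task,
                            true /* par */);

  // Once, after the worker has finished its claim:
  g1h->update_sets_after_freeing_regions(pre_used,
                                         &local_free_list,
                                         &old_proxy_set,
                                         &humongous_proxy_set,
                                         true /* par */);
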
@@ -1450 +1470 @@

   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
   static G1CollectedHeap* heap();

-  void empty_young_list();
-
   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups

   YoungList* young_list() { return _young_list; }

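
For reference, the heap() convenience accessor above is typically used like this from G1-specific code that already knows the active collector is G1 (one-line illustration, not part of the change):

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  YoungList* young = g1h->young_list();
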