1135 |
1135 |
1136 G1CMIsAliveClosure is_alive(g1h); |
1136 G1CMIsAliveClosure is_alive(g1h); |
1137 _gc_tracer_cm->report_object_count_after_gc(&is_alive); |
1137 _gc_tracer_cm->report_object_count_after_gc(&is_alive); |
1138 } |
1138 } |
1139 |
1139 |
1140 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { |
1140 class G1CleanupTask: public AbstractGangTask { |
1141 G1CollectedHeap* _g1; |
1141 // Per-region work during the Cleanup pause. |
1142 size_t _freed_bytes; |
1142 class G1CleanupRegionsClosure : public HeapRegionClosure { |
1143 FreeRegionList* _local_cleanup_list; |
1143 G1CollectedHeap* _g1; |
1144 uint _old_regions_removed; |
1144 size_t _freed_bytes; |
1145 uint _humongous_regions_removed; |
1145 FreeRegionList* _local_cleanup_list; |
1146 HRRSCleanupTask* _hrrs_cleanup_task; |
1146 uint _old_regions_removed; |
1147 |
1147 uint _humongous_regions_removed; |
1148 public: |
1148 HRRSCleanupTask* _hrrs_cleanup_task; |
1149 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, |
1149 |
1150 FreeRegionList* local_cleanup_list, |
1150 public: |
1151 HRRSCleanupTask* hrrs_cleanup_task) : |
1151 G1CleanupRegionsClosure(G1CollectedHeap* g1, |
1152 _g1(g1), |
1152 FreeRegionList* local_cleanup_list, |
1153 _freed_bytes(0), |
1153 HRRSCleanupTask* hrrs_cleanup_task) : |
1154 _local_cleanup_list(local_cleanup_list), |
1154 _g1(g1), |
1155 _old_regions_removed(0), |
1155 _freed_bytes(0), |
1156 _humongous_regions_removed(0), |
1156 _local_cleanup_list(local_cleanup_list), |
1157 _hrrs_cleanup_task(hrrs_cleanup_task) { } |
1157 _old_regions_removed(0), |
1158 |
1158 _humongous_regions_removed(0), |
1159 size_t freed_bytes() { return _freed_bytes; } |
1159 _hrrs_cleanup_task(hrrs_cleanup_task) { } |
1160 const uint old_regions_removed() { return _old_regions_removed; } |
1160 |
1161 const uint humongous_regions_removed() { return _humongous_regions_removed; } |
1161 size_t freed_bytes() { return _freed_bytes; } |
1162 |
1162 const uint old_regions_removed() { return _old_regions_removed; } |
1163 bool do_heap_region(HeapRegion *hr) { |
1163 const uint humongous_regions_removed() { return _humongous_regions_removed; } |
1164 _g1->reset_gc_time_stamps(hr); |
1164 |
1165 hr->note_end_of_marking(); |
1165 bool do_heap_region(HeapRegion *hr) { |
1166 |
1166 _g1->reset_gc_time_stamps(hr); |
1167 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) { |
1167 hr->note_end_of_marking(); |
1168 _freed_bytes += hr->used(); |
1168 |
1169 hr->set_containing_set(NULL); |
1169 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) { |
1170 if (hr->is_humongous()) { |
1170 _freed_bytes += hr->used(); |
1171 _humongous_regions_removed++; |
1171 hr->set_containing_set(NULL); |
1172 _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */); |
1172 if (hr->is_humongous()) { |
|
1173 _humongous_regions_removed++; |
|
1174 _g1->free_humongous_region(hr, _local_cleanup_list); |
|
1175 } else { |
|
1176 _old_regions_removed++; |
|
1177 _g1->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */); |
|
1178 } |
|
1179 hr->clear_cardtable(); |
1173 } else { |
1180 } else { |
1174 _old_regions_removed++; |
1181 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); |
1175 _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */); |
|
1176 } |
1182 } |
1177 } else { |
1183 |
1178 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); |
1184 return false; |
1179 } |
1185 } |
1180 |
1186 }; |
1181 return false; |
1187 |
1182 } |
|
1183 }; |
|
1184 |
|
1185 class G1ParNoteEndTask: public AbstractGangTask { |
|
1186 friend class G1NoteEndOfConcMarkClosure; |
|
1187 |
|
1188 protected: |
|
1189 G1CollectedHeap* _g1h; |
1188 G1CollectedHeap* _g1h; |
1190 FreeRegionList* _cleanup_list; |
1189 FreeRegionList* _cleanup_list; |
1191 HeapRegionClaimer _hrclaimer; |
1190 HeapRegionClaimer _hrclaimer; |
1192 |
1191 |
1193 public: |
1192 public: |
1194 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : |
1193 G1CleanupTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : |
1195 AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { |
1194 AbstractGangTask("G1 Cleanup"), |
|
1195 _g1h(g1h), |
|
1196 _cleanup_list(cleanup_list), |
|
1197 _hrclaimer(n_workers) { |
|
1198 |
|
1199 HeapRegionRemSet::reset_for_cleanup_tasks(); |
1196 } |
1200 } |
1197 |
1201 |
1198 void work(uint worker_id) { |
1202 void work(uint worker_id) { |
1199 FreeRegionList local_cleanup_list("Local Cleanup List"); |
1203 FreeRegionList local_cleanup_list("Local Cleanup List"); |
1200 HRRSCleanupTask hrrs_cleanup_task; |
1204 HRRSCleanupTask hrrs_cleanup_task; |
1201 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, |
1205 G1CleanupRegionsClosure cl(_g1h, |
1202 &hrrs_cleanup_task); |
1206 &local_cleanup_list, |
1203 _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id); |
1207 &hrrs_cleanup_task); |
1204 assert(g1_note_end.is_complete(), "Shouldn't have yielded!"); |
1208 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id); |
1205 |
1209 assert(cl.is_complete(), "Shouldn't have aborted!"); |
1206 // Now update the lists |
1210 |
1207 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); |
1211 // Now update the old/humongous region sets |
|
1212 _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed()); |
1208 { |
1213 { |
1209 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
1214 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
1210 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); |
1215 _g1h->decrement_summary_bytes(cl.freed_bytes()); |
1211 |
|
1212 // If we iterate over the global cleanup list at the end of |
|
1213 // cleanup to do this printing we will not guarantee to only |
|
1214 // generate output for the newly-reclaimed regions (the list |
|
1215 // might not be empty at the beginning of cleanup; we might |
|
1216 // still be working on its previous contents). So we do the |
|
1217 // printing here, before we append the new regions to the global |
|
1218 // cleanup list. |
|
1219 |
|
1220 G1HRPrinter* hr_printer = _g1h->hr_printer(); |
|
1221 if (hr_printer->is_active()) { |
|
1222 FreeRegionListIterator iter(&local_cleanup_list); |
|
1223 while (iter.more_available()) { |
|
1224 HeapRegion* hr = iter.get_next(); |
|
1225 hr_printer->cleanup(hr); |
|
1226 } |
|
1227 } |
|
1228 |
1216 |
1229 _cleanup_list->add_ordered(&local_cleanup_list); |
1217 _cleanup_list->add_ordered(&local_cleanup_list); |
1230 assert(local_cleanup_list.is_empty(), "post-condition"); |
1218 assert(local_cleanup_list.is_empty(), "post-condition"); |
1231 |
1219 |
1232 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); |
1220 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); |
1233 } |
1221 } |
1234 } |
1222 } |
1235 }; |
1223 }; |
|
1224 |
|
1225 void G1ConcurrentMark::reclaim_empty_regions() { |
|
1226 WorkGang* workers = _g1h->workers(); |
|
1227 FreeRegionList empty_regions_list("Empty Regions After Mark List"); |
|
1228 |
|
1229 G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers()); |
|
1230 workers->run_task(&cl); |
|
1231 |
|
1232 if (!empty_regions_list.is_empty()) { |
|
1233 // Now print the empty regions list. |
|
1234 G1HRPrinter* hrp = _g1h->hr_printer(); |
|
1235 if (hrp->is_active()) { |
|
1236 FreeRegionListIterator iter(&empty_regions_list); |
|
1237 while (iter.more_available()) { |
|
1238 HeapRegion* hr = iter.get_next(); |
|
1239 hrp->cleanup(hr); |
|
1240 } |
|
1241 } |
|
1242 // And actually make them available. |
|
1243 _g1h->prepend_to_freelist(&empty_regions_list); |
|
1244 } |
|
1245 } |
1236 |
1246 |
1237 void G1ConcurrentMark::cleanup() { |
1247 void G1ConcurrentMark::cleanup() { |
1238 // world is stopped at this checkpoint |
1248 // world is stopped at this checkpoint |
1239 assert(SafepointSynchronize::is_at_safepoint(), |
1249 assert(SafepointSynchronize::is_at_safepoint(), |
1240 "world should be stopped"); |
1250 "world should be stopped"); |
1275 if (log_is_enabled(Trace, gc, liveness)) { |
1283 if (log_is_enabled(Trace, gc, liveness)) { |
1276 G1PrintRegionLivenessInfoClosure cl("Post-Cleanup"); |
1284 G1PrintRegionLivenessInfoClosure cl("Post-Cleanup"); |
1277 _g1h->heap_region_iterate(&cl); |
1285 _g1h->heap_region_iterate(&cl); |
1278 } |
1286 } |
1279 |
1287 |
1280 // Install newly created mark bitMap as "prev". |
1288 g1h->reset_gc_time_stamp(); |
|
1289 |
|
1290 // Install newly created mark bitmap as "prev". |
1281 swap_mark_bitmaps(); |
1291 swap_mark_bitmaps(); |
1282 |
1292 { |
1283 g1h->reset_gc_time_stamp(); |
1293 GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions"); |
1284 |
1294 reclaim_empty_regions(); |
1285 uint n_workers = _g1h->workers()->active_workers(); |
1295 } |
1286 |
1296 |
1287 // Note end of marking in all heap regions. |
|
1288 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers); |
|
1289 g1h->workers()->run_task(&g1_par_note_end_task); |
|
1290 g1h->check_gc_time_stamps(); |
1297 g1h->check_gc_time_stamps(); |
1291 |
|
1292 if (!cleanup_list_is_empty()) { |
|
1293 // The cleanup list is not empty, so we'll have to process it |
|
1294 // concurrently. Notify anyone else that might be wanting free |
|
1295 // regions that there will be more free regions coming soon. |
|
1296 g1h->set_free_regions_coming(); |
|
1297 } |
|
1298 |
1298 |
1299 { |
1299 { |
1300 GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup"); |
1300 GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup"); |
1301 // This will also free any regions totally full of garbage objects, |
|
1302 // and sort the regions. |
|
1303 g1h->g1_policy()->record_concurrent_mark_cleanup_end(); |
1301 g1h->g1_policy()->record_concurrent_mark_cleanup_end(); |
1304 } |
1302 } |
1305 |
1303 |
1306 // Statistics. |
1304 // Statistics. |
1307 double end = os::elapsedTime(); |
1305 double end = os::elapsedTime(); |
1308 _cleanup_times.add((end - start) * 1000.0); |
1306 _cleanup_times.add((end - start) * 1000.0); |
1309 |
1307 |
1310 // Clean up will have freed any regions completely full of garbage. |
1308 // Cleanup will have freed any regions completely full of garbage. |
1311 // Update the soft reference policy with the new heap occupancy. |
1309 // Update the soft reference policy with the new heap occupancy. |
1312 Universe::update_heap_info_at_gc(); |
1310 Universe::update_heap_info_at_gc(); |
1313 |
1311 |
1314 if (VerifyDuringGC) { |
1312 if (VerifyDuringGC) { |
1315 g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (Cleanup after)"); |
1313 g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (Cleanup after)"); |
1330 MetaspaceGC::compute_new_size(); |
1328 MetaspaceGC::compute_new_size(); |
1331 |
1329 |
1332 // We reclaimed old regions so we should calculate the sizes to make |
1330 // We reclaimed old regions so we should calculate the sizes to make |
1333 // sure we update the old gen/space data. |
1331 // sure we update the old gen/space data. |
1334 g1h->g1mm()->update_sizes(); |
1332 g1h->g1mm()->update_sizes(); |
1335 } |
|
1336 |
|
1337 void G1ConcurrentMark::complete_cleanup() { |
|
1338 if (has_aborted()) return; |
|
1339 |
|
1340 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
|
1341 |
|
1342 _cleanup_list.verify_optional(); |
|
1343 FreeRegionList tmp_free_list("Tmp Free List"); |
|
1344 |
|
1345 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : " |
|
1346 "cleanup list has %u entries", |
|
1347 _cleanup_list.length()); |
|
1348 |
|
1349 // No one else should be accessing the _cleanup_list at this point, |
|
1350 // so it is not necessary to take any locks |
|
1351 while (!_cleanup_list.is_empty()) { |
|
1352 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */); |
|
1353 assert(hr != NULL, "Got NULL from a non-empty list"); |
|
1354 hr->par_clear(); |
|
1355 tmp_free_list.add_ordered(hr); |
|
1356 |
|
1357 // Instead of adding one region at a time to the secondary_free_list, |
|
1358 // we accumulate them in the local list and move them a few at a |
|
1359 // time. This also cuts down on the number of notify_all() calls |
|
1360 // we do during this process. We'll also append the local list when |
|
1361 // _cleanup_list is empty (which means we just removed the last |
|
1362 // region from the _cleanup_list). |
|
1363 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || |
|
1364 _cleanup_list.is_empty()) { |
|
1365 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : " |
|
1366 "appending %u entries to the secondary_free_list, " |
|
1367 "cleanup list still has %u entries", |
|
1368 tmp_free_list.length(), |
|
1369 _cleanup_list.length()); |
|
1370 |
|
1371 { |
|
1372 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
|
1373 g1h->secondary_free_list_add(&tmp_free_list); |
|
1374 SecondaryFreeList_lock->notify_all(); |
|
1375 } |
|
1376 #ifndef PRODUCT |
|
1377 if (G1StressConcRegionFreeing) { |
|
1378 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { |
|
1379 os::sleep(Thread::current(), (jlong) 1, false); |
|
1380 } |
|
1381 } |
|
1382 #endif |
|
1383 } |
|
1384 } |
|
1385 assert(tmp_free_list.is_empty(), "post-condition"); |
|
1386 } |
1333 } |

// Supporting Object and Oop closures for reference discovery
// and processing during marking