   return false;
 }
 #endif //PRODUCT
 
 
+#ifdef ASSERT
+template <class T> void assert_is_in(T *p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(Universe::heap()->is_in(o), "should be in heap");
+  }
+}
+template <class T> void assert_is_in_closed_subset(T *p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
+  }
+}
+template <class T> void assert_is_in_reserved(T *p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
+  }
+}
+template <class T> void assert_nothing(T *p) {}
+
+#else
+template <class T> void assert_is_in(T *p) {}
+template <class T> void assert_is_in_closed_subset(T *p) {}
+template <class T> void assert_is_in_reserved(T *p) {}
+template <class T> void assert_nothing(T *p) {}
+#endif // ASSERT
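These assert helpers take the slot pointer as a template parameter so one body covers both wide (oop*) and narrow (narrowOop*) slots; load_heap_oop and decode_heap_oop_not_null hide the width difference. A minimal standalone sketch of that load/decode split (invented heap, types, and names; not HotSpot code):

    // Standalone sketch (not HotSpot): overloads model the narrow/wide split
    // that load_heap_oop / decode_heap_oop_not_null perform in the VM.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrow_ref;             // compressed ref: offset from a base
    static char  g_heap[1 << 20];            // stand-in for the managed heap
    static char* g_heap_base = g_heap;

    static bool  is_null(narrow_ref v) { return v == 0; }
    static bool  is_null(char* v)      { return v == 0; }
    static char* decode_not_null(narrow_ref v) { return g_heap_base + v; }
    static char* decode_not_null(char* v)      { return v; }

    // One template body serves both slot widths, like assert_is_in above.
    template <class T> void check_is_in(T* p) {
      T v = *p;                              // analogous to load_heap_oop(p)
      if (!is_null(v)) {
        char* o = decode_not_null(v);        // analogous to decode_heap_oop_not_null
        assert(o >= g_heap && o < g_heap + sizeof(g_heap) && "should be in heap");
      }
    }

    int main() {
      narrow_ref narrow_slot = 64;           // compressed slot: offset 64
      char*      wide_slot   = g_heap + 128; // uncompressed slot: raw pointer
      check_is_in(&narrow_slot);             // instantiates T = narrow_ref
      check_is_in(&wide_slot);               // instantiates T = char*
      std::puts("both slot widths checked");
      return 0;
    }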
|
+
+//
+// Macros that iterate over areas of oops which are specialized on type of
+// oop pointer, either narrow or wide, depending on UseCompressedOops
+//
+// Parameters are:
+//   T         - type of oop to point to (either oop or narrowOop)
+//   start_p   - starting pointer for region to iterate over
+//   count     - number of oops or narrowOops to iterate over
+//   do_oop    - action to perform on each oop (it's arbitrary C code which
+//               makes it more efficient to put in a macro rather than making
+//               it a template function)
+//   assert_fn - assert function, which is a template function because
+//               performance doesn't matter when it is enabled.
+#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
+  T, start_p, count, do_oop,                   \
+  assert_fn)                                   \
+{                                              \
+  T* p         = (T*)(start_p);                \
+  T* const end = p + (count);                  \
+  while (p < end) {                            \
+    (assert_fn)(p);                            \
+    do_oop;                                    \
+    ++p;                                       \
+  }                                            \
+}
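do_oop is spliced into the loop as a raw statement with the cursor p in scope, so the per-slot action costs nothing at the call boundary. A standalone miniature of the same splice trick, assuming a simplified macro and plain int slots:

    #include <cstdio>

    // Simplified imitation of the SPECIALIZED_OOP_ITERATE shape: the loop
    // body arrives as an arbitrary statement that may mention the cursor `p`.
    #define SPECIALIZED_ITERATE(T, start_p, count, do_oop) \
    {                                                      \
      T* p         = (T*)(start_p);                        \
      T* const end = p + (count);                          \
      while (p < end) {                                    \
        do_oop;                                            \
        ++p;                                               \
      }                                                    \
    }

    int main() {
      int fields[4] = {10, 20, 30, 40};
      int sum = 0;
      // `sum += *p` is pasted into the loop verbatim -- no function call,
      // no template instantiation, which is the efficiency argument above.
      SPECIALIZED_ITERATE(int, fields, 4, sum += *p)
      std::printf("sum = %d\n", sum);   // prints: sum = 100
      return 0;
    }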
|
+
+#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
+  T, start_p, count, do_oop,                           \
+  assert_fn)                                           \
+{                                                      \
+  T* const start = (T*)(start_p);                      \
+  T*       p     = start + (count);                    \
+  while (start < p) {                                  \
+    --p;                                               \
+    (assert_fn)(p);                                    \
+    do_oop;                                            \
+  }                                                    \
+}
|
+
+#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
+  T, start_p, count, low, high,                        \
+  do_oop, assert_fn)                                   \
+{                                                      \
+  T* const l = (T*)(low);                              \
+  T* const h = (T*)(high);                             \
+  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&   \
+         mask_bits((intptr_t)h, sizeof(T)-1) == 0,     \
+         "bounded region must be properly aligned");   \
+  T* p   = (T*)(start_p);                              \
+  T* end = p + (count);                                \
+  if (p < l)   p   = l;                                \
+  if (end > h) end = h;                                \
+  while (p < end) {                                    \
+    (assert_fn)(p);                                    \
+    do_oop;                                            \
+    ++p;                                               \
+  }                                                    \
+}
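The bounded variant clamps [p, end) to the aligned [low, high) window before walking, so slots outside the MemRegion are never touched. A small standalone check of just that clamping arithmetic (invented helper, int slots standing in for heap words):

    #include <cassert>
    #include <cstddef>

    // Clamp an iteration range [start, start+count) to bounds [low, high),
    // mirroring the `if (p < l) p = l; if (end > h) end = h;` lines above.
    template <class T>
    static size_t clamped_count(T* start, size_t count, T* low, T* high) {
      T* p   = start;
      T* end = start + count;
      if (p < low)    p   = low;
      if (end > high) end = high;
      return (p < end) ? (size_t)(end - p) : 0;
    }

    int main() {
      int slots[8] = {0};
      // Window covers slots [2, 6): only 4 of the 8 slots fall inside it.
      assert(clamped_count(slots, 8, slots + 2, slots + 6) == 4);
      // A window that misses the range entirely yields zero slots.
      assert(clamped_count(slots, 8, slots + 8, slots + 8) == 0);
      return 0;
    }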
|
+
+
+// The following macros call specialized macros, passing either oop or
+// narrowOop as the specialization type.  These test the UseCompressedOops
+// flag.
+#define InstanceKlass_OOP_ITERATE(start_p, count,    \
+                                  do_oop, assert_fn) \
+{                                                    \
+  if (UseCompressedOops) {                           \
+    InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+      start_p, count,                                \
+      do_oop, assert_fn)                             \
+  } else {                                           \
+    InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,       \
+      start_p, count,                                \
+      do_oop, assert_fn)                             \
+  }                                                  \
+}
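UseCompressedOops is read once per region, and each branch expands the loop for its own slot width, so the per-slot code is branch-free. A standalone sketch of that dispatch shape, with a plain bool flag and integer slot types standing in for the VM's:

    #include <cstdint>
    #include <cstdio>

    static bool use_compressed = true;   // stand-in for the UseCompressedOops flag

    #define SPECIALIZED_ITERATE(T, start_p, count, do_oop) \
    {                                                      \
      T* p         = (T*)(start_p);                        \
      T* const end = p + (count);                          \
      while (p < end) { do_oop; ++p; }                     \
    }

    // Branch once per region, then run the width-specialized loop.
    #define DISPATCH_ITERATE(start_p, count, do_oop)           \
    {                                                          \
      if (use_compressed) {                                    \
        SPECIALIZED_ITERATE(uint32_t, start_p, count, do_oop)  \
      } else {                                                 \
        SPECIALIZED_ITERATE(uintptr_t, start_p, count, do_oop) \
      }                                                        \
    }

    int main() {
      uint32_t narrow_slots[3] = {1, 2, 3};
      // With use_compressed set, the narrow (32-bit) expansion runs.
      DISPATCH_ITERATE(narrow_slots, 3, std::printf("slot %u\n", (unsigned)*p))
      return 0;
    }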
|
+
+#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
+                                          do_oop, assert_fn)         \
+{                                                                    \
+  if (UseCompressedOops) {                                           \
+    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,         \
+      start_p, count,                                                \
+      low, high,                                                     \
+      do_oop, assert_fn)                                             \
+  } else {                                                           \
+    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,               \
+      start_p, count,                                                \
+      low, high,                                                     \
+      do_oop, assert_fn)                                             \
+  }                                                                  \
+}
|
+
+#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)         \
+{                                                                     \
+  /* Compute oopmap block range. The common case                      \
+     is nonstatic_oop_map_size == 1. */                               \
+  OopMapBlock* map           = start_of_nonstatic_oop_maps();         \
+  OopMapBlock* const end_map = map + nonstatic_oop_map_size();        \
+  if (UseCompressedOops) {                                            \
+    while (map < end_map) {                                           \
+      InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                \
+        obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
+        do_oop, assert_fn)                                            \
+      ++map;                                                          \
+    }                                                                 \
+  } else {                                                            \
+    while (map < end_map) {                                           \
+      InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                      \
+        obj->obj_field_addr<oop>(map->offset()), map->length(),       \
+        do_oop, assert_fn)                                            \
+      ++map;                                                          \
+    }                                                                 \
+  }                                                                   \
+}
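Each OopMapBlock describes one run of reference fields as an (offset, length) pair, and the macro walks every block of the instance. A standalone analogue with an invented block table:

    #include <cstdio>

    // Invented analogue of OopMapBlock: a run of reference slots inside
    // an object, given as (offset in slots, number of slots).
    struct Block { int offset; int length; };

    int main() {
      // Pretend object layout: header at slot 0, oops at slots 1-2 and 5-7.
      const char* slot_names[8] =
        {"hdr", "oopA", "oopB", "int", "int", "oopC", "oopD", "oopE"};
      Block maps[2] = { {1, 2}, {5, 3} };   // two blocks; common case is one

      for (const Block& m : maps) {         // outer walk over oop-map blocks
        for (int i = 0; i < m.length; i++)  // inner walk over one run of oops
          std::printf("visit slot %d (%s)\n",
                      m.offset + i, slot_names[m.offset + i]);
      }
      return 0;
    }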
|
+
+#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)  \
+{                                                                      \
+  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();        \
+  OopMapBlock*       map       = start_map + nonstatic_oop_map_size(); \
+  if (UseCompressedOops) {                                             \
+    while (start_map < map) {                                          \
+      --map;                                                           \
+      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,         \
+        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),  \
+        do_oop, assert_fn)                                             \
+    }                                                                  \
+  } else {                                                             \
+    while (start_map < map) {                                          \
+      --map;                                                           \
+      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,               \
+        obj->obj_field_addr<oop>(map->offset()), map->length(),        \
+        do_oop, assert_fn)                                             \
+    }                                                                  \
+  }                                                                    \
+}
|
+
+#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop,    \
+                                              assert_fn)                 \
+{                                                                        \
+  /* Compute oopmap block range. The common case is                      \
+     nonstatic_oop_map_size == 1, so we accept the                       \
+     usually non-existent extra overhead of examining                    \
+     all the maps. */                                                    \
+  OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
+  OopMapBlock* const end_map = map + nonstatic_oop_map_size();           \
+  if (UseCompressedOops) {                                               \
+    while (map < end_map) {                                              \
+      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
+        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
+        low, high,                                                       \
+        do_oop, assert_fn)                                               \
+      ++map;                                                             \
+    }                                                                    \
+  } else {                                                               \
+    while (map < end_map) {                                              \
+      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
+        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
+        low, high,                                                       \
+        do_oop, assert_fn)                                               \
+      ++map;                                                             \
+    }                                                                    \
+  }                                                                      \
+}
+
 void instanceKlass::follow_static_fields() {
-  oop* start = start_of_static_fields();
-  oop* end = start + static_oop_field_size();
-  while (start < end) {
-    if (*start != NULL) {
-      assert(Universe::heap()->is_in_closed_subset(*start),
-             "should be in heap");
-      MarkSweep::mark_and_push(start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    MarkSweep::mark_and_push(p), \
+    assert_is_in_closed_subset)
 }
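The intent of rewrites like the one above is behavior preservation: the macro visits exactly the slots the hand-written loop visited, with the null check now inside the pushed operation. A standalone equivalence check under stand-in types (invented mark_and_push that only counts):

    #include <cassert>
    #include <cstdint>

    #define SPECIALIZED_ITERATE(T, start_p, count, do_oop) \
    {                                                      \
      T* p         = (T*)(start_p);                        \
      T* const end = p + (count);                          \
      while (p < end) { do_oop; ++p; }                     \
    }

    static int g_marked = 0;
    // Stand-in for MarkSweep::mark_and_push: skips null slots itself.
    static void mark_and_push(uint32_t* p) { if (*p != 0) g_marked++; }

    int main() {
      uint32_t statics[6] = {7, 0, 9, 0, 3, 4};   // 0 plays the role of NULL

      // Old style: hand-written loop with an explicit null check.
      int marked_old = 0;
      for (uint32_t* s = statics; s < statics + 6; ++s)
        if (*s != 0) marked_old++;

      // New style: the same traversal through the macro.
      SPECIALIZED_ITERATE(uint32_t, statics, 6, mark_and_push(p))

      assert(marked_old == g_marked);   // both see the same 4 non-null slots
      return 0;
    }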
 
 #ifndef SERIALGC
 void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
-  oop* start = start_of_static_fields();
-  oop* end = start + static_oop_field_size();
-  while (start < end) {
-    if (*start != NULL) {
-      assert(Universe::heap()->is_in(*start), "should be in heap");
-      PSParallelCompact::mark_and_push(cm, start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    PSParallelCompact::mark_and_push(cm, p), \
+    assert_is_in)
 }
 #endif // SERIALGC
 
-
 void instanceKlass::adjust_static_fields() {
-  oop* start = start_of_static_fields();
-  oop* end = start + static_oop_field_size();
-  while (start < end) {
-    MarkSweep::adjust_pointer(start);
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    MarkSweep::adjust_pointer(p), \
+    assert_nothing)
 }
 
 #ifndef SERIALGC
 void instanceKlass::update_static_fields() {
-  oop* const start = start_of_static_fields();
-  oop* const beg_oop = start;
-  oop* const end_oop = start + static_oop_field_size();
-  for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-    PSParallelCompact::adjust_pointer(cur_oop);
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_nothing)
 }
 
-void
-instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
-  oop* const start = start_of_static_fields();
-  oop* const beg_oop = MAX2((oop*)beg_addr, start);
-  oop* const end_oop = MIN2((oop*)end_addr, start + static_oop_field_size());
-  for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-    PSParallelCompact::adjust_pointer(cur_oop);
-  }
+void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
+  InstanceKlass_BOUNDED_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    beg_addr, end_addr, \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_nothing )
 }
 #endif // SERIALGC
 
 void instanceKlass::oop_follow_contents(oop obj) {
-  assert (obj!=NULL, "can't follow the content of NULL object");
+  assert(obj != NULL, "can't follow the content of NULL object");
   obj->follow_header();
-  OopMapBlock* map = start_of_nonstatic_oop_maps();
-  OopMapBlock* end_map = map + nonstatic_oop_map_size();
-  while (map < end_map) {
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* end = start + map->length();
-    while (start < end) {
-      if (*start != NULL) {
-        assert(Universe::heap()->is_in_closed_subset(*start),
-               "should be in heap");
-        MarkSweep::mark_and_push(start);
-      }
-      start++;
-    }
-    map++;
-  }
+  InstanceKlass_OOP_MAP_ITERATE( \
+    obj, \
+    MarkSweep::mark_and_push(p), \
+    assert_is_in_closed_subset)
 }
 
 #ifndef SERIALGC
 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
                                         oop obj) {
-  assert (obj!=NULL, "can't follow the content of NULL object");
+  assert(obj != NULL, "can't follow the content of NULL object");
   obj->follow_header(cm);
-  OopMapBlock* map = start_of_nonstatic_oop_maps();
-  OopMapBlock* end_map = map + nonstatic_oop_map_size();
-  while (map < end_map) {
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* end = start + map->length();
-    while (start < end) {
-      if (*start != NULL) {
-        assert(Universe::heap()->is_in(*start), "should be in heap");
-        PSParallelCompact::mark_and_push(cm, start);
-      }
-      start++;
-    }
-    map++;
-  }
+  InstanceKlass_OOP_MAP_ITERATE( \
+    obj, \
+    PSParallelCompact::mark_and_push(cm, p), \
+    assert_is_in)
 }
 #endif // SERIALGC
-
-#define invoke_closure_on(start, closure, nv_suffix) {                       \
-  oop obj = *(start);                                                        \
-  if (obj != NULL) {                                                         \
-    assert(Universe::heap()->is_in_closed_subset(obj), "should be in heap"); \
-    (closure)->do_oop##nv_suffix(start);                                     \
-  }                                                                          \
-}
 
 // closure's do_header() method dictates whether the given closure should be
 // applied to the klass ptr in the object header.
 
 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)          \
                                                                                \
 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj,                         \
                                               OopClosureType* closure) {       \
   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
   /* header */                                                                 \
   if (closure->do_header()) {                                                  \
     obj->oop_iterate_header(closure);                                          \
   }                                                                            \
-  /* instance variables */                                                     \
-  OopMapBlock* map = start_of_nonstatic_oop_maps();                            \
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();                 \
-  const intx field_offset = PrefetchFieldsAhead;                               \
-  if (field_offset > 0) {                                                      \
-    while (map < end_map) {                                                    \
-      oop* start = obj->obj_field_addr(map->offset());                         \
-      oop* const end = start + map->length();                                  \
-      while (start < end) {                                                    \
-        prefetch_beyond(start, (oop*)end, field_offset,                        \
-                        closure->prefetch_style());                            \
-        SpecializationStats::                                                  \
-          record_do_oop_call##nv_suffix(SpecializationStats::ik);              \
-        invoke_closure_on(start, closure, nv_suffix);                          \
-        start++;                                                               \
-      }                                                                        \
-      map++;                                                                   \
-    }                                                                          \
-  } else {                                                                     \
-    while (map < end_map) {                                                    \
-      oop* start = obj->obj_field_addr(map->offset());                         \
-      oop* const end = start + map->length();                                  \
-      while (start < end) {                                                    \
-        SpecializationStats::                                                  \
-          record_do_oop_call##nv_suffix(SpecializationStats::ik);              \
-        invoke_closure_on(start, closure, nv_suffix);                          \
-        start++;                                                               \
-      }                                                                        \
-      map++;                                                                   \
-    }                                                                          \
-  }                                                                            \
+  InstanceKlass_OOP_MAP_ITERATE(                                               \
+    obj,                                                                       \
+    SpecializationStats::                                                      \
+      record_do_oop_call##nv_suffix(SpecializationStats::ik);                  \
+    (closure)->do_oop##nv_suffix(p),                                           \
+    assert_is_in_closed_subset)                                                \
   return size_helper();                                                        \
 }
 
 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)        \
                                                                                \
 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,                     \
                                                   OopClosureType* closure,     \
                                                   MemRegion mr) {              \
   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
-  /* header */                                                                 \
   if (closure->do_header()) {                                                  \
     obj->oop_iterate_header(closure, mr);                                      \
   }                                                                            \
-  /* instance variables */                                                     \
-  OopMapBlock* map = start_of_nonstatic_oop_maps();                            \
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();                 \
-  HeapWord* bot = mr.start();                                                  \
-  HeapWord* top = mr.end();                                                    \
-  oop* start = obj->obj_field_addr(map->offset());                             \
-  HeapWord* end = MIN2((HeapWord*)(start + map->length()), top);               \
-  /* Find the first map entry that extends onto mr. */                         \
-  while (map < end_map && end <= bot) {                                        \
-    map++;                                                                     \
-    start = obj->obj_field_addr(map->offset());                                \
-    end = MIN2((HeapWord*)(start + map->length()), top);                       \
-  }                                                                            \
-  if (map != end_map) {                                                        \
-    /* The current map's end is past the start of "mr". Skip up to the first   \
-       entry on "mr". */                                                       \
-    while ((HeapWord*)start < bot) {                                           \
-      start++;                                                                 \
-    }                                                                          \
-    const intx field_offset = PrefetchFieldsAhead;                             \
-    for (;;) {                                                                 \
-      if (field_offset > 0) {                                                  \
-        while ((HeapWord*)start < end) {                                       \
-          prefetch_beyond(start, (oop*)end, field_offset,                      \
-                          closure->prefetch_style());                          \
-          invoke_closure_on(start, closure, nv_suffix);                        \
-          start++;                                                             \
-        }                                                                      \
-      } else {                                                                 \
-        while ((HeapWord*)start < end) {                                       \
-          invoke_closure_on(start, closure, nv_suffix);                        \
-          start++;                                                             \
-        }                                                                      \
-      }                                                                        \
-      /* Go to the next map. */                                                \
-      map++;                                                                   \
-      if (map == end_map) {                                                    \
-        break;                                                                 \
-      }                                                                        \
-      /* Otherwise, */                                                         \
-      start = obj->obj_field_addr(map->offset());                              \
-      if ((HeapWord*)start >= top) {                                           \
-        break;                                                                 \
-      }                                                                        \
-      end = MIN2((HeapWord*)(start + map->length()), top);                     \
-    }                                                                          \
-  }                                                                            \
+  InstanceKlass_BOUNDED_OOP_MAP_ITERATE(                                       \
+    obj, mr.start(), mr.end(),                                                 \
+    (closure)->do_oop##nv_suffix(p),                                           \
+    assert_is_in_closed_subset)                                                \
   return size_helper();                                                        \
 }
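The ##nv_suffix paste is what lets one macro body emit both the virtual and the non-virtual flavor of each iterator. A standalone miniature of the same stamping trick (invented closure type and suffixes; HotSpot's real suffixes come from its closure-specialization framework):

    #include <cstdio>

    struct Closure {
      void do_oop_nv(int* p) { *p += 1; }          // non-virtual, inlinable
      virtual void do_oop_v(int* p) { *p += 1; }   // virtual fallback
      virtual ~Closure() {}
    };

    // One macro body stamps out both variants, like OOP_OOP_ITERATE_DEFN.
    #define ITERATE_DEFN(suffix)                                  \
    int iterate##suffix(int* fields, int n, Closure* cl) {        \
      for (int i = 0; i < n; i++) cl->do_oop##suffix(&fields[i]); \
      return n;                                                   \
    }

    ITERATE_DEFN(_nv)   // defines iterate_nv, which calls do_oop_nv
    ITERATE_DEFN(_v)    // defines iterate_v,  which calls do_oop_v

    int main() {
      int fields[3] = {0, 0, 0};
      Closure cl;
      iterate_nv(fields, 3, &cl);
      iterate_v(fields, 3, &cl);
      std::printf("each field touched twice: %d %d %d\n",
                  fields[0], fields[1], fields[2]);   // prints: 2 2 2
      return 0;
    }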
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN)
 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
 
-
 void instanceKlass::iterate_static_fields(OopClosure* closure) {
-  oop* start = start_of_static_fields();
-  oop* end = start + static_oop_field_size();
-  while (start < end) {
-    assert(Universe::heap()->is_in_reserved_or_null(*start), "should be in heap");
-    closure->do_oop(start);
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    closure->do_oop(p), \
+    assert_is_in_reserved)
 }
 
 void instanceKlass::iterate_static_fields(OopClosure* closure,
                                           MemRegion mr) {
-  oop* start = start_of_static_fields();
-  oop* end = start + static_oop_field_size();
-  // I gather that the static fields of reference types come first,
-  // hence the name of "oop_field_size", and that is what makes this safe.
-  assert((intptr_t)mr.start() ==
-         align_size_up((intptr_t)mr.start(), sizeof(oop)) &&
-         (intptr_t)mr.end() == align_size_up((intptr_t)mr.end(), sizeof(oop)),
-         "MemRegion must be oop-aligned.");
-  if ((HeapWord*)start < mr.start()) start = (oop*)mr.start();
-  if ((HeapWord*)end > mr.end()) end = (oop*)mr.end();
-  while (start < end) {
-    invoke_closure_on(start, closure, _v);
-    start++;
-  }
+  InstanceKlass_BOUNDED_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    mr.start(), mr.end(), \
+    (closure)->do_oop_v(p), \
+    assert_is_in_closed_subset)
 }
 
-
 int instanceKlass::oop_adjust_pointers(oop obj) {
   int size = size_helper();
-
-  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
-  OopMapBlock* map = start_of_nonstatic_oop_maps();
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();
-  // Iterate over oopmap blocks
-  while (map < end_map) {
-    // Compute oop range for this block
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* end = start + map->length();
-    // Iterate over oops
-    while (start < end) {
-      assert(Universe::heap()->is_in_or_null(*start), "should be in heap");
-      MarkSweep::adjust_pointer(start);
-      start++;
-    }
-    map++;
-  }
-
+  InstanceKlass_OOP_MAP_ITERATE( \
+    obj, \
+    MarkSweep::adjust_pointer(p), \
+    assert_is_in)
   obj->adjust_header();
   return size;
 }
 
 #ifndef SERIALGC
 void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
   assert(!pm->depth_first(), "invariant");
-  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
-  OopMapBlock* start_map = start_of_nonstatic_oop_maps();
-  OopMapBlock* map = start_map + nonstatic_oop_map_size();
-
-  // Iterate over oopmap blocks
-  while (start_map < map) {
-    --map;
-    // Compute oop range for this block
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* curr = start + map->length();
-    // Iterate over oops
-    while (start < curr) {
-      --curr;
-      if (PSScavenge::should_scavenge(*curr)) {
-        assert(Universe::heap()->is_in(*curr), "should be in heap");
-        pm->claim_or_forward_breadth(curr);
-      }
-    }
-  }
+  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
+    obj, \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_breadth(p); \
+    }, \
+    assert_nothing )
 }
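The do_oop argument here is a whole if statement; since it contains no top-level comma, the preprocessor treats it as a single macro argument, and the comma after the closing brace is the argument separator, not C++ syntax. A standalone miniature of passing a guarded statement through a macro parameter:

    #include <cstdio>

    #define FOR_EACH(T, arr, n, do_slot) \
    {                                    \
      T* p         = (T*)(arr);          \
      T* const end = p + (n);            \
      while (p < end) { do_slot; ++p; }  \
    }

    int main() {
      int slots[5] = {3, 0, 8, 0, 5};
      int promoted = 0;
      // The whole `if (...) { ... }` travels as one macro argument: it has
      // no top-level comma, so only the comma after `}` below separates it
      // from the next argument.
      FOR_EACH(int, slots, 5,
        if (*p != 0) {
          promoted++;
        })
      std::printf("promoted %d of 5\n", promoted);   // prints: promoted 3 of 5
      return 0;
    }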
 
 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
   assert(pm->depth_first(), "invariant");
-  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
-  OopMapBlock* start_map = start_of_nonstatic_oop_maps();
-  OopMapBlock* map = start_map + nonstatic_oop_map_size();
-
-  // Iterate over oopmap blocks
-  while (start_map < map) {
-    --map;
-    // Compute oop range for this block
-    oop* start = obj->obj_field_addr(map->offset());
-    oop* curr = start + map->length();
-    // Iterate over oops
-    while (start < curr) {
-      --curr;
-      if (PSScavenge::should_scavenge(*curr)) {
-        assert(Universe::heap()->is_in(*curr), "should be in heap");
-        pm->claim_or_forward_depth(curr);
-      }
-    }
-  }
+  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
+    obj, \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_depth(p); \
+    }, \
+    assert_nothing )
 }
 
 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
-  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
-  OopMapBlock* map = start_of_nonstatic_oop_maps();
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();
-  // Iterate over oopmap blocks
-  while (map < end_map) {
-    // Compute oop range for this oopmap block.
-    oop* const map_start = obj->obj_field_addr(map->offset());
-    oop* const beg_oop = map_start;
-    oop* const end_oop = map_start + map->length();
-    for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-      PSParallelCompact::adjust_pointer(cur_oop);
-    }
-    ++map;
-  }
-
+  InstanceKlass_OOP_MAP_ITERATE( \
+    obj, \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_nothing)
   return size_helper();
 }
 
 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
                                        HeapWord* beg_addr, HeapWord* end_addr) {
-  // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
-  OopMapBlock* map = start_of_nonstatic_oop_maps();
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();
-  // Iterate over oopmap blocks
-  while (map < end_map) {
-    // Compute oop range for this oopmap block.
-    oop* const map_start = obj->obj_field_addr(map->offset());
-    oop* const beg_oop = MAX2((oop*)beg_addr, map_start);
-    oop* const end_oop = MIN2((oop*)end_addr, map_start + map->length());
-    for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
-      PSParallelCompact::adjust_pointer(cur_oop);
-    }
-    ++map;
-  }
-
+  InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
+    obj, beg_addr, end_addr, \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_nothing)
   return size_helper();
 }
 
 void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
   assert(!pm->depth_first(), "invariant");
-  // Compute oop range
-  oop* start = start_of_static_fields();
-  oop* end = start + static_oop_field_size();
-  // Iterate over oops
-  while (start < end) {
-    if (PSScavenge::should_scavenge(*start)) {
-      assert(Universe::heap()->is_in(*start), "should be in heap");
-      pm->claim_or_forward_breadth(start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_breadth(p); \
+    }, \
+    assert_nothing )
 }
 
 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
   assert(pm->depth_first(), "invariant");
-  // Compute oop range
-  oop* start = start_of_static_fields();
-  oop* end = start + static_oop_field_size();
-  // Iterate over oops
-  while (start < end) {
-    if (PSScavenge::should_scavenge(*start)) {
-      assert(Universe::heap()->is_in(*start), "should be in heap");
-      pm->claim_or_forward_depth(start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    if (PSScavenge::should_scavenge(p)) { \
+      pm->claim_or_forward_depth(p); \
+    }, \
+    assert_nothing )
 }
 
 void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
-  // Compute oop range
-  oop* start = start_of_static_fields();
-  oop* end = start + static_oop_field_size();
-  // Iterate over oops
-  while (start < end) {
-    if (*start != NULL) {
-      assert(Universe::heap()->is_in(*start), "should be in heap");
-      // *start = (oop) cm->summary_data()->calc_new_pointer(*start);
-      PSParallelCompact::adjust_pointer(start);
-    }
-    start++;
-  }
+  InstanceKlass_OOP_ITERATE( \
+    start_of_static_fields(), static_oop_field_size(), \
+    PSParallelCompact::adjust_pointer(p), \
+    assert_is_in)
 }
 #endif // SERIALGC
 
 // This klass is alive but the implementor link is not followed/updated.
 // Subklass and sibling links are handled by Klass::follow_weak_klass_links