350 last_o = o; |
350 last_o = o; |
351 } |
351 } |
352 } |
352 } |
353 |
353 |
//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace
//////////////////////////////////////////////////////////////////////
|
358 // The block [blk_start, blk_end) has been allocated; |
|
359 // adjust the block offset table to represent this information; |
|
360 // NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using |
|
361 // the somewhat more lightweight split_block() or |
|
362 // (when init_to_zero()) mark_block() wherever possible. |
|
363 // right-open interval: [blk_start, blk_end) |
|
364 void |
|
365 BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start, |
|
366 HeapWord* blk_end) { |
|
367 assert(blk_start != NULL && blk_end > blk_start, |
|
368 "phantom block"); |
|
369 single_block(blk_start, blk_end); |
|
370 allocated(blk_start, blk_end); |
|
371 } |
|
372 |
|
373 // Adjust BOT to show that a previously whole block has been split |
|
374 // into two. We verify the BOT for the first part (prefix) and |
|
375 // update the BOT for the second part (suffix). |
|
376 // blk is the start of the block |
|
377 // blk_size is the size of the original block |
|
378 // left_blk_size is the size of the first part of the split |
|
379 void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk, |
|
380 size_t blk_size, |
|
381 size_t left_blk_size) { |
|
382 // Verify that the BOT shows [blk, blk + blk_size) to be one block. |
|
383 verify_single_block(blk, blk_size); |
|
384 // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size) |
|
385 // is one single block. |
|
386 assert(blk_size > 0, "Should be positive"); |
|
387 assert(left_blk_size > 0, "Should be positive"); |
|
388 assert(left_blk_size < blk_size, "Not a split"); |
|
389 |
|
390 // Start addresses of prefix block and suffix block. |
|
391 HeapWord* pref_addr = blk; |
|
392 HeapWord* suff_addr = blk + left_blk_size; |
|
393 HeapWord* end_addr = blk + blk_size; |
|
394 |
|
395 // Indices for starts of prefix block and suffix block. |
|
396 size_t pref_index = _array->index_for(pref_addr); |
|
397 if (_array->address_for_index(pref_index) != pref_addr) { |
|
398 // pref_addr does not begin pref_index |
|
399 pref_index++; |
|
400 } |
|
401 |
|
402 size_t suff_index = _array->index_for(suff_addr); |
|
403 if (_array->address_for_index(suff_index) != suff_addr) { |
|
404 // suff_addr does not begin suff_index |
|
405 suff_index++; |
|
406 } |
|
407 |
|
408 // Definition: A block B, denoted [B_start, B_end) __starts__ |
|
409 // a card C, denoted [C_start, C_end), where C_start and C_end |
|
410 // are the heap addresses that card C covers, iff |
|
411 // B_start <= C_start < B_end. |
|
412 // |
|
413 // We say that a card C "is started by" a block B, iff |
|
414 // B "starts" C. |
|
415 // |
|
416 // Note that the cardinality of the set of cards {C} |
|
417 // started by a block B can be 0, 1, or more. |
|
418 // |
|
419 // Below, pref_index and suff_index are, respectively, the |
|
420 // first (least) card indices that the prefix and suffix of |
|
421 // the split start; end_index is one more than the index of |
|
422 // the last (greatest) card that blk starts. |
|
423 size_t end_index = _array->index_for(end_addr - 1) + 1; |
|
424 |
|
425 // Calculate the # cards that the prefix and suffix affect. |
|
426 size_t num_pref_cards = suff_index - pref_index; |
|
427 |
|
428 size_t num_suff_cards = end_index - suff_index; |
|
429 // Change the cards that need changing |
|
430 if (num_suff_cards > 0) { |
|
431 HeapWord* boundary = _array->address_for_index(suff_index); |
|
432 // Set the offset card for suffix block |
|
433 _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */); |
|
434 // Change any further cards that need changing in the suffix |
|
435 if (num_pref_cards > 0) { |
|
436 if (num_pref_cards >= num_suff_cards) { |
|
437 // Unilaterally fix all of the suffix cards: closed card |
|
438 // index interval in args below. |
|
439 set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */); |
|
440 } else { |
|
441 // Unilaterally fix the first (num_pref_cards - 1) following |
|
442 // the "offset card" in the suffix block. |
|
443 const size_t right_most_fixed_index = suff_index + num_pref_cards - 1; |
|
444 set_remainder_to_point_to_start_incl(suff_index + 1, |
|
445 right_most_fixed_index, true /* reducing */); |
|
446 // Fix the appropriate cards in the remainder of the |
|
447 // suffix block -- these are the last num_pref_cards |
|
448 // cards in each power block of the "new" range plumbed |
|
449 // from suff_addr. |
|
450 bool more = true; |
|
451 uint i = 1; |
|
452 // Fix the first power block with back_by > num_pref_cards. |
|
453 while (more && (i < BOTConstants::N_powers)) { |
|
454 size_t back_by = BOTConstants::power_to_cards_back(i); |
|
455 size_t right_index = suff_index + back_by - 1; |
|
456 size_t left_index = right_index - num_pref_cards + 1; |
|
457 if (right_index >= end_index - 1) { // last iteration |
|
458 right_index = end_index - 1; |
|
459 more = false; |
|
460 } |
|
461 if (left_index <= right_most_fixed_index) { |
|
462 left_index = right_most_fixed_index + 1; |
|
463 } |
|
464 if (back_by > num_pref_cards) { |
|
465 // Fill in the remainder of this "power block", if it |
|
466 // is non-null. |
|
467 if (left_index <= right_index) { |
|
468 _array->set_offset_array(left_index, right_index, |
|
469 BOTConstants::N_words + i - 1, true /* reducing */); |
|
470 } else { |
|
471 more = false; // we are done |
|
472 assert((end_index - 1) == right_index, "Must be at the end."); |
|
473 } |
|
474 i++; |
|
475 break; |
|
476 } |
|
477 i++; |
|
478 } |
|
479 // Fix the rest of the power blocks. |
|
480 while (more && (i < BOTConstants::N_powers)) { |
|
481 size_t back_by = BOTConstants::power_to_cards_back(i); |
|
482 size_t right_index = suff_index + back_by - 1; |
|
483 size_t left_index = right_index - num_pref_cards + 1; |
|
484 if (right_index >= end_index - 1) { // last iteration |
|
485 right_index = end_index - 1; |
|
486 if (left_index > right_index) { |
|
487 break; |
|
488 } |
|
489 more = false; |
|
490 } |
|
491 assert(left_index <= right_index, "Error"); |
|
492 _array->set_offset_array(left_index, right_index, BOTConstants::N_words + i - 1, true /* reducing */); |
|
493 i++; |
|
494 } |
|
495 } |
|
496 } // else no more cards to fix in suffix |
|
497 } // else nothing needs to be done |
|
498 // Verify that we did the right thing |
|
499 verify_single_block(pref_addr, left_blk_size); |
|
500 verify_single_block(suff_addr, blk_size - left_blk_size); |
|
501 } |
|
502 |
|
503 |
|
504 // Mark the BOT such that if [blk_start, blk_end) straddles a card |
|
505 // boundary, the card following the first such boundary is marked |
|
506 // with the appropriate offset. |
|
507 // NOTE: this method does _not_ adjust _unallocated_block or |
|
508 // any cards subsequent to the first one. |
|
509 void |
|
510 BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start, |
|
511 HeapWord* blk_end, bool reducing) { |
|
512 do_block_internal(blk_start, blk_end, Action_mark, reducing); |
|
513 } |
|
514 |
|
515 HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe( |
|
516 const void* addr) const { |
|
517 assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); |
|
518 assert(_bottom <= addr && addr < _end, |
|
519 "addr must be covered by this Array"); |
|
520 // Must read this exactly once because it can be modified by parallel |
|
521 // allocation. |
|
522 HeapWord* ub = _unallocated_block; |
|
523 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) { |
|
524 assert(ub < _end, "tautology (see above)"); |
|
525 return ub; |
|
526 } |
|
527 |
|
528 // Otherwise, find the block start using the table. |
|
529 size_t index = _array->index_for(addr); |
|
530 HeapWord* q = _array->address_for_index(index); |
|
531 |
|
532 uint offset = _array->offset_array(index); // Extend u_char to uint. |
|
533 while (offset >= BOTConstants::N_words) { |
|
534 // The excess of the offset from N_words indicates a power of Base |
|
535 // to go back by. |
|
536 size_t n_cards_back = BOTConstants::entry_to_cards_back(offset); |
|
537 q -= (BOTConstants::N_words * n_cards_back); |
|
538 assert(q >= _sp->bottom(), |
|
539 "q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT, |
|
540 p2i(q), p2i(_sp->bottom())); |
|
541 assert(q < _sp->end(), |
|
542 "q = " PTR_FORMAT " crossed above end = " PTR_FORMAT, |
|
543 p2i(q), p2i(_sp->end())); |
|
544 index -= n_cards_back; |
|
545 offset = _array->offset_array(index); |
|
546 } |
|
547 assert(offset < BOTConstants::N_words, "offset too large"); |
|
548 index--; |
|
549 q -= offset; |
|
550 assert(q >= _sp->bottom(), |
|
551 "q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT, |
|
552 p2i(q), p2i(_sp->bottom())); |
|
553 assert(q < _sp->end(), |
|
554 "q = " PTR_FORMAT " crossed above end = " PTR_FORMAT, |
|
555 p2i(q), p2i(_sp->end())); |
|
556 HeapWord* n = q; |
|
557 |
|
558 while (n <= addr) { |
|
559 debug_only(HeapWord* last = q); // for debugging |
|
560 q = n; |
|
561 n += _sp->block_size(n); |
|
562 assert(n > q, |
|
563 "Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT "," |
|
564 " while querying blk_start(" PTR_FORMAT ")" |
|
565 " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")", |
|
566 p2i(n), p2i(last), p2i(addr), p2i(_sp->bottom()), p2i(_sp->end())); |
|
567 } |
|
568 assert(q <= addr, |
|
569 "wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")", |
|
570 p2i(q), p2i(addr)); |
|
571 assert(addr <= n, |
|
572 "wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", |
|
573 p2i(addr), p2i(n)); |
|
574 return q; |
|
575 } |
|
576 |
|
577 HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful( |
|
578 const void* addr) const { |
|
579 assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); |
|
580 |
|
581 assert(_bottom <= addr && addr < _end, |
|
582 "addr must be covered by this Array"); |
|
583 // Must read this exactly once because it can be modified by parallel |
|
584 // allocation. |
|
585 HeapWord* ub = _unallocated_block; |
|
586 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) { |
|
587 assert(ub < _end, "tautology (see above)"); |
|
588 return ub; |
|
589 } |
|
590 |
|
591 // Otherwise, find the block start using the table, but taking |
|
592 // care (cf block_start_unsafe() above) not to parse any objects/blocks |
|
593 // on the cards themselves. |
|
594 size_t index = _array->index_for(addr); |
|
595 assert(_array->address_for_index(index) == addr, |
|
596 "arg should be start of card"); |
|
597 |
|
598 HeapWord* q = (HeapWord*)addr; |
|
599 uint offset; |
|
600 do { |
|
601 offset = _array->offset_array(index); |
|
602 if (offset < BOTConstants::N_words) { |
|
603 q -= offset; |
|
604 } else { |
|
605 size_t n_cards_back = BOTConstants::entry_to_cards_back(offset); |
|
606 q -= (n_cards_back * BOTConstants::N_words); |
|
607 index -= n_cards_back; |
|
608 } |
|
609 } while (offset >= BOTConstants::N_words); |
|
610 assert(q <= addr, "block start should be to left of arg"); |
|
611 return q; |
|
612 } |
|
613 |
|
614 #ifndef PRODUCT |
|
615 // Verification & debugging - ensure that the offset table reflects the fact |
|
616 // that the block [blk_start, blk_end) or [blk, blk + size) is a |
|
617 // single block of storage. NOTE: can't const this because of |
|
618 // call to non-const do_block_internal() below. |
|
619 void BlockOffsetArrayNonContigSpace::verify_single_block( |
|
620 HeapWord* blk_start, HeapWord* blk_end) { |
|
621 if (VerifyBlockOffsetArray) { |
|
622 do_block_internal(blk_start, blk_end, Action_check); |
|
623 } |
|
624 } |
|
625 |
|
626 void BlockOffsetArrayNonContigSpace::verify_single_block( |
|
627 HeapWord* blk, size_t size) { |
|
628 verify_single_block(blk, blk + size); |
|
629 } |
|
630 |
|
631 // Verify that the given block is before _unallocated_block |
|
632 void BlockOffsetArrayNonContigSpace::verify_not_unallocated( |
|
633 HeapWord* blk_start, HeapWord* blk_end) const { |
|
634 if (BlockOffsetArrayUseUnallocatedBlock) { |
|
635 assert(blk_start < blk_end, "Block inconsistency?"); |
|
636 assert(blk_end <= _unallocated_block, "_unallocated_block problem"); |
|
637 } |
|
638 } |
|
639 |
|
640 void BlockOffsetArrayNonContigSpace::verify_not_unallocated( |
|
641 HeapWord* blk, size_t size) const { |
|
642 verify_not_unallocated(blk, blk + size); |
|
643 } |
|
644 #endif // PRODUCT |
|
645 |
|
646 size_t BlockOffsetArrayNonContigSpace::last_active_index() const { |
|
647 if (_unallocated_block == _bottom) { |
|
648 return 0; |
|
649 } else { |
|
650 return _array->index_for(_unallocated_block - 1); |
|
651 } |
|
652 } |
|
653 |
|
//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////
357 |
658 HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const { |
358 HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const { |
659 assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); |
359 assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); |