  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

 public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // Accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
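
  // Illustrative sketch (not in the original header): either overload
  // records the same block, assuming a hypothetical block start "p" of
  // "sz" words:
  //
  //   bot->alloc_block(p, sz);       // size form, or equivalently
  //   bot->alloc_block(p, p + sz);   // [blk_start, blk_end) form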

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
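
  // Illustrative sketch (not in the original header): splitting a
  // 100-word block at a hypothetical start "p" so that the first 30
  // words become their own block, assuming the BOT currently describes
  // [p, p + 100) as a single block:
  //
  //   bot->split_block(p, 100, 30);
  //   // The BOT now describes [p, p + 30) and [p + 30, p + 100).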

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
    mark_block(blk, blk + size, reducing);
  }
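
  // Illustrative sketch (not in the original header): after coalescing
  // adjacent chunks into one block at a hypothetical "p" of "sz" words,
  // a caller might refresh just the first BOT card:
  //
  //   bot->mark_block(p, sz);   // remaining cards assumed already correct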

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
    // Verify that the BOT shows [blk_start, blk_end) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
    allocated(blk, blk + size, reducing);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size);
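
  // Illustrative sketch (not in the original header): paired calls keep
  // _unallocated_block in step with a hypothetical allocate/free cycle
  // on a block at "p" of "sz" words:
  //
  //   bot->allocated(p, sz);   // may advance _unallocated_block to p + sz
  //   bot->freed(p, sz);       // may retract it when the block abuts it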

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;
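
  // Illustrative sketch (not in the original header): mapping a
  // hypothetical interior pointer "addr" back to the start of its block:
  //
  //   HeapWord* start = bot->block_start_unsafe(addr);
  //   // (the "unsafe" variant assumes addr points into the covered region)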

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;
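
  // Illustrative sketch (not in the original header): these checks are
  // intended for non-product builds (PRODUCT_RETURN gives them an empty
  // body in product builds), e.g.:
  //
  //   bot->verify_single_block(p, sz);   // asserts [p, p + sz) is one block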

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;