  69 }
  70
  71 HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  72                                                  const HeapWord* limit) const {
  73   // First we must round addr *up* to a possible object boundary.
- 74   addr = align_ptr_up(addr, HeapWordSize << _shifter);
+ 74   addr = align_up(addr, HeapWordSize << _shifter);
  75   size_t addrOffset = heapWordToOffset(addr);
  76   assert(limit != NULL, "limit must not be NULL");
  77   size_t limitOffset = heapWordToOffset(limit);
  78   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  79   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
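
The change on line 74 is part of replacing the type-specific align_ptr_up/align_size_up helpers with a single align_up. As a rough illustration of the rounding these helpers perform (a minimal sketch assuming a power-of-two alignment; demo_align_up is an illustrative name, not HotSpot's align.hpp code):

#include <cassert>
#include <cstddef>

// Round value up to the next multiple of alignment. Assumes alignment is a
// power of two, so the rounding can be done with a mask instead of a division.
static inline std::size_t demo_align_up(std::size_t value, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  // HeapWordSize << _shifter on line 74 supplies such a power-of-two alignment.
  assert(demo_align_up(100, 64) == 128);  // 100 rounds up to the next 64-byte boundary
  assert(demo_align_up(128, 64) == 128);  // already-aligned values are unchanged
  return 0;
}
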
 168 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 169   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 170
 171   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 172
-173   _max_chunk_capacity = align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
+173   _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
-174   size_t initial_chunk_capacity = align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
+174   size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 175
 176   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 177             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 178             _max_chunk_capacity,
 179             initial_chunk_capacity);
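
Lines 173 and 174 both use the same pattern: round the requested capacity up to the chunk granularity, then divide by the chunk size, which is a ceiling division yielding the number of chunks needed. A small self-contained sketch of that arithmetic (demo_chunks_for is a hypothetical name, not part of the patch):

#include <cassert>
#include <cstddef>

// Smallest number of fixed-size chunks that can hold `capacity` entries.
// Equivalent to align_up(capacity, chunk_size) / chunk_size, but written as a
// plain ceiling division.
static std::size_t demo_chunks_for(std::size_t capacity, std::size_t chunk_size) {
  return (capacity + chunk_size - 1) / chunk_size;
}

int main() {
  // With, say, 1024 entries per chunk: 4000 entries need 4 chunks, 4096 need exactly 4.
  assert(demo_chunks_for(4000, 1024) == 4);
  assert(demo_chunks_for(4096, 1024) == 4);
  return 0;
}
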
 712
 713 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 714   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 715
 716   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
-717   size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
+717   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 718
 719   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 720
 721   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 722
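
The clear_bitmap hunk keeps the same structure: line 716 computes how many bitmap bytes need clearing, line 717 splits that work into fixed-size chunks (another align-up-then-divide ceiling division), and line 719 caps the worker count at the number of chunks. A rough sketch of that sizing under assumed chunk and bitmap sizes (demo_workers_for_clear is an illustrative name, not HotSpot code):

#include <algorithm>
#include <cassert>
#include <cstddef>

// Split the bytes to clear into fixed-size chunks, then start no more workers
// than there are chunks to hand out.
static unsigned demo_workers_for_clear(std::size_t bytes_to_clear,
                                       std::size_t chunk_bytes,
                                       unsigned active_workers) {
  std::size_t const num_chunks = (bytes_to_clear + chunk_bytes - 1) / chunk_bytes;  // ceiling division
  return (unsigned)std::min(num_chunks, (std::size_t)active_workers);
}

int main() {
  // Three chunks of work keep only 3 of 8 available workers busy...
  assert(demo_workers_for_clear(3 * 1024 * 1024, 1024 * 1024, 8) == 3);
  // ...while a larger bitmap is capped by the available workers instead.
  assert(demo_workers_for_clear(64 * 1024 * 1024, 1024 * 1024, 8) == 8);
  return 0;
}
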