    // Fully drain the queue.
    trim_queue_to_threshold(0);
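    // Draining can itself produce more queue entries (each copied object
    // pushes its fields), so the loop condition below re-checks that the
    // queue is really empty.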
  } while (!_refs->is_empty());
}

HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed,
                                                      uint node_index) {

  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());
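  // The caller (copy_to_survivor_space(), below) asserts that the source
  // region_attr is in the collection set before calling here, so only the
  // destination attribute is validated at this point.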

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed,
                                                        node_index);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
  // ... (rest of allocate_in_next_plab and the head of the next
  // function elided) ...

  return dest(region_attr);
}

void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr, uint node_index) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                             dest_attr.type() == G1HeapRegionAttr::Old,
                                                             alloc_buf->word_sz() * HeapWordSize);
  } else {
    // ... (reporting for non-PLAB allocations elided) ...
  }
}

// ... (head of the surrounding evacuation function elided) ...

  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_attr.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }

  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();
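  // The source region's NUMA node index is threaded through the PLAB
  // allocator calls below, presumably so the evacuated copy is allocated
  // on the same node as the object it came from.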
|
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed, node_index);
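    // allocate_direct_or_new_plab() either satisfies the request outside a
    // PLAB or retires the current buffer and refills a new one; a failed
    // refill is reported back through plab_refill_failed.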
    if (obj_ptr == NULL) {
      assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str());
      obj_ptr = allocate_in_next_plab(&dest_attr, word_sz, plab_refill_failed, node_index);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
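  // forward_to_atomic() tries to install the forwarding pointer into the
  // old object's mark word; a NULL return means this thread won the race,
  // otherwise another worker has already claimed and forwarded the object.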
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    const uint young_index = from_region->young_index_in_cset();

    assert((from_region->is_young() && young_index > 0) ||
           (!from_region->is_young() && young_index == 0), "invariant");
