src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
changeset 54843 25c329958c70
parent 54465 c4f16445675a
child 55149 00f7fce88e25
child 58678 9cf78a70fa4f
comparing 54842:f9c8e16db3dd with 54843:25c329958c70
    73   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
    74   memset(_surviving_young_words, 0, real_length * sizeof(size_t));
    75 
    76   _plab_allocator = new G1PLABAllocator(_g1h->allocator());
    77 
-   78   _dest[InCSetState::NotInCSet]    = InCSetState::NotInCSet;
+   78   _dest[G1HeapRegionAttr::NotInCSet] = G1HeapRegionAttr::NotInCSet;
    79   // The dest for Young is used when the objects are aged enough to
    80   // need to be moved to the next space.
-   81   _dest[InCSetState::Young]        = InCSetState::Old;
+   81   _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
-   82   _dest[InCSetState::Old]          = InCSetState::Old;
+   82   _dest[G1HeapRegionAttr::Old]   = G1HeapRegionAttr::Old;
    83 
    84   _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
    85 
    86   _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];
    87 }
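
The _dest table initialized above routes evacuated objects: NotInCSet maps to itself, Young maps to Old (used once an object has aged past the tenuring threshold), and Old stays Old. A minimal standalone sketch of that lookup, using a hypothetical RegionAttr enum and DestTable struct rather than the real G1HeapRegionAttr type:

    // Illustrative only -- RegionAttr and DestTable are hypothetical types, not HotSpot code.
    enum RegionAttr { NotInCSet = 0, Young = 1, Old = 2, NumAttrs = 3 };

    struct DestTable {
      RegionAttr _dest[NumAttrs];

      DestTable() {
        _dest[NotInCSet] = NotInCSet;  // not being evacuated, nothing to route
        _dest[Young]     = Old;        // aged-out young objects get promoted
        _dest[Old]       = Old;        // old objects are evacuated within old
      }

      RegionAttr dest(RegionAttr source) const { return _dest[source]; }
    };
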
   155     // Fully drain the queue.
   156     trim_queue_to_threshold(0);
   157   } while (!_refs->is_empty());
   158 }
   159 
-  160 HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
+  160 HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr const region_attr,
-  161                                                       InCSetState* dest,
+  161                                                       G1HeapRegionAttr* dest,
   162                                                       size_t word_sz,
   163                                                       bool previous_plab_refill_failed) {
-  164   assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
+  164   assert(region_attr.is_in_cset_or_humongous(), "Unexpected region attr type: %s", region_attr.get_type_str());
-  165   assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
+  165   assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());
   166 
   167   // Right now we only have two types of regions (young / old) so
   168   // let's keep the logic here simple. We can generalize it when necessary.
   169   if (dest->is_young()) {
   170     bool plab_refill_in_old_failed = false;
-  171     HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
+  171     HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
   172                                                         word_sz,
   173                                                         &plab_refill_in_old_failed);
   174     // Make sure that we won't attempt to copy any other objects out
   175     // of a survivor region (given that apparently we cannot allocate
   176     // any new ones) to avoid coming into this slow path again and again.
   188       _old_gen_is_full = plab_refill_in_old_failed;
   189     }
   190     return obj_ptr;
   191   } else {
   192     _old_gen_is_full = previous_plab_refill_failed;
-  193     assert(dest->is_old(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
+  193     assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
   194     // no other space to try.
   195     return NULL;
   196   }
   197 }
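
allocate_in_next_plab is the slow path taken when the PLAB for the intended destination cannot be refilled: an object headed for survivor space is retargeted to an old-generation PLAB, while a failed old allocation leaves nothing further to try and surfaces as an evacuation failure in the caller. A simplified, self-contained model of that fallback; Dest, try_plab_alloc and allocate_in_next_space are illustrative stand-ins, not HotSpot APIs:

    #include <stddef.h>

    enum Dest { DestYoung, DestOld };

    // Stand-in for a PLAB allocator call: tries to allocate word_sz words
    // for the given destination and reports whether a PLAB refill failed.
    typedef void* (*try_plab_alloc)(Dest dest, size_t word_sz, bool* refill_failed);

    void* allocate_in_next_space(try_plab_alloc alloc, Dest* dest, size_t word_sz,
                                 bool previous_plab_refill_failed, bool* old_gen_is_full) {
      if (*dest == DestYoung) {
        // Survivor space is exhausted: retry in old and retarget the object there.
        bool plab_refill_in_old_failed = false;
        void* obj_ptr = alloc(DestOld, word_sz, &plab_refill_in_old_failed);
        *dest = DestOld;
        if (obj_ptr == NULL) {
          // Old could not be refilled either; remember that so later objects
          // fail fast instead of retrying this slow path over and over.
          *old_gen_is_full = plab_refill_in_old_failed;
        }
        return obj_ptr;  // may still be NULL -> evacuation failure
      }
      // Already targeting old: there is no other space to try.
      *old_gen_is_full = previous_plab_refill_failed;
      return NULL;
    }
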
   198 
-  199 InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
+  199 G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age) {
-  200   if (state.is_young()) {
+  200   if (region_attr.is_young()) {
   201     age = !m->has_displaced_mark_helper() ? m->age()
   202                                           : m->displaced_mark_helper()->age();
   203     if (age < _tenuring_threshold) {
-  204       return state;
+  204       return region_attr;
   205     }
   206   }
-  207   return dest(state);
+  207   return dest(region_attr);
   208 }
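
next_region_attr reads the object's age from the mark word (or from the displaced mark when the header is displaced) and keeps a young object in its young destination while the age is below the tenuring threshold; everything else is routed through the _dest table. A sketch of just that decision, reusing the hypothetical RegionAttr and DestTable types from the earlier sketch and leaving the mark-word handling out:

    // Illustrative only; extracting the age from the mark word is omitted.
    RegionAttr next_region_attr(const DestTable& table, RegionAttr source,
                                unsigned age, unsigned tenuring_threshold) {
      if (source == Young && age < tenuring_threshold) {
        return source;            // still young enough: stay in survivor space
      }
      return table.dest(source);  // otherwise route through the dest table
    }
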
   209 
-  210 void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
+  210 void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
   211                                                   oop const old, size_t word_sz, uint age,
   212                                                   HeapWord * const obj_ptr) const {
-  213   PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state);
+  213   PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr);
   214   if (alloc_buf->contains(obj_ptr)) {
   215     _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
-  216                                                              dest_state.value() == InCSetState::Old,
+  216                                                              dest_attr.type() == G1HeapRegionAttr::Old,
   217                                                              alloc_buf->word_sz() * HeapWordSize);
   218   } else {
   219     _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
-  220                                                               dest_state.value() == InCSetState::Old);
+  220                                                               dest_attr.type() == G1HeapRegionAttr::Old);
   221   }
   222 }
   223 
-  224 oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
+  224 oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
   225                                                  oop const old,
   226                                                  markOop const old_mark) {
   227   const size_t word_sz = old->size();
   228   HeapRegion* const from_region = _g1h->heap_region_containing(old);
   229   // +1 to make the -1 indexes valid...
   230   const int young_index = from_region->young_index_in_cset()+1;
   231   assert( (from_region->is_young() && young_index >  0) ||
   232          (!from_region->is_young() && young_index == 0), "invariant" );
   233 
   234   uint age = 0;
-  235   InCSetState dest_state = next_state(state, old_mark, age);
+  235   G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
   236   // The second clause is to prevent premature evacuation failure in case there
   237   // is still space in survivor, but old gen is full.
-  238   if (_old_gen_is_full && dest_state.is_old()) {
+  238   if (_old_gen_is_full && dest_attr.is_old()) {
   239     return handle_evacuation_failure_par(old, old_mark);
   240   }
-  241   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz);
+  241   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz);
   242 
   243   // PLAB allocations should succeed most of the time, so we'll
   244   // normally check against NULL once and that's it.
   245   if (obj_ptr == NULL) {
   246     bool plab_refill_failed = false;
-  247     obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, &plab_refill_failed);
+  247     obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed);
   248     if (obj_ptr == NULL) {
-  249       obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, plab_refill_failed);
+  249       obj_ptr = allocate_in_next_plab(region_attr, &dest_attr, word_sz, plab_refill_failed);
   250       if (obj_ptr == NULL) {
   251         // This will either forward-to-self, or detect that someone else has
   252         // installed a forwarding pointer.
   253         return handle_evacuation_failure_par(old, old_mark);
   254       }
   255     }
   256     if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
   257       // The events are checked individually as part of the actual commit
-  258       report_promotion_event(dest_state, old, word_sz, age, obj_ptr);
+  258       report_promotion_event(dest_attr, old, word_sz, age, obj_ptr);
   259     }
   260   }
   261 
   262   assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
   263   assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
   265 #ifndef PRODUCT
   266   // Should this evacuation fail?
   267   if (_g1h->evacuation_should_fail()) {
   268     // Doing this after all the allocation attempts also tests the
   269     // undo_allocation() method too.
-  270     _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
+  270     _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
   271     return handle_evacuation_failure_par(old, old_mark);
   272   }
   273 #endif // !PRODUCT
   274 
   275   // We're going to allocate linearly, so might as well prefetch ahead.
   278   const oop obj = oop(obj_ptr);
   279   const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
   280   if (forward_ptr == NULL) {
   281     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
   282 
-  283     if (dest_state.is_young()) {
+  283     if (dest_attr.is_young()) {
   284       if (age < markOopDesc::max_age) {
   285         age++;
   286       }
   287       if (old_mark->has_displaced_mark_helper()) {
   288         // In this case, we have to install the mark word first,
   298     } else {
   299       obj->set_mark_raw(old_mark);
   300     }
   301 
   302     if (G1StringDedup::is_enabled()) {
-  303       const bool is_from_young = state.is_young();
+  303       const bool is_from_young = region_attr.is_young();
-  304       const bool is_to_young = dest_state.is_young();
+  304       const bool is_to_young = dest_attr.is_young();
   305       assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
   306              "sanity");
   307       assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
   308              "sanity");
   309       G1StringDedup::enqueue_from_evacuation(is_from_young,
   320       // length field of the from-space object.
   321       arrayOop(obj)->set_length(0);
   322       oop* old_p = set_partial_array_mask(old);
   323       do_oop_partial_array(old_p);
   324     } else {
-  325       G1ScanInYoungSetter x(&_scanner, dest_state.is_young());
+  325       G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
   326       obj->oop_iterate_backwards(&_scanner);
   327     }
   328     return obj;
   329   } else {
-  330     _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
+  330     _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
   331     return forward_ptr;
   332   }
   333 }
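
The forward_to_atomic call above is what makes parallel evacuation safe: every worker that reaches a live object tries to install a forwarding pointer into its mark word with a CAS, the winner copies the object into the space it allocated, and a loser undoes its speculative PLAB allocation and uses the winner's copy instead. A minimal standalone sketch of that claim-then-copy protocol, using std::atomic and the hypothetical FakeObject/evacuate names instead of the real mark-word machinery:

    #include <atomic>
    #include <cstring>

    struct FakeObject {
      std::atomic<FakeObject*> forwardee{nullptr};  // stands in for the mark word
      char payload[64];
    };

    // Returns the surviving copy: either the one this thread installed or the
    // one another thread won the race with.
    FakeObject* evacuate(FakeObject* old_obj, FakeObject* new_location,
                         void (*undo_allocation)(FakeObject*)) {
      FakeObject* expected = nullptr;
      // Try to claim the object by installing a forwarding pointer (CAS).
      if (old_obj->forwardee.compare_exchange_strong(expected, new_location,
                                                     std::memory_order_relaxed)) {
        // We won the race: copy the contents into the space we allocated.
        std::memcpy(new_location->payload, old_obj->payload, sizeof(old_obj->payload));
        return new_location;
      }
      // Another worker already forwarded the object: give back our allocation
      // and use the copy it installed (left in 'expected' by the failed CAS).
      undo_allocation(new_location);
      return expected;
    }
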
   334 
   335 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {