# HG changeset patch
# User tschatzl
# Date 1438869153 -7200
# Node ID 0891f3fa84fce94adae0a235211481b1f6d6055c
# Parent  6f0257cb64934a9b3dc111126ab91cdc453620b1
8133047: Rename G1ParScanThreadState::_queue_num to _worker_id
Reviewed-by: jmasa, jwilhelm

diff -r 6f0257cb6493 -r 0891f3fa84fc hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Aug 06 15:49:52 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Aug 06 15:52:33 2015 +0200
@@ -4285,18 +4285,18 @@
   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
 }
 
-void G1CollectedHeap::preserve_mark_during_evac_failure(uint queue_num, oop obj, markOop m) {
+void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
   if (!_evacuation_failed) {
     _evacuation_failed = true;
   }
 
-  _evacuation_failed_info_array[queue_num].register_copy_failure(obj->size());
+  _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
 
   // We want to call the "for_promotion_failure" version only in the
   // case of a promotion failure.
   if (m->must_be_preserved_for_promotion_failure(obj)) {
     OopAndMarkOop elem(obj, m);
-    _preserved_objs[queue_num].push(elem);
+    _preserved_objs[worker_id].push(elem);
   }
 }
 
@@ -4340,7 +4340,7 @@
 
   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
 
-  assert(_worker_id == _par_scan_state->queue_num(), "sanity");
+  assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 
   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
diff -r 6f0257cb6493 -r 0891f3fa84fc hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Aug 06 15:49:52 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Aug 06 15:52:33 2015 +0200
@@ -854,7 +854,7 @@
 
   // Preserve the mark of "obj", if necessary, in preparation for its mark
   // word being overwritten with a self-forwarding-pointer.
-  void preserve_mark_during_evac_failure(uint queue, oop obj, markOop m);
+  void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
 
 #ifndef PRODUCT
   // Support for forcing evacuation failures.
diff -r 6f0257cb6493 -r 0891f3fa84fc hotspot/src/share/vm/gc/g1/g1OopClosures.cpp
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Thu Aug 06 15:49:52 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Thu Aug 06 15:52:33 2015 +0200
@@ -48,7 +48,7 @@
   assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
 
   _par_scan_state = par_scan_state;
-  _worker_id = par_scan_state->queue_num();
+  _worker_id = par_scan_state->worker_id();
 
   assert(_worker_id < ParallelGCThreads,
          err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads));
diff -r 6f0257cb6493 -r 0891f3fa84fc hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Thu Aug 06 15:49:52 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Thu Aug 06 15:52:33 2015 +0200
@@ -31,13 +31,13 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp)
   : _g1h(g1h),
-    _refs(g1h->task_queue(queue_num)),
+    _refs(g1h->task_queue(worker_id)),
     _dcq(&g1h->dirty_card_queue_set()),
     _ct_bs(g1h->g1_barrier_set()),
     _g1_rem(g1h->g1_rem_set()),
-    _hash_seed(17), _queue_num(queue_num),
+    _hash_seed(17), _worker_id(worker_id),
     _term_attempts(0),
     _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
     _age_table(false), _scanner(g1h, rp),
@@ -93,7 +93,7 @@
   st->print_cr("%3u %9.2f %9.2f %6.2f "
                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
-               _queue_num, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
+               _worker_id, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
               term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
               (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
               alloc_buffer_waste * HeapWordSize / K,
@@ -267,7 +267,7 @@
              "sanity");
       G1StringDedup::enqueue_from_evacuation(is_from_young,
                                              is_to_young,
-                                             queue_num(),
+                                             _worker_id,
                                              obj);
     }
 
@@ -307,7 +307,7 @@
       _g1h->hr_printer()->evac_failure(r);
     }
 
-    _g1h->preserve_mark_during_evac_failure(_queue_num, old, m);
+    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
 
     _scanner.set_region(r);
     old->oop_iterate_backwards(&_scanner);
diff -r 6f0257cb6493 -r 0891f3fa84fc hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Thu Aug 06 15:49:52 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Thu Aug 06 15:52:33 2015 +0200
@@ -55,7 +55,7 @@
   G1ParScanClosure  _scanner;
 
   int  _hash_seed;
-  uint _queue_num;
+  uint _worker_id;
 
   size_t _term_attempts;
 
@@ -85,7 +85,7 @@
   }
 
  public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
+  G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, ReferenceProcessor* rp);
   ~G1ParScanThreadState();
 
   ageTable* age_table() { return &_age_table; }
@@ -112,8 +112,7 @@
     }
   }
 
-  int* hash_seed() { return &_hash_seed; }
-  uint queue_num() { return _queue_num; }
+  uint worker_id() { return _worker_id; }
 
   size_t term_attempts() const { return _term_attempts; }
   void note_term_attempt() { _term_attempts++; }
diff -r 6f0257cb6493 -r 0891f3fa84fc hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Thu Aug 06 15:49:52 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Thu Aug 06 15:52:33 2015 +0200
@@ -56,7 +56,7 @@
   }
 
   assert(obj != NULL, "Must be");
-  update_rs(from, p, queue_num());
+  update_rs(from, p, _worker_id);
 }
 
 template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
@@ -136,7 +136,7 @@
 
 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
   StarTask stolen_task;
-  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
+  while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
     assert(verify_task(stolen_task), "sanity");
     dispatch_reference(stolen_task);