--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp Sat Feb 27 00:07:03 2016 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,19 +36,19 @@
{
// Ergonomically select initial concurrent refinement parameters
if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
- FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, (intx)ParallelGCThreads);
+ FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, ParallelGCThreads);
}
set_green_zone(G1ConcRefinementGreenZone);
if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
}
- set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
+ set_yellow_zone(MAX2(G1ConcRefinementYellowZone, green_zone()));
if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
}
- set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
+ set_red_zone(MAX2(G1ConcRefinementRedZone, yellow_zone()));
}
ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure, jint* ecode) {
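
(Aside, not part of the patch: a minimal standalone C++ sketch of how the ergonomic defaults above compose when none of the G1ConcRefinement* zone flags are set explicitly; the helper and struct names are illustrative.)

#include <algorithm>
#include <cstddef>

// Illustrative only: default zone sizing mirroring the ergonomics hunk above.
struct ZoneDefaults { size_t green, yellow, red; };

static ZoneDefaults ergonomic_zones(size_t parallel_gc_threads) {
  size_t green  = parallel_gc_threads;            // G1ConcRefinementGreenZone default
  size_t yellow = std::max(green * 3, green);     // yellow defaults to 3x green, clamped to >= green
  size_t red    = std::max(yellow * 2, yellow);   // red defaults to 2x yellow, clamped to >= yellow
  return ZoneDefaults{ green, yellow, red };
}
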
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp Sat Feb 27 00:07:03 2016 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,11 +61,11 @@
* 2) green = 0. Means no caching. Can be a good way to minimize the
* amount of time spent updating rsets during a collection.
*/
- int _green_zone;
- int _yellow_zone;
- int _red_zone;
+ size_t _green_zone;
+ size_t _yellow_zone;
+ size_t _red_zone;
- int _thread_threshold_step;
+ size_t _thread_threshold_step;
// We delay the refinement of 'hot' cards using the hot card cache.
G1HotCardCache _hot_card_cache;
@@ -100,17 +100,17 @@
void print_worker_threads_on(outputStream* st) const;
- void set_green_zone(int x) { _green_zone = x; }
- void set_yellow_zone(int x) { _yellow_zone = x; }
- void set_red_zone(int x) { _red_zone = x; }
+ void set_green_zone(size_t x) { _green_zone = x; }
+ void set_yellow_zone(size_t x) { _yellow_zone = x; }
+ void set_red_zone(size_t x) { _red_zone = x; }
- int green_zone() const { return _green_zone; }
- int yellow_zone() const { return _yellow_zone; }
- int red_zone() const { return _red_zone; }
+ size_t green_zone() const { return _green_zone; }
+ size_t yellow_zone() const { return _yellow_zone; }
+ size_t red_zone() const { return _red_zone; }
uint worker_thread_num() const { return _n_worker_threads; }
- int thread_threshold_step() const { return _thread_threshold_step; }
+ size_t thread_threshold_step() const { return _thread_threshold_step; }
G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp Sat Feb 27 00:07:03 2016 +0000
@@ -67,10 +67,12 @@
void ConcurrentG1RefineThread::initialize() {
// Current thread activation threshold
- _threshold = MIN2<int>(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
+ _threshold = MIN2(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
cg1r()->yellow_zone());
// A thread deactivates once the number of buffers reaches a deactivation threshold
- _deactivation_threshold = MAX2<int>(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
+ _deactivation_threshold =
+ MAX2(_threshold - MIN2(_threshold, cg1r()->thread_threshold_step()),
+ cg1r()->green_zone());
}
void ConcurrentG1RefineThread::wait_for_completed_buffers() {
@@ -127,14 +129,14 @@
}
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
- log_debug(gc, refine)("Activated %d, on threshold: %d, current: %d",
+ log_debug(gc, refine)("Activated %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
_worker_id, _threshold, dcqs.completed_buffers_num());
{
SuspendibleThreadSetJoiner sts_join;
do {
- int curr_buffer_num = (int)dcqs.completed_buffers_num();
+ size_t curr_buffer_num = dcqs.completed_buffers_num();
// If the number of buffers falls back into the yellow zone,
// that means that the transition period after the evacuation pause has ended.
if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
@@ -151,7 +153,7 @@
false /* during_pause */));
deactivate();
- log_debug(gc, refine)("Deactivated %d, off threshold: %d, current: %d",
+ log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
_worker_id, _deactivation_threshold,
dcqs.completed_buffers_num());
}
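
(Aside, not part of the patch: once the thresholds become size_t, the subtraction in initialize() can no longer go negative, it would wrap instead, which is what the added MIN2 clamp prevents. A minimal standalone sketch with illustrative names:)

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative only: clamp the subtrahend so an unsigned subtraction cannot wrap.
static size_t deactivation_threshold(size_t threshold, size_t step, size_t green_zone) {
  size_t decrement = std::min(threshold, step);        // avoid size_t underflow
  return std::max(threshold - decrement, green_zone);  // never drop below the green zone
}

int main() {
  // step larger than threshold: a naive "threshold - step" would wrap to a huge value
  std::printf("%zu\n", deactivation_threshold(2, 5, 1));  // prints 1
  return 0;
}
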
--- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp Sat Feb 27 00:07:03 2016 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,11 +53,11 @@
// The closure applied to completed log buffers.
CardTableEntryClosure* _refine_closure;
- int _thread_threshold_step;
+ size_t _thread_threshold_step;
// This thread activation threshold
- int _threshold;
+ size_t _threshold;
// This thread deactivation threshold
- int _deactivation_threshold;
+ size_t _deactivation_threshold;
void wait_for_completed_buffers();
--- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp Sat Feb 27 00:07:03 2016 +0000
@@ -207,22 +207,24 @@
}
-BufferNode* DirtyCardQueueSet::get_completed_buffer(int stop_at) {
+BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
BufferNode* nd = NULL;
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- if ((int)_n_completed_buffers <= stop_at) {
+ if (_n_completed_buffers <= stop_at) {
_process_completed = false;
return NULL;
}
if (_completed_buffers_head != NULL) {
nd = _completed_buffers_head;
+ assert(_n_completed_buffers > 0, "Invariant");
_completed_buffers_head = nd->next();
- if (_completed_buffers_head == NULL)
+ _n_completed_buffers--;
+ if (_completed_buffers_head == NULL) {
+ assert(_n_completed_buffers == 0, "Invariant");
_completed_buffers_tail = NULL;
- _n_completed_buffers--;
- assert(_n_completed_buffers >= 0, "Invariant");
+ }
}
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
return nd;
@@ -230,7 +232,7 @@
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
uint worker_i,
- int stop_at,
+ size_t stop_at,
bool during_pause) {
assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
BufferNode* nd = get_completed_buffer(stop_at);
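
(Aside, not part of the patch: a simplified sketch of the list-pop invariant the reworked get_completed_buffer() maintains. With an unsigned counter the old ">= 0" assert is vacuous, so the meaningful checks are count > 0 before the pop and count == 0 exactly when the list empties; Node and Queue below are illustrative stand-ins, with no locking.)

#include <cassert>
#include <cstddef>

struct Node { Node* next; };

struct Queue {
  Node*  head = nullptr;
  Node*  tail = nullptr;
  size_t count = 0;

  Node* pop() {
    if (head == nullptr) return nullptr;
    assert(count > 0);          // must hold before removing a node
    Node* nd = head;
    head = nd->next;
    count--;                    // decrement once, unconditionally
    if (head == nullptr) {
      assert(count == 0);       // list and counter empty out together
      tail = nullptr;
    }
    return nd;
  }
};
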
--- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.hpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.hpp Sat Feb 27 00:07:03 2016 +0000
@@ -134,10 +134,10 @@
// is returned to the completed buffer set, and this call returns false.
bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
uint worker_i,
- int stop_at,
+ size_t stop_at,
bool during_pause);
- BufferNode* get_completed_buffer(int stop_at);
+ BufferNode* get_completed_buffer(size_t stop_at);
// Applies the current closure to all completed buffers,
// non-consumptively.
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Sat Feb 27 00:07:03 2016 +0000
@@ -1984,8 +1984,8 @@
JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
DirtyCardQ_CBL_mon,
DirtyCardQ_FL_lock,
- concurrent_g1_refine()->yellow_zone(),
- concurrent_g1_refine()->red_zone(),
+ (int)concurrent_g1_refine()->yellow_zone(),
+ (int)concurrent_g1_refine()->red_zone(),
Shared_DirtyCardQ_lock,
NULL, // fl_owner
true); // init_free_ids
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp Sat Feb 27 00:07:03 2016 +0000
@@ -1301,12 +1301,12 @@
const int k_gy = 3, k_gr = 6;
const double inc_k = 1.1, dec_k = 0.9;
- int g = cg1r->green_zone();
+ size_t g = cg1r->green_zone();
if (update_rs_time > goal_ms) {
- g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
+ g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
} else {
if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
- g = (int)MAX2(g * inc_k, g + 1.0);
+ g = (size_t)MAX2(g * inc_k, g + 1.0);
}
}
// Change the refinement threads params
@@ -1315,15 +1315,15 @@
cg1r->set_red_zone(g * k_gr);
cg1r->reinitialize_threads();
- int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
- int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
+ size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1);
+ size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
cg1r->yellow_zone());
// Change the barrier params
- dcqs.set_process_completed_threshold(processing_threshold);
- dcqs.set_max_completed_queue(cg1r->red_zone());
+ dcqs.set_process_completed_threshold((int)processing_threshold);
+ dcqs.set_max_completed_queue((int)cg1r->red_zone());
}
- int curr_queue_size = dcqs.completed_buffers_num();
+ size_t curr_queue_size = dcqs.completed_buffers_num();
if (curr_queue_size >= cg1r->yellow_zone()) {
dcqs.set_completed_queue_padding(curr_queue_size);
} else {
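
(Aside, not part of the patch: a rough standalone sketch of the zone-resizing arithmetic in the hunk above, using the constants visible in the surrounding context (k_gy = 3, k_gr = 6, inc_k = 1.1, dec_k = 0.9); the function and struct names are illustrative.)

#include <algorithm>
#include <cstddef>

struct Zones { size_t green, yellow, red; };

static Zones recompute_zones(size_t green, double update_rs_time_ms, double goal_ms,
                             size_t update_rs_processed_buffers) {
  const size_t k_gy = 3, k_gr = 6;
  const double inc_k = 1.1, dec_k = 0.9;
  size_t g = green;
  if (update_rs_time_ms > goal_ms) {
    g = (size_t)(g * dec_k);                      // may drop to 0: mutator-only processing
  } else if (update_rs_time_ms < goal_ms && update_rs_processed_buffers > g) {
    g = (size_t)std::max(g * inc_k, g + 1.0);     // grow by at least one buffer
  }
  return Zones{ g, g * k_gy, g * k_gr };          // yellow = 3 * green, red = 6 * green
}
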
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Sat Feb 27 00:07:03 2016 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2271,7 +2271,7 @@
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
guarantee(has_overflown() ||
satb_mq_set.completed_buffers_num() == 0,
- "Invariant: has_overflown = %s, num buffers = %d",
+ "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
BOOL_TO_STR(has_overflown()),
satb_mq_set.completed_buffers_num());
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp Sat Feb 27 00:07:03 2016 +0000
@@ -291,7 +291,6 @@
_g1->cleanUpCardTable();
DirtyCardQueueSet& into_cset_dcqs = _into_cset_dirty_card_queue_set;
- int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
if (_g1->evacuation_failed()) {
double restore_remembered_set_start = os::elapsedTime();
--- a/hotspot/src/share/vm/gc/g1/g1_globals.hpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/g1_globals.hpp Sat Feb 27 00:07:03 2016 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -107,35 +107,35 @@
"Size of an update buffer") \
range(1, NOT_LP64(32*M) LP64_ONLY(1*G)) \
\
- product(intx, G1ConcRefinementYellowZone, 0, \
+ product(size_t, G1ConcRefinementYellowZone, 0, \
"Number of enqueued update buffers that will " \
"trigger concurrent processing. Will be selected ergonomically " \
"by default.") \
- range(0, max_intx) \
+ range(0, SIZE_MAX) \
\
- product(intx, G1ConcRefinementRedZone, 0, \
+ product(size_t, G1ConcRefinementRedZone, 0, \
"Maximum number of enqueued update buffers before mutator " \
"threads start processing new ones instead of enqueueing them. " \
"Will be selected ergonomically by default. Zero will disable " \
"concurrent processing.") \
- range(0, max_intx) \
+ range(0, SIZE_MAX) \
\
- product(intx, G1ConcRefinementGreenZone, 0, \
+ product(size_t, G1ConcRefinementGreenZone, 0, \
"The number of update buffers that are left in the queue by the " \
"concurrent processing threads. Will be selected ergonomically " \
"by default.") \
- range(0, max_intx) \
+ range(0, SIZE_MAX) \
\
- product(intx, G1ConcRefinementServiceIntervalMillis, 300, \
+ product(uintx, G1ConcRefinementServiceIntervalMillis, 300, \
"The last concurrent refinement thread wakes up every " \
"specified number of milliseconds to do miscellaneous work.") \
- range(0, max_jint) \
+ range(0, max_uintx) \
\
- product(intx, G1ConcRefinementThresholdStep, 0, \
+ product(size_t, G1ConcRefinementThresholdStep, 0, \
"Each time the rset update queue increases by this amount " \
"activate the next refinement thread if available. " \
"Will be selected ergonomically by default.") \
- range(0, max_jint) \
+ range(0, SIZE_MAX) \
\
product(intx, G1RSetUpdatingPauseTimePercent, 10, \
"A target percentage of time that is allowed to be spend on " \
--- a/hotspot/src/share/vm/gc/g1/ptrQueue.cpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/ptrQueue.cpp Sat Feb 27 00:07:03 2016 +0000
@@ -271,16 +271,17 @@
_n_completed_buffers++;
if (!_process_completed && _process_completed_threshold >= 0 &&
- _n_completed_buffers >= _process_completed_threshold) {
+ _n_completed_buffers >= (size_t)_process_completed_threshold) {
_process_completed = true;
- if (_notify_when_complete)
+ if (_notify_when_complete) {
_cbl_mon->notify();
+ }
}
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
-int PtrQueueSet::completed_buffers_list_length() {
- int n = 0;
+size_t PtrQueueSet::completed_buffers_list_length() {
+ size_t n = 0;
BufferNode* cbn = _completed_buffers_head;
while (cbn != NULL) {
n++;
@@ -334,7 +335,8 @@
void PtrQueueSet::notify_if_necessary() {
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
+ assert(_process_completed_threshold >= 0, "_process_completed_threshold is negative");
+ if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
_process_completed = true;
if (_notify_when_complete)
_cbl_mon->notify();
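
(Aside, not part of the patch: the buffer counter is now unsigned while the threshold stays a signed int, so the explicit cast plus the non-negativity assert matter. A tiny illustrative example of why:)

#include <cstdio>

// Illustrative only: mixing a signed threshold with an unsigned buffer count.
// If the threshold were negative, the int -> size_t conversion would turn it into
// a huge value and the comparison would silently never be true, which is why the
// patch asserts non-negativity before the explicit cast.
int main() {
  size_t n_completed_buffers = 5;
  int    process_completed_threshold = 3;   // a negative value here would defeat the compare

  bool over = n_completed_buffers >= (size_t)process_completed_threshold;
  std::printf("over threshold: %s\n", over ? "yes" : "no");  // prints "yes"
  return 0;
}
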
--- a/hotspot/src/share/vm/gc/g1/ptrQueue.hpp Fri Feb 26 17:55:05 2016 +0100
+++ b/hotspot/src/share/vm/gc/g1/ptrQueue.hpp Sat Feb 27 00:07:03 2016 +0000
@@ -209,7 +209,7 @@
Monitor* _cbl_mon; // Protects the fields below.
BufferNode* _completed_buffers_head;
BufferNode* _completed_buffers_tail;
- int _n_completed_buffers;
+ size_t _n_completed_buffers;
int _process_completed_threshold;
volatile bool _process_completed;
@@ -233,9 +233,9 @@
// Maximum number of elements allowed on completed queue: after that,
// enqueuer does the work itself. Zero indicates no maximum.
int _max_completed_queue;
- int _completed_queue_padding;
+ size_t _completed_queue_padding;
- int completed_buffers_list_length();
+ size_t completed_buffers_list_length();
void assert_completed_buffer_list_len_correct_locked();
void assert_completed_buffer_list_len_correct();
@@ -299,15 +299,15 @@
// list size may be reduced, if that is deemed desirable.
void reduce_free_list();
- int completed_buffers_num() { return _n_completed_buffers; }
+ size_t completed_buffers_num() { return _n_completed_buffers; }
void merge_bufferlists(PtrQueueSet* src);
void set_max_completed_queue(int m) { _max_completed_queue = m; }
int max_completed_queue() { return _max_completed_queue; }
- void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
- int completed_queue_padding() { return _completed_queue_padding; }
+ void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
+ size_t completed_queue_padding() { return _completed_queue_padding; }
// Notify the consumer if the number of buffers crossed the threshold
void notify_if_necessary();