8233702: Introduce helper function to clamp value to range
Reviewed-by: sjohanss, kbarrett
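This changeset adds a clamp() helper to globalDefinitions.hpp (final hunk below) and uses it to replace the recurring MIN2(MAX2(value, min), max) pattern across GC and compiler code. A minimal sketch of the transformation, with placeholder names:

    // Sketch only; candidate, lo and hi stand in for the various call sites below.
    size_t candidate = compute_candidate_size();   // hypothetical input
    candidate = MIN2(MAX2(candidate, lo), hi);     // before: nested calls
    candidate = clamp(candidate, lo, hi);          // after: fixed (value, min, max) order

Unlike the nested form, clamp() also asserts that min <= max, so inconsistent bounds fail fast in debug builds instead of silently resolving to max.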
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -144,7 +144,7 @@
const size_t max_address_offset_bits = 44; // 16TB
const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset_bits = log2_intptr(address_offset);
- return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
+ return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}
size_t ZPlatformAddressMetadataShift() {
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -144,7 +144,7 @@
const size_t max_address_offset_bits = 44; // 16TB
const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset_bits = log2_intptr(address_offset);
- return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
+ return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}
size_t ZPlatformAddressMetadataShift() {
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -212,7 +212,7 @@
}
// Always honor boundaries
- size = MAX2(min_size, MIN2(_max_tlab_size, size));
+ size = clamp(size, min_size, _max_tlab_size);
// Always honor alignment
size = align_up(size, MinObjAlignment);
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -194,7 +194,7 @@
if (hr == NULL) {
return max_tlab;
} else {
- return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
+ return clamp(hr->free(), MinTLABSize, max_tlab);
}
}
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -1611,7 +1611,7 @@
// we utilize all the worker threads we can.
bool processing_is_mt = rp->processing_is_mt();
uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
- active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
+ active_workers = clamp(active_workers, 1u, _max_num_tasks);
// Parallel processing task executor.
G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -167,8 +167,7 @@
// Adjust new generation size
const size_t eden_plus_survivors =
align_up(eden_size + 2 * survivor_size, alignment);
- size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
- min_gen_size());
+ size_t desired_size = clamp(eden_plus_survivors, min_gen_size(), gen_size_limit());
assert(desired_size <= gen_size_limit(), "just checking");
if (desired_size > orig_size) {
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -355,7 +355,7 @@
new_size = gen_size_limit();
}
// Adjust according to our min and max
- new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());
+ new_size = clamp(new_size, min_gen_size(), gen_size_limit());
assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
new_size = align_up(new_size, alignment);
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -300,8 +300,7 @@
// Adjust new generation size
const size_t eden_plus_survivors =
align_up(eden_size + 2 * survivor_size, alignment);
- size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
- min_gen_size());
+ size_t desired_size = clamp(eden_plus_survivors, min_gen_size(), max_size());
assert(desired_size <= max_size(), "just checking");
if (desired_size > orig_size) {
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -392,7 +392,7 @@
size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);
// Adjust new generation size
- desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
+ desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
assert(desired_new_size <= max_new_size, "just checking");
bool changed = false;
--- a/src/hotspot/share/gc/shared/genArguments.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/shared/genArguments.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -229,7 +229,7 @@
// yield a size that is too small) and bound it by MaxNewSize above.
// Ergonomics plays here by previously calculating the desired
// NewSize and MaxNewSize.
- max_young_size = MIN2(MAX2(max_young_size, NewSize), MaxNewSize);
+ max_young_size = clamp(max_young_size, NewSize, MaxNewSize);
}
// Given the maximum young size, determine the initial and
@@ -260,7 +260,7 @@
// NewSize as the floor, because if NewRatio is overly large, the resulting
// size can be too small.
initial_young_size =
- MIN2(max_young_size, MAX2(scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment), NewSize));
+ clamp(scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment), NewSize, max_young_size);
}
}
@@ -285,7 +285,7 @@
// the minimum, maximum and initial sizes consistent
// with the young sizes and the overall heap sizes.
MinOldSize = GenAlignment;
- initial_old_size = MIN2(MaxOldSize, MAX2(InitialHeapSize - initial_young_size, MinOldSize));
+ initial_old_size = clamp(InitialHeapSize - initial_young_size, MinOldSize, MaxOldSize);
// MaxOldSize has already been made consistent above.
} else {
// OldSize has been explicitly set on the command line. Use it
--- a/src/hotspot/share/gc/shared/plab.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/shared/plab.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -135,7 +135,7 @@
// Calculates plab size for current number of gc worker threads.
size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
- return align_object_size(MIN2(MAX2(min_size(), _desired_net_plab_sz / no_of_gc_workers), max_size()));
+ return align_object_size(clamp(_desired_net_plab_sz / no_of_gc_workers, min_size(), max_size()));
}
// Compute desired plab size for one gc worker thread and latch result for later
--- a/src/hotspot/share/gc/shared/taskqueue.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/shared/taskqueue.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -57,7 +57,7 @@
{
// Use a width w: 1 <= w <= max_width
const unsigned int max_width = 40;
- const unsigned int w = MAX2(MIN2(width, max_width), 1U);
+ const unsigned int w = clamp(width, 1u, max_width);
if (line == 0) { // spaces equal in width to the header
const unsigned int hdr_width = w * last_stat_id + last_stat_id - 1;
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -132,7 +132,7 @@
(Universe::heap()->tlab_capacity(thread()) / HeapWordSize));
size_t new_size = alloc / _target_refills;
- new_size = MIN2(MAX2(new_size, min_size()), max_size());
+ new_size = clamp(new_size, min_size(), max_size());
size_t aligned_new_size = align_object_size(new_size);
@@ -251,6 +251,10 @@
(nof_threads * target_refills());
init_sz = align_object_size(init_sz);
}
+ // We can't use clamp() between min_size() and max_size() here because some
+ // options based on them may still be inconsistent and so it may assert;
+ // inconsistencies between those will be caught by the following
+ // AfterMemoryInit constraint checking.
init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
return init_sz;
}
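The comment added above covers the one site in this changeset that keeps the nested form: at this point the options that min_size() and max_size() are derived from may still be mutually inconsistent, so clamp()'s min <= max assert could fire before the AfterMemoryInit constraint checking gets a chance to report the problem. A sketch of the behavioral difference, with made-up values:

    size_t lo = 64;                            // hypothetical, inconsistent: lo > hi
    size_t hi = 32;
    size_t a = MIN2(MAX2(init_sz, lo), hi);    // silently yields hi
    size_t b = clamp(init_sz, lo, hi);         // fails the "must be" assert in debug builds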
--- a/src/hotspot/share/opto/parse2.cpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/opto/parse2.cpp Fri Nov 22 10:03:38 2019 +0100
@@ -605,7 +605,7 @@
return PROB_FAIR;
}
float p = taken_cnt / total_cnt;
- return MIN2(MAX2(p, PROB_MIN), PROB_MAX);
+ return clamp(p, PROB_MIN, PROB_MAX);
}
static float if_cnt(float cnt) {
--- a/src/hotspot/share/runtime/globals.hpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/runtime/globals.hpp Fri Nov 22 10:03:38 2019 +0100
@@ -1408,7 +1408,7 @@
product(intx, AllocatePrefetchDistance, -1, \
"Distance to prefetch ahead of allocation pointer. " \
"-1: use system-specific value (automatically determined") \
- constraint(AllocatePrefetchDistanceConstraintFunc, AfterMemoryInit)\
+ constraint(AllocatePrefetchDistanceConstraintFunc,AfterMemoryInit)\
\
product(intx, AllocatePrefetchLines, 3, \
"Number of lines to prefetch ahead of array allocation pointer") \
--- a/src/hotspot/share/utilities/globalDefinitions.hpp Fri Nov 22 10:03:38 2019 +0100
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp Fri Nov 22 10:03:38 2019 +0100
@@ -949,6 +949,13 @@
template<class T> inline T ABS(T x) { return (x > 0) ? x : -x; }
+// Return the given value clamped to the range [min ... max]
+template<typename T>
+inline T clamp(T value, T min, T max) {
+ assert(min <= max, "must be");
+ return MIN2(MAX2(value, min), max);
+}
+
// true if x is a power of 2, false otherwise
inline bool is_power_of_2(intptr_t x) {
return ((x != NoBits) && (mask_bits(x, x - 1) == NoBits));
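Because clamp() deduces a single type T for value, min and max, the three arguments at every converted call site have to agree on type; mismatches fail to compile rather than convert silently. (The g1Allocator.cpp hunk, for instance, now passes MinTLABSize without the previous (size_t) cast, so the types already line up there.) A small sketch of the deduction rule:

    size_t v  = 100;
    size_t lo = 1, hi = 44;
    clamp(v, lo, hi);     // OK: T is deduced as size_t for all three arguments
    // clamp(v, 1, hi);   // would not compile: 1 is int, v and hi are size_t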