8219633: ZGC: Rename ZPageSizeMin to ZGranuleSize
Reviewed-by: eosterlund, stefank
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp Wed Mar 13 11:31:00 2019 +0100
@@ -74,7 +74,7 @@
// * 63-47 Fixed (17-bits, always zero)
//
-const size_t ZPlatformPageSizeSmallShift = 21; // 2M
+const size_t ZPlatformGranuleSizeShift = 21; // 2M
const size_t ZPlatformAddressOffsetBits = 42; // 4TB
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBackingFile_linux_x86.hpp"
#include "gc/z/zErrno.hpp"
+#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMemory.hpp"
#include "gc/z/zNUMA.hpp"
@@ -47,23 +48,22 @@
// Proc file entry for max map count
#define ZFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"
-ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size) :
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
_manager(),
- _file(),
- _granule_size(granule_size) {
+ _file() {
if (!_file.is_initialized()) {
return;
}
// Check and warn if max map count is too low
- check_max_map_count(max_capacity, granule_size);
+ check_max_map_count(max_capacity);
// Check and warn if available space on filesystem is too low
check_available_space_on_filesystem(max_capacity);
}
-void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {
+void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity) const {
const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
FILE* const file = fopen(filename, "r");
if (file == NULL) {
@@ -86,7 +86,7 @@
// However, ZGC tends to create the most mappings and dominate the total count.
// In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
// We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
- const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
+ const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
if (actual_max_map_count < required_max_map_count) {
log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
log_warning(gc, init)("The system limit on number of memory mappings per process might be too low "
@@ -135,7 +135,7 @@
size_t ZPhysicalMemoryBacking::try_expand(size_t old_capacity, size_t new_capacity) {
assert(old_capacity < new_capacity, "Invalid old/new capacity");
- const size_t capacity = _file.try_expand(old_capacity, new_capacity - old_capacity, _granule_size);
+ const size_t capacity = _file.try_expand(old_capacity, new_capacity - old_capacity, ZGranuleSize);
if (capacity > old_capacity) {
// Add expanded capacity to free list
_manager.free(old_capacity, capacity - old_capacity);
@@ -145,15 +145,15 @@
}
ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
- assert(is_aligned(size, _granule_size), "Invalid size");
+ assert(is_aligned(size, ZGranuleSize), "Invalid size");
ZPhysicalMemory pmem;
// Allocate segments
- for (size_t allocated = 0; allocated < size; allocated += _granule_size) {
- const uintptr_t start = _manager.alloc_from_front(_granule_size);
+ for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
+ const uintptr_t start = _manager.alloc_from_front(ZGranuleSize);
assert(start != UINTPTR_MAX, "Allocation should never fail");
- pmem.add_segment(ZPhysicalMemorySegment(start, _granule_size));
+ pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
}
return pmem;
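
For illustration, a minimal sketch of how the required_max_map_count estimate above scales (not part of the change; assumes the 2M x86-64 granule and a hypothetical 16G max heap):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t ZGranuleSize = (size_t)1 << 21;                      // 2M granule
      const size_t max_capacity = (size_t)16 * 1024 * 1024 * 1024;      // hypothetical 16G heap
      // Each granule may be mapped three times (once per heap view), plus ~20% headroom
      // for non-ZGC subsystems, matching the estimate in check_max_map_count().
      const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
      printf("required max_map_count: %zu\n", required_max_map_count);  // prints 29491
      return 0;
    }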
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp Wed Mar 13 11:31:00 2019 +0100
@@ -34,9 +34,8 @@
private:
ZMemoryManager _manager;
ZBackingFile _file;
- const size_t _granule_size;
- void check_max_map_count(size_t max_capacity, size_t granule_size) const;
+ void check_max_map_count(size_t max_capacity) const;
void check_available_space_on_filesystem(size_t max_capacity) const;
void map_failed(ZErrno err) const;
@@ -46,7 +45,7 @@
void unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const;
public:
- ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size);
+ ZPhysicalMemoryBacking(size_t max_capacity);
bool is_initialized() const;
--- a/src/hotspot/share/gc/z/vmStructs_z.hpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp Wed Mar 13 11:31:00 2019 +0100
@@ -52,7 +52,7 @@
const int* _ZObjectAlignmentSmall;
};
-typedef ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> ZAddressRangeMapForPageTable;
+typedef ZAddressRangeMap<ZPageTableEntry, ZGranuleSizeShift> ZAddressRangeMapForPageTable;
#define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field) \
static_field(ZGlobalsForVMStructs, _instance_p, ZGlobalsForVMStructs*) \
@@ -101,9 +101,9 @@
declare_constant(ZObjectAlignmentLargeShift)
#define VM_LONG_CONSTANTS_ZGC(declare_constant) \
+ declare_constant(ZGranuleSizeShift) \
declare_constant(ZPageSizeSmallShift) \
declare_constant(ZPageSizeMediumShift) \
- declare_constant(ZPageSizeMinShift) \
declare_constant(ZAddressOffsetShift) \
declare_constant(ZAddressOffsetBits) \
declare_constant(ZAddressOffsetMask) \
--- a/src/hotspot/share/gc/z/zCollectorPolicy.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zCollectorPolicy.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,6 @@
#include "gc/z/zGlobals.hpp"
void ZCollectorPolicy::initialize_alignments() {
- _space_alignment = ZPageSizeMin;
+ _space_alignment = ZGranuleSize;
_heap_alignment = _space_alignment;
}
--- a/src/hotspot/share/gc/z/zDebug.gdb Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zDebug.gdb Wed Mar 13 11:31:00 2019 +0100
@@ -48,7 +48,7 @@
end
end
end
- printf "\t Page: %llu\n", ((uintptr_t)$obj & ZAddressOffsetMask) >> ZPageSizeMinShift
+ printf "\t Page: %llu\n", ((uintptr_t)$obj & ZAddressOffsetMask) >> ZGranuleSizeShift
x/16gx $obj
printf "Mark: 0x%016llx\tKlass: %s\n", (uintptr_t)$obj->_mark, (char*)$obj->_metadata->_klass->_name->_body
end
@@ -99,7 +99,7 @@
define zmarked
set $addr = $arg0
set $obj = ((uintptr_t)$addr & ZAddressOffsetMask)
- set $page_index = $obj >> ZPageSizeMinShift
+ set $page_index = $obj >> ZGranuleSizeShift
set $page_entry = (uintptr_t)ZHeap::_heap._pagetable._map._map[$page_index]
set $page = (ZPage*)($page_entry & ~1)
set $page_start = (uintptr_t)$page._virtual._start
--- a/src/hotspot/share/gc/z/zGlobals.hpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zGlobals.hpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,20 +40,22 @@
// Global sequence number
extern uint32_t ZGlobalSeqNum;
+// Granule shift/size
+const size_t ZGranuleSizeShift = ZPlatformGranuleSizeShift;
+const size_t ZGranuleSize = (size_t)1 << ZGranuleSizeShift;
+
// Page types
const uint8_t ZPageTypeSmall = 0;
const uint8_t ZPageTypeMedium = 1;
const uint8_t ZPageTypeLarge = 2;
// Page size shifts
-const size_t ZPageSizeSmallShift = ZPlatformPageSizeSmallShift;
+const size_t ZPageSizeSmallShift = ZGranuleSizeShift;
const size_t ZPageSizeMediumShift = ZPageSizeSmallShift + 4;
-const size_t ZPageSizeMinShift = ZPageSizeSmallShift;
// Page sizes
const size_t ZPageSizeSmall = (size_t)1 << ZPageSizeSmallShift;
const size_t ZPageSizeMedium = (size_t)1 << ZPageSizeMediumShift;
-const size_t ZPageSizeMin = (size_t)1 << ZPageSizeMinShift;
// Object size limits
const size_t ZObjectSizeLimitSmall = (ZPageSizeSmall / 8); // Allow 12.5% waste
@@ -133,7 +135,7 @@
const size_t ZMarkStackMagazineSlots = (ZMarkStackMagazineSize / ZMarkStackSize) - 1;
// Mark stripe size
-const size_t ZMarkStripeShift = ZPageSizeMinShift;
+const size_t ZMarkStripeShift = ZGranuleSizeShift;
// Max number of mark stripes
const size_t ZMarkStripesMax = 16; // Must be a power of two
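
For reference, with the linux_x86 value ZPlatformGranuleSizeShift == 21, the constants introduced above work out as follows (a standalone restatement, not part of the change):

    #include <cstddef>

    const size_t ZPlatformGranuleSizeShift = 21;                        // 2M, from zGlobals_linux_x86.hpp
    const size_t ZGranuleSizeShift  = ZPlatformGranuleSizeShift;
    const size_t ZGranuleSize       = (size_t)1 << ZGranuleSizeShift;   // 2M
    const size_t ZPageSizeSmallShift  = ZGranuleSizeShift;              // small page == one granule
    const size_t ZPageSizeMediumShift = ZPageSizeSmallShift + 4;
    const size_t ZPageSizeSmall  = (size_t)1 << ZPageSizeSmallShift;    // 2M
    const size_t ZPageSizeMedium = (size_t)1 << ZPageSizeMediumShift;   // 32M
    const size_t ZMarkStripeShift = ZGranuleSizeShift;                  // one mark stripe per granule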
--- a/src/hotspot/share/gc/z/zHeap.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zHeap.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -80,12 +80,12 @@
}
size_t ZHeap::heap_min_size() const {
- const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
+ const size_t aligned_min_size = align_up(InitialHeapSize, ZGranuleSize);
return MIN2(aligned_min_size, heap_max_size());
}
size_t ZHeap::heap_max_size() const {
- const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
+ const size_t aligned_max_size = align_up(MaxHeapSize, ZGranuleSize);
return MIN2(aligned_max_size, ZAddressOffsetMax);
}
--- a/src/hotspot/share/gc/z/zHeapIterator.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zHeapIterator.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -123,13 +123,13 @@
}
static size_t object_index_max() {
- return ZPageSizeMin >> ZObjectAlignmentSmallShift;
+ return ZGranuleSize >> ZObjectAlignmentSmallShift;
}
static size_t object_index(oop obj) {
const uintptr_t addr = ZOop::to_address(obj);
const uintptr_t offset = ZAddress::offset(addr);
- const uintptr_t mask = (1 << ZPageSizeMinShift) - 1;
+ const uintptr_t mask = ZGranuleSize - 1;
return (offset & mask) >> ZObjectAlignmentSmallShift;
}
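
A rough worked example of the object_index() arithmetic above (illustrative values only; assumes the 2M granule and the default 8-byte small-object alignment):

    #include <cstddef>
    #include <cstdint>

    int main() {
      const size_t ZGranuleSizeShift          = 21;                 // 2M granule
      const size_t ZGranuleSize               = (size_t)1 << ZGranuleSizeShift;
      const size_t ZObjectAlignmentSmallShift = 3;                  // 8-byte alignment (typical default)

      const uintptr_t offset = 0x12345678;                          // hypothetical heap offset
      const uintptr_t mask   = ZGranuleSize - 1;                    // position within the granule
      const size_t index     = (offset & mask) >> ZObjectAlignmentSmallShift;
      const size_t index_max = ZGranuleSize >> ZObjectAlignmentSmallShift; // 262144 bits per granule
      return index < index_max ? 0 : 1;                             // index always fits in the bitmap
    }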
--- a/src/hotspot/share/gc/z/zHeapIterator.hpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zHeapIterator.hpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,8 @@
friend class ZHeapIteratorOopClosure;
private:
- typedef ZAddressRangeMap<ZHeapIteratorBitMap*, ZPageSizeMinShift> ZVisitMap;
- typedef ZAddressRangeMapIterator<ZHeapIteratorBitMap*, ZPageSizeMinShift> ZVisitMapIterator;
+ typedef ZAddressRangeMap<ZHeapIteratorBitMap*, ZGranuleSizeShift> ZVisitMap;
+ typedef ZAddressRangeMapIterator<ZHeapIteratorBitMap*, ZGranuleSizeShift> ZVisitMapIterator;
typedef Stack<oop, mtGC> ZVisitStack;
ZVisitStack _visit_stack;
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -113,7 +113,7 @@
uintptr_t addr = 0;
// Allocate new large page
- const size_t page_size = align_up(size, ZPageSizeMin);
+ const size_t page_size = align_up(size, ZGranuleSize);
ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
if (page != NULL) {
// Allocate the object
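
To illustrate the large-page sizing above (a sketch with a hypothetical object size; align_up below is a local stand-in for HotSpot's utility of the same name):

    #include <cstddef>

    // Local stand-in for HotSpot's align_up() on power-of-two alignments.
    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t ZGranuleSize = (size_t)1 << 21;                  // 2M granule
      const size_t object_size  = 5 * 1024 * 1024;                  // hypothetical 5M large object
      const size_t page_size    = align_up(object_size, ZGranuleSize);
      return page_size == 6 * 1024 * 1024 ? 0 : 1;                  // rounds up to 6M, a granule multiple
    }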
--- a/src/hotspot/share/gc/z/zPage.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zPage.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@
assert(!_virtual.is_null(), "Should not be null");
assert((type == ZPageTypeSmall && size() == ZPageSizeSmall) ||
(type == ZPageTypeMedium && size() == ZPageSizeMedium) ||
- (type == ZPageTypeLarge && is_aligned(size(), ZPageSizeMin)),
+ (type == ZPageTypeLarge && is_aligned(size(), ZGranuleSize)),
"Page type/size mismatch");
}
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,7 +86,7 @@
ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve) :
_lock(),
_virtual(),
- _physical(max_capacity, ZPageSizeMin),
+ _physical(max_capacity),
_cache(),
_max_reserve(max_reserve),
_pre_mapped(_virtual, _physical, try_ensure_unused_for_pre_mapped(min_capacity)),
--- a/src/hotspot/share/gc/z/zPageTable.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zPageTable.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
const uintptr_t start = ZAddress::good(page->start());
const uintptr_t end = start + page->size();
- for (uintptr_t addr = start; addr < end; addr += ZPageSizeMin) {
+ for (uintptr_t addr = start; addr < end; addr += ZGranuleSize) {
_map.put(addr, entry);
}
}
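
The put_entry() loop above installs one page-table entry per granule covered by the page; a minimal sketch of that stride (assuming the 2M granule and a 32M medium page as an example):

    #include <cstddef>
    #include <cstdint>

    int main() {
      const size_t ZGranuleSize = (size_t)1 << 21;                  // 2M granule
      const uintptr_t start     = 0;                                // hypothetical page start
      const size_t page_size    = 32 * 1024 * 1024;                 // 32M medium page
      size_t entries = 0;
      for (uintptr_t addr = start; addr < start + page_size; addr += ZGranuleSize) {
        ++entries;                                                  // one page-table entry per granule
      }
      return entries == 16 ? 0 : 1;                                 // 32M / 2M == 16 entries
    }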
--- a/src/hotspot/share/gc/z/zPageTable.hpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zPageTable.hpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@
friend class ZPageTableIterator;
private:
- ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> _map;
+ ZAddressRangeMap<ZPageTableEntry, ZGranuleSizeShift> _map;
ZPageTableEntry get_entry(ZPage* page) const;
void put_entry(ZPage* page, ZPageTableEntry entry);
@@ -55,7 +55,7 @@
class ZPageTableIterator : public StackObj {
private:
- ZAddressRangeMapIterator<ZPageTableEntry, ZPageSizeMinShift> _iter;
+ ZAddressRangeMapIterator<ZPageTableEntry, ZGranuleSizeShift> _iter;
ZPage* _prev;
public:
--- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -91,8 +91,8 @@
}
}
-ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity, size_t granule_size) :
- _backing(max_capacity, granule_size),
+ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
+ _backing(max_capacity),
_max_capacity(max_capacity),
_current_max_capacity(max_capacity),
_capacity(0),
--- a/src/hotspot/share/gc/z/zPhysicalMemory.hpp Wed Mar 13 11:31:00 2019 +0100
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.hpp Wed Mar 13 11:31:00 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@
void nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset);
public:
- ZPhysicalMemoryManager(size_t max_capacity, size_t granule_size);
+ ZPhysicalMemoryManager(size_t max_capacity);
bool is_initialized() const;
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddressRangeMapForPageTable.java Wed Mar 13 11:31:00 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddressRangeMapForPageTable.java Wed Mar 13 11:31:00 2019 +0100
@@ -34,8 +34,6 @@
public class ZAddressRangeMapForPageTable extends VMObject {
private static AddressField mapField;
- private static long AddressRangeShift = ZGlobals.ZPageSizeMinShift;
-
static {
VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
}
@@ -55,11 +53,11 @@
}
public long size() {
- return ZGlobals.ZAddressOffsetMax >> AddressRangeShift;
+ return ZGlobals.ZAddressOffsetMax >> ZGlobals.ZGranuleSizeShift;
}
private long index_for_addr(Address addr) {
- long index = ZAddress.offset(addr) >> AddressRangeShift;
+ long index = ZAddress.offset(addr) >> ZGlobals.ZGranuleSizeShift;
return index;
}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobals.java Wed Mar 13 11:31:00 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobals.java Wed Mar 13 11:31:00 2019 +0100
@@ -39,10 +39,12 @@
public static byte ZPageTypeMedium;
public static byte ZPageTypeLarge;
+ // Granule size shift
+ public static long ZGranuleSizeShift;
+
// Page size shifts
public static long ZPageSizeSmallShift;
public static long ZPageSizeMediumShift;
- public static long ZPageSizeMinShift;
// Object alignment shifts
public static int ZObjectAlignmentMediumShift;
@@ -74,9 +76,10 @@
ZPageTypeMedium = db.lookupIntConstant("ZPageTypeMedium").byteValue();
ZPageTypeLarge = db.lookupIntConstant("ZPageTypeLarge").byteValue();
+ ZGranuleSizeShift = db.lookupLongConstant("ZGranuleSizeShift").longValue();
+
ZPageSizeSmallShift = db.lookupLongConstant("ZPageSizeSmallShift").longValue();
ZPageSizeMediumShift = db.lookupLongConstant("ZPageSizeMediumShift").longValue();
- ZPageSizeMinShift = db.lookupLongConstant("ZPageSizeMinShift").longValue();
ZObjectAlignmentMediumShift = db.lookupIntConstant("ZObjectAlignmentMediumShift").intValue();
ZObjectAlignmentLargeShift = db.lookupIntConstant("ZObjectAlignmentLargeShift").intValue();
--- a/test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp Wed Mar 13 11:31:00 2019 +0100
+++ b/test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp Wed Mar 13 11:31:00 2019 +0100
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "utilities/debug.hpp"
#include "unittest.hpp"
@@ -29,55 +30,51 @@
#if defined(AMD64)
TEST(ZPhysicalMemorySegmentTest, split) {
- const size_t SegmentSize = 2 * M;
-
- ZPhysicalMemorySegment seg(0, 10 * SegmentSize);
+ ZPhysicalMemorySegment seg(0, 10 * ZGranuleSize);
- ZPhysicalMemorySegment seg_split0 = seg.split(0 * SegmentSize);
- EXPECT_EQ(seg_split0.size(), 0 * SegmentSize);
- EXPECT_EQ( seg.size(), 10 * SegmentSize);
+ ZPhysicalMemorySegment seg_split0 = seg.split(0 * ZGranuleSize);
+ EXPECT_EQ(seg_split0.size(), 0 * ZGranuleSize);
+ EXPECT_EQ( seg.size(), 10 * ZGranuleSize);
- ZPhysicalMemorySegment seg_split1 = seg.split(5 * SegmentSize);
- EXPECT_EQ(seg_split1.size(), 5 * SegmentSize);
- EXPECT_EQ( seg.size(), 5 * SegmentSize);
+ ZPhysicalMemorySegment seg_split1 = seg.split(5 * ZGranuleSize);
+ EXPECT_EQ(seg_split1.size(), 5 * ZGranuleSize);
+ EXPECT_EQ( seg.size(), 5 * ZGranuleSize);
- ZPhysicalMemorySegment seg_split2 = seg.split(5 * SegmentSize);
- EXPECT_EQ(seg_split2.size(), 5 * SegmentSize);
- EXPECT_EQ( seg.size(), 0 * SegmentSize);
+ ZPhysicalMemorySegment seg_split2 = seg.split(5 * ZGranuleSize);
+ EXPECT_EQ(seg_split2.size(), 5 * ZGranuleSize);
+ EXPECT_EQ( seg.size(), 0 * ZGranuleSize);
- ZPhysicalMemorySegment seg_split3 = seg.split(0 * SegmentSize);
- EXPECT_EQ(seg_split3.size(), 0 * SegmentSize);
- EXPECT_EQ( seg.size(), 0 * SegmentSize);
+ ZPhysicalMemorySegment seg_split3 = seg.split(0 * ZGranuleSize);
+ EXPECT_EQ(seg_split3.size(), 0 * ZGranuleSize);
+ EXPECT_EQ( seg.size(), 0 * ZGranuleSize);
}
TEST(ZPhysicalMemoryTest, split) {
- const size_t SegmentSize = 2 * M;
-
- ZPhysicalMemoryManager pmem_manager(10 * SegmentSize, SegmentSize);
+ ZPhysicalMemoryManager pmem_manager(10 * ZGranuleSize);
- pmem_manager.try_ensure_unused_capacity(10 * SegmentSize);
- EXPECT_EQ(pmem_manager.unused_capacity(), 10 * SegmentSize);
+ pmem_manager.try_ensure_unused_capacity(10 * ZGranuleSize);
+ EXPECT_EQ(pmem_manager.unused_capacity(), 10 * ZGranuleSize);
- ZPhysicalMemory pmem = pmem_manager.alloc(8 * SegmentSize);
+ ZPhysicalMemory pmem = pmem_manager.alloc(8 * ZGranuleSize);
EXPECT_EQ(pmem.nsegments(), 1u) << "wrong number of segments";
- ZPhysicalMemory split0_pmem = pmem.split(SegmentSize);
+ ZPhysicalMemory split0_pmem = pmem.split(ZGranuleSize);
EXPECT_EQ(split0_pmem.nsegments(), 1u);
EXPECT_EQ( pmem.nsegments(), 1u);
- EXPECT_EQ(split0_pmem.size(), 1 * SegmentSize);
- EXPECT_EQ( pmem.size(), 7 * SegmentSize);
+ EXPECT_EQ(split0_pmem.size(), 1 * ZGranuleSize);
+ EXPECT_EQ( pmem.size(), 7 * ZGranuleSize);
- ZPhysicalMemory split1_pmem = pmem.split(2 * SegmentSize);
+ ZPhysicalMemory split1_pmem = pmem.split(2 * ZGranuleSize);
EXPECT_EQ(split1_pmem.nsegments(), 1u);
EXPECT_EQ( pmem.nsegments(), 1u);
- EXPECT_EQ(split1_pmem.size(), 2 * SegmentSize);
- EXPECT_EQ( pmem.size(), 5 * SegmentSize);
+ EXPECT_EQ(split1_pmem.size(), 2 * ZGranuleSize);
+ EXPECT_EQ( pmem.size(), 5 * ZGranuleSize);
- ZPhysicalMemory split2_pmem = pmem.split(5 * SegmentSize);
+ ZPhysicalMemory split2_pmem = pmem.split(5 * ZGranuleSize);
EXPECT_EQ(split2_pmem.nsegments(), 1u);
EXPECT_EQ( pmem.nsegments(), 1u);
- EXPECT_EQ(split2_pmem.size(), 5 * SegmentSize);
- EXPECT_EQ( pmem.size(), 0 * SegmentSize);
+ EXPECT_EQ(split2_pmem.size(), 5 * ZGranuleSize);
+ EXPECT_EQ( pmem.size(), 0 * ZGranuleSize);
}
#endif