# HG changeset patch
# User pliden
# Date 1552906238 -3600
# Node ID 7c23a44326100993c75fa8f7a9cb716822cc163e
# Parent  5e1480a38a43bd8c88ccf9f92c206f323b977a02
8220569: ZGC: Rename and rework ZUnmapBadViews to ZVerifyViews
Reviewed-by: stefank, eosterlund

diff -r 5e1480a38a43 -r 7c23a4432610 src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp	Sun Mar 17 08:26:38 2019 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp	Mon Mar 18 11:50:38 2019 +0100
@@ -237,8 +237,8 @@
 }
 
 void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
-  if (ZUnmapBadViews) {
-    // Only map the good view, for debugging only
+  if (ZVerifyViews) {
+    // Map good view
     map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
   } else {
     // Map all views
@@ -249,8 +249,8 @@
 }
 
 void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
-  if (ZUnmapBadViews) {
-    // Only map the good view, for debugging only
+  if (ZVerifyViews) {
+    // Unmap good view
     unmap_view(pmem, ZAddress::good(offset));
   } else {
     // Unmap all views
@@ -260,11 +260,14 @@
   }
 }
 
-void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
-  assert(ZUnmapBadViews, "Should be enabled");
-  const uintptr_t addr_good = ZAddress::good(offset);
-  const uintptr_t addr_bad = ZAddress::is_marked(ZAddressGoodMask) ? ZAddress::remapped(offset) : ZAddress::marked(offset);
-  // Map/Unmap views
-  map_view(pmem, addr_good, false /* pretouch */);
-  unmap_view(pmem, addr_bad);
+void ZPhysicalMemoryBacking::debug_map(ZPhysicalMemory pmem, uintptr_t offset) const {
+  // Map good view
+  assert(ZVerifyViews, "Should be enabled");
+  map_view(pmem, ZAddress::good(offset), false /* pretouch */);
 }
+
+void ZPhysicalMemoryBacking::debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
+  // Unmap good view
+  assert(ZVerifyViews, "Should be enabled");
+  unmap_view(pmem, ZAddress::good(offset));
+}
diff -r 5e1480a38a43 -r 7c23a4432610 src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp	Sun Mar 17 08:26:38 2019 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp	Mon Mar 18 11:50:38 2019 +0100
@@ -58,7 +58,9 @@
 
   void map(ZPhysicalMemory pmem, uintptr_t offset) const;
   void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
-  void flip(ZPhysicalMemory pmem, uintptr_t offset) const;
+
+  void debug_map(ZPhysicalMemory pmem, uintptr_t offset) const;
+  void debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
 };
 
 #endif // OS_CPU_LINUX_X86_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
diff -r 5e1480a38a43 -r 7c23a4432610 src/hotspot/share/gc/z/zHeap.cpp
--- a/src/hotspot/share/gc/z/zHeap.cpp	Sun Mar 17 08:26:38 2019 +0000
+++ b/src/hotspot/share/gc/z/zHeap.cpp	Mon Mar 18 11:50:38 2019 +0100
@@ -247,20 +247,35 @@
   }
 }
 
-void ZHeap::flip_views() {
-  // For debugging only
-  if (ZUnmapBadViews) {
-    // Flip pages
+void ZHeap::before_flip() {
+  if (ZVerifyViews) {
+    // Unmap all pages
+    _page_allocator.unmap_all_pages();
+  }
+}
+
+void ZHeap::after_flip() {
+  if (ZVerifyViews) {
+    // Map all pages
     ZPageTableIterator iter(&_pagetable);
     for (ZPage* page; iter.next(&page);) {
       if (!page->is_detached()) {
-        _page_allocator.flip_page(page);
+        _page_allocator.map_page(page);
       }
     }
+  }
+}
 
-    // Flip pre-mapped memory
-    _page_allocator.flip_pre_mapped();
-  }
+void ZHeap::flip_to_marked() {
+  before_flip();
+  ZAddressMasks::flip_to_marked();
+  after_flip();
+}
+
+void ZHeap::flip_to_remapped() {
+  before_flip();
+  ZAddressMasks::flip_to_remapped();
+  after_flip();
 }
 
 void ZHeap::mark_start() {
@@ -270,8 +285,7 @@
   ZStatSample(ZSamplerHeapUsedBeforeMark, used());
 
   // Flip address view
-  ZAddressMasks::flip_to_marked();
-  flip_views();
+  flip_to_marked();
 
   // Retire allocating pages
   _object_allocator.retire_pages();
@@ -466,8 +480,7 @@
   _unload.finish();
 
   // Flip address view
-  ZAddressMasks::flip_to_remapped();
-  flip_views();
+  flip_to_remapped();
 
   // Enter relocate phase
   ZGlobalPhase = ZPhaseRelocate;
diff -r 5e1480a38a43 -r 7c23a4432610 src/hotspot/share/gc/z/zHeap.hpp
--- a/src/hotspot/share/gc/z/zHeap.hpp	Sun Mar 17 08:26:38 2019 +0000
+++ b/src/hotspot/share/gc/z/zHeap.hpp	Mon Mar 18 11:50:38 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,8 +67,13 @@
   size_t heap_max_size() const;
   size_t heap_max_reserve_size() const;
 
+  void before_flip();
+  void after_flip();
+
+  void flip_to_marked();
+  void flip_to_remapped();
+
   void out_of_memory();
-  void flip_views();
   void fixup_partial_loads();
 
  public:
diff -r 5e1480a38a43 -r 7c23a4432610 src/hotspot/share/gc/z/zPageAllocator.cpp
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp	Sun Mar 17 08:26:38 2019 +0000
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp	Mon Mar 18 11:50:38 2019 +0100
@@ -242,11 +242,6 @@
   _pre_mapped.clear();
 }
 
-void ZPageAllocator::map_page(ZPage* page) {
-  // Map physical memory
-  _physical.map(page->physical_memory(), page->start());
-}
-
 void ZPageAllocator::detach_page(ZPage* page) {
   // Detach the memory mapping.
   detach_memory(page->virtual_memory(), page->physical_memory());
@@ -267,6 +262,21 @@
   delete page;
 }
 
+void ZPageAllocator::map_page(ZPage* page) {
+  // Map physical memory
+  if (!page->is_mapped()) {
+    _physical.map(page->physical_memory(), page->start());
+  } else if (ZVerifyViews) {
+    _physical.debug_map(page->physical_memory(), page->start());
+  }
+}
+
+void ZPageAllocator::unmap_all_pages() {
+  ZPhysicalMemory pmem(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax));
+  _physical.debug_unmap(pmem, 0 /* offset */);
+  pmem.clear();
+}
+
 void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
   ZLocker locker(&_lock);
   list->transfer(&_detached);
@@ -398,9 +408,7 @@
   }
 
   // Map page if needed
-  if (!page->is_mapped()) {
-    map_page(page);
-  }
+  map_page(page);
 
   // Reset page. This updates the page's sequence number and must
   // be done after page allocation, which potentially blocked in
@@ -455,27 +463,6 @@
   pmem.clear();
 }
 
-void ZPageAllocator::flip_page(ZPage* page) {
-  const ZPhysicalMemory& pmem = page->physical_memory();
-  const uintptr_t addr = page->start();
-
-  // Flip physical mapping
-  _physical.flip(pmem, addr);
-}
-
-void ZPageAllocator::flip_pre_mapped() {
-  if (_pre_mapped.available() == 0) {
-    // Nothing to flip
-    return;
-  }
-
-  const ZPhysicalMemory& pmem = _pre_mapped.physical_memory();
-  const ZVirtualMemory& vmem = _pre_mapped.virtual_memory();
-
-  // Flip physical mapping
-  _physical.flip(pmem, vmem.start());
-}
-
 void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
   ZLocker locker(&_lock);
 
diff -r 5e1480a38a43 -r 7c23a4432610 src/hotspot/share/gc/z/zPageAllocator.hpp
--- a/src/hotspot/share/gc/z/zPageAllocator.hpp	Sun Mar 17 08:26:38 2019 +0000
+++ b/src/hotspot/share/gc/z/zPageAllocator.hpp	Mon Mar 18 11:50:38 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,6 @@
   size_t try_ensure_unused_for_pre_mapped(size_t size);
 
   ZPage* create_page(uint8_t type, size_t size);
-  void map_page(ZPage* page);
   void detach_page(ZPage* page);
   void flush_pre_mapped();
   void flush_cache(size_t size);
@@ -97,13 +96,13 @@
   void reset_statistics();
 
   ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
-  void flip_page(ZPage* page);
   void free_page(ZPage* page, bool reclaimed);
   void destroy_page(ZPage* page);
 
-  void flush_detached_pages(ZList<ZPage>* list);
+  void map_page(ZPage* page);
+  void unmap_all_pages();
 
-  void flip_pre_mapped();
+  void flush_detached_pages(ZList<ZPage>* list);
 
   bool is_alloc_stalled() const;
   void check_out_of_memory();
diff -r 5e1480a38a43 -r 7c23a4432610 src/hotspot/share/gc/z/zPhysicalMemory.cpp
--- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp	Sun Mar 17 08:26:38 2019 +0000
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp	Mon Mar 18 11:50:38 2019 +0100
@@ -179,6 +179,10 @@
   _backing.unmap(pmem, offset);
 }
 
-void ZPhysicalMemoryManager::flip(ZPhysicalMemory pmem, uintptr_t offset) {
-  _backing.flip(pmem, offset);
+void ZPhysicalMemoryManager::debug_map(ZPhysicalMemory pmem, uintptr_t offset) {
+  _backing.debug_map(pmem, offset);
 }
+
+void ZPhysicalMemoryManager::debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) {
+  _backing.debug_unmap(pmem, offset);
+}
diff -r 5e1480a38a43 -r 7c23a4432610 src/hotspot/share/gc/z/zPhysicalMemory.hpp
--- a/src/hotspot/share/gc/z/zPhysicalMemory.hpp	Sun Mar 17 08:26:38 2019 +0000
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.hpp	Mon Mar 18 11:50:38 2019 +0100
@@ -94,7 +94,9 @@
 
   void map(ZPhysicalMemory pmem, uintptr_t offset);
   void unmap(ZPhysicalMemory pmem, uintptr_t offset);
-  void flip(ZPhysicalMemory pmem, uintptr_t offset);
+
+  void debug_map(ZPhysicalMemory pmem, uintptr_t offset);
+  void debug_unmap(ZPhysicalMemory pmem, uintptr_t offset);
 };
 
 #endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP
diff -r 5e1480a38a43 -r 7c23a4432610 src/hotspot/share/gc/z/z_globals.hpp
--- a/src/hotspot/share/gc/z/z_globals.hpp	Sun Mar 17 08:26:38 2019 +0000
+++ b/src/hotspot/share/gc/z/z_globals.hpp	Mon Mar 18 11:50:38 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,8 @@
   diagnostic(bool, ZProactive, true,                                        \
           "Enable proactive GC cycles")                                     \
                                                                             \
-  diagnostic(bool, ZUnmapBadViews, false,                                   \
-          "Unmap bad (inactive) heap views")                                \
+  diagnostic(bool, ZVerifyViews, false,                                     \
+          "Verify heap view accesses")                                      \
                                                                             \
   diagnostic(bool, ZVerifyMarking, false,                                   \
           "Verify marking stacks")                                          \
diff -r 5e1480a38a43 -r 7c23a4432610 test/hotspot/jtreg/compiler/gcbarriers/UnsafeIntrinsicsTest.java
--- a/test/hotspot/jtreg/compiler/gcbarriers/UnsafeIntrinsicsTest.java	Sun Mar 17 08:26:38 2019 +0000
+++ b/test/hotspot/jtreg/compiler/gcbarriers/UnsafeIntrinsicsTest.java	Mon Mar 18 11:50:38 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  * @modules java.base/jdk.internal.misc:+open
  * @summary Validate barriers after Unsafe getReference, CAS and swap (GetAndSet)
  * @requires vm.gc.Z & !vm.graal.enabled
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:+UnlockDiagnosticVMOptions -XX:+ZUnmapBadViews -XX:ZCollectionInterval=1 -XX:-CreateCoredumpOnCrash -XX:CompileCommand=dontinline,*::mergeImpl* compiler.gcbarriers.UnsafeIntrinsicsTest
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:+UnlockDiagnosticVMOptions -XX:+ZVerifyViews -XX:ZCollectionInterval=1 -XX:-CreateCoredumpOnCrash -XX:CompileCommand=dontinline,*::mergeImpl* compiler.gcbarriers.UnsafeIntrinsicsTest
  */
 
 package compiler.gcbarriers;
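
Background sketch (illustration only, not part of the changeset): ZGC multi-maps the same physical memory at several virtual address views (marked0, marked1, remapped), exactly one of which is the "good" view at any time. With -XX:+ZVerifyViews only the good view stays mapped, and before_flip()/after_flip() unmap and remap the whole heap around each view flip, so a missed load barrier that dereferences a stale view faults immediately instead of silently reading valid memory. The standalone Linux program below (hypothetical names, not HotSpot code; assumes glibc 2.27+ for memfd_create) demonstrates the underlying multi-mapping technique with memfd_create(2) and mmap(2):

#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstdio>

int main() {
  const size_t size = 4096;

  // Anonymous shared memory stands in for ZGC's heap backing file.
  const int fd = memfd_create("view-sketch", 0);
  assert(fd != -1);
  assert(ftruncate(fd, size) == 0);

  // Map the same physical page at two different virtual addresses,
  // analogous to ZGC's "good" and stale heap views.
  char* const good = (char*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  char* const bad  = (char*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  assert(good != MAP_FAILED && bad != MAP_FAILED);

  good[0] = 42;
  printf("one physical page, two views: %d == %d\n", good[0], bad[0]);

  // What debug_unmap() achieves for verification: once a view is gone,
  // any access through it is an immediate segfault rather than a
  // silently successful access to correctly mapped memory.
  munmap(bad, size);
  return 0;
}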