# HG changeset patch
# User vlivanov
# Date 1574785087 -10800
# Node ID f9905e7c96aa5a7d44c53ddb957ab7fc1541f1e1
# Parent  8375560db76b625f7284bcdb920557722f7fc634
# Parent  eb3e2a5c2bcd000175a7db79bc24797eb30a18ed
Merge

diff -r 8375560db76b -r f9905e7c96aa src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Tue Nov 26 19:18:07 2019 +0300
@@ -101,36 +101,6 @@
   return (on_weak_ref || unknown) && (keep_alive || is_traversal_mode);
 }
 
-template <class T>
-inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
-  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
-  if (_heap->is_concurrent_mark_in_progress()) {
-    T heap_oop = RawAccess<>::oop_load(field);
-    if (!CompressedOops::is_null(heap_oop)) {
-      enqueue(CompressedOops::decode(heap_oop));
-    }
-  }
-}
-
-// These are the more general virtual versions.
-void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
-  inline_write_ref_field_pre(field, new_val);
-}
-
-void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
-  inline_write_ref_field_pre(field, new_val);
-}
-
-void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
-  guarantee(false, "Not needed");
-}
-
-void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
-  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
-  shenandoah_assert_not_forwarded_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
-  shenandoah_assert_not_in_cset_except   (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
-}
-
 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
     return load_reference_barrier_impl(obj);
@@ -234,30 +204,6 @@
   }
 }
 
-void ShenandoahBarrierSet::storeval_barrier(oop obj) {
-  if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
-    enqueue(obj);
-  }
-}
-
-void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
-  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
-    enqueue(obj);
-  }
-}
-
-void ShenandoahBarrierSet::enqueue(oop obj) {
-  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
-  assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");
-
-  // Filter marked objects before hitting the SATB queues. The same predicate would
-  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
-  // filtering here helps to avoid wasteful SATB queueing work to begin with.
-  if (!_heap->requires_marking(obj)) return;
-
-  ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
-}
-
 void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
   // Create thread local data
   ShenandoahThreadLocalData::create(thread);
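The code deleted here is not gone: inline_write_ref_field_pre(), storeval_barrier(), keep_alive_barrier() and enqueue() move into shenandoahBarrierSet.inline.hpp (below) so every caller goes through one inlined implementation. For readers new to SATB collectors, here is a minimal standalone sketch of what such a pre-write barrier does; all names are invented stand-ins, not HotSpot's:

    #include <cstdio>
    #include <unordered_set>

    struct MiniHeap {
      bool marking_active = false;
      std::unordered_set<void*> marked;   // objects concurrent marking has seen

      // Mirrors requires_marking(): already-marked objects need no enqueue.
      bool requires_marking(void* obj) const { return marked.count(obj) == 0; }
    };

    MiniHeap heap;

    void satb_enqueue(void* old_value) {
      // A real VM pushes onto a thread-local SATB queue; printing stands in.
      std::printf("SATB enqueue: %p\n", old_value);
    }

    // Pre-write barrier: before *field is overwritten, record its current
    // referent so concurrent marking still traces the object graph as it
    // existed when marking started (the "snapshot").
    void pre_write_barrier(void** field) {
      if (heap.marking_active) {
        void* old_value = *field;
        if (old_value != nullptr && heap.requires_marking(old_value)) {
          satb_enqueue(old_value);
        }
      }
    }

    int main() {
      int a = 1, b = 2;
      void* field = &a;
      heap.marking_active = true;
      pre_write_barrier(&field);   // enqueues &a before it becomes unreachable
      field = &b;
    }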
diff -r 8375560db76b -r f9905e7c96aa src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp	Tue Nov 26 19:18:07 2019 +0300
@@ -77,17 +77,6 @@
   inline void clone_barrier(oop src);
   void clone_barrier_runtime(oop src);
 
-  // We export this to make it available in cases where the static
-  // type of the barrier set is known. Note that it is non-virtual.
-  template <class T> inline void inline_write_ref_field_pre(T* field, oop new_val);
-
-  // These are the more general virtual versions.
-  void write_ref_field_pre_work(oop* field, oop new_val);
-  void write_ref_field_pre_work(narrowOop* field, oop new_val);
-  void write_ref_field_pre_work(void* field, oop new_val);
-
-  void write_ref_field_work(void* v, oop o, bool release = false);
-
   virtual void on_thread_create(Thread* thread);
   virtual void on_thread_destroy(Thread* thread);
   virtual void on_thread_attach(Thread* thread);
@@ -96,8 +85,17 @@
   static inline oop resolve_forwarded_not_null(oop p);
   static inline oop resolve_forwarded(oop p);
 
-  void storeval_barrier(oop obj);
-  void keep_alive_barrier(oop obj);
+  template <DecoratorSet decorators, typename T>
+  inline void satb_barrier(T* field);
+  inline void satb_enqueue(oop value);
+  inline void storeval_barrier(oop obj);
+
+  template <DecoratorSet decorators>
+  inline void keep_alive_if_weak(oop value);
+  inline void keep_alive_if_weak(DecoratorSet decorators, oop value);
+  inline void keep_alive_barrier(oop value);
+
+  inline void enqueue(oop obj);
 
   oop load_reference_barrier(oop obj);
   oop load_reference_barrier_not_null(oop obj);
@@ -111,8 +109,6 @@
   oop load_reference_barrier_native(oop obj, oop* load_addr);
   oop load_reference_barrier_native(oop obj, narrowOop* load_addr);
 
-  void enqueue(oop obj);
-
 private:
   template <typename T>
   inline void arraycopy_pre_work(T* src, T* dst, size_t count);
@@ -126,27 +122,12 @@
   template <class T>
   oop load_reference_barrier_native_impl(oop obj, T* load_addr);
 
-  static void keep_alive_if_weak(DecoratorSet decorators, oop value) {
-    assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
-    const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
-    const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
-    if (!peek && !on_strong_oop_ref && value != NULL) {
-      ShenandoahBarrierSet::barrier_set()->keep_alive_barrier(value);
-    }
-  }
-
 public:
   // Callbacks for runtime accesses.
   template <DecoratorSet decorators, typename BarrierSetT = ShenandoahBarrierSet>
   class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
     typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
 
-    template <typename T>
-    static oop oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value);
-
-    template <typename T>
-    static oop oop_atomic_xchg_in_heap_impl(T* addr, oop new_value);
-
   public:
     // Heap oop accesses. These accessors get resolved when
     // IN_HEAP is set (e.g. when using the HeapAccess API), it is
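Note that the replacement declarations are templates over the DecoratorSet: where the old static keep_alive_if_weak() tested decorator bits at run time, the templated forms let the compiler discard barriers that can never fire for a given access. A self-contained sketch of that dispatch style follows; DecoratorSet and HasDecorator mirror HotSpot's names, but the decorator values and everything else are simplified stand-ins:

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t DecoratorSet;
    const DecoratorSet ON_STRONG_OOP_REF = 1;   // invented values for the sketch
    const DecoratorSet AS_NO_KEEPALIVE   = 2;

    template <DecoratorSet decorators, DecoratorSet decorator>
    struct HasDecorator {
      static const bool value = (decorators & decorator) != 0;
    };

    // The condition below is a compile-time constant, so for strong or
    // no-keepalive accesses this whole function folds away to nothing.
    template <DecoratorSet decorators>
    void keep_alive_if_weak(void* value) {
      if (!HasDecorator<decorators, ON_STRONG_OOP_REF>::value &&
          !HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
        std::printf("keep-alive barrier: %p\n", value);
      }
    }

    int main() {
      int obj = 0;
      keep_alive_if_weak<ON_STRONG_OOP_REF>(&obj);   // compiles to a no-op
      keep_alive_if_weak<0>(&obj);                   // weak load: barrier runs
    }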
diff -r 8375560db76b -r f9905e7c96aa src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Tue Nov 26 19:18:07 2019 +0300
@@ -48,41 +48,124 @@
   }
 }
 
+inline void ShenandoahBarrierSet::enqueue(oop obj) {
+  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
+  assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");
+
+  // Filter marked objects before hitting the SATB queues. The same predicate would
+  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
+  // filtering here helps to avoid wasteful SATB queueing work to begin with.
+  if (!_heap->requires_marking(obj)) return;
+
+  ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
+}
+
+template <DecoratorSet decorators, typename T>
+inline void ShenandoahBarrierSet::satb_barrier(T *field) {
+  if (HasDecorator<decorators, IS_DEST_UNINITIALIZED>::value ||
+      HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    return;
+  }
+  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
+    T heap_oop = RawAccess<>::oop_load(field);
+    if (!CompressedOops::is_null(heap_oop)) {
+      enqueue(CompressedOops::decode(heap_oop));
+    }
+  }
+}
+
+inline void ShenandoahBarrierSet::satb_enqueue(oop value) {
+  assert(value != NULL, "checked before");
+  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
+    enqueue(value);
+  }
+}
+
+inline void ShenandoahBarrierSet::storeval_barrier(oop obj) {
+  if (obj != NULL && ShenandoahStoreValEnqueueBarrier && _heap->is_concurrent_traversal_in_progress()) {
+    enqueue(obj);
+  }
+}
+
+inline void ShenandoahBarrierSet::keep_alive_barrier(oop value) {
+  assert(value != NULL, "checked before");
+  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
+    enqueue(value);
+  }
+}
+
+inline void ShenandoahBarrierSet::keep_alive_if_weak(DecoratorSet decorators, oop value) {
+  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+  const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
+  const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
+  if (!peek && !on_strong_oop_ref) {
+    keep_alive_barrier(value);
+  }
+}
+
+template <DecoratorSet decorators>
+inline void ShenandoahBarrierSet::keep_alive_if_weak(oop value) {
+  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+  if (!HasDecorator<decorators, ON_STRONG_OOP_REF>::value &&
+      !HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+    keep_alive_barrier(value);
+  }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
+  oop value = Raw::oop_load_not_in_heap(addr);
+  if (value != NULL) {
+    ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
+    value = bs->load_reference_barrier_native(value, addr);
+    bs->keep_alive_if_weak<decorators>(value);
+  }
+  return value;
+}
+
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap(T* addr) {
   oop value = Raw::oop_load_in_heap(addr);
-  value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
-  keep_alive_if_weak(decorators, value);
+  if (value != NULL) {
+    ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
+    value = bs->load_reference_barrier_not_null(value);
+    bs->keep_alive_if_weak<decorators>(value);
+  }
   return value;
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap_at(oop base, ptrdiff_t offset) {
   oop value = Raw::oop_load_in_heap_at(base, offset);
-  value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(value);
-  keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
+  if (value != NULL) {
+    ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
+    value = bs->load_reference_barrier_not_null(value);
+    bs->keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset),
+                           value);
+  }
   return value;
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
-  oop value = Raw::oop_load_not_in_heap(addr);
-  value = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_native(value, addr);
-  keep_alive_if_weak(decorators, value);
-  return value;
+inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_not_in_heap(T* addr, oop value) {
+  shenandoah_assert_marked_if(NULL, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
+  ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set();
+  bs->storeval_barrier(value);
+  bs->satb_barrier<decorators>(addr);
+  Raw::oop_store(addr, value);
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap(T* addr, oop value) {
-  ShenandoahBarrierSet::barrier_set()->storeval_barrier(value);
-  const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
-  if (keep_alive) {
-    ShenandoahBarrierSet::barrier_set()->write_ref_field_pre_work(addr, value);
-  }
-  Raw::oop_store_in_heap(addr, value);
+  shenandoah_assert_not_in_cset_loc_except(addr, ShenandoahHeap::heap()->cancelled_gc());
+  shenandoah_assert_not_forwarded_except  (addr, value, value == NULL || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
+  shenandoah_assert_not_in_cset_except    (addr, value, value == NULL || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
+
+  oop_store_not_in_heap(addr, value);
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
@@ -92,14 +175,10 @@
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_not_in_heap(T* addr, oop value) {
-  shenandoah_assert_marked_if(NULL, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
-  Raw::oop_store(addr, value);
-}
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->storeval_barrier(new_value);
 
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
   oop res;
   oop expected = compare_value;
   do {
@@ -107,79 +186,53 @@
     res = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
     expected = res;
   } while ((compare_value != expected) && (resolve_forwarded(compare_value) == resolve_forwarded(expected)));
+
+  // Note: We don't need a keep-alive-barrier here. We already enqueue any loaded reference for SATB anyway,
+  // because it must be the previous value.
   if (res != NULL) {
-    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(res);
-  } else {
-    return res;
+    res = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(res);
+    bs->satb_enqueue(res);
   }
-}
-
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value) {
-  ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
-  oop result = oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value);
-  const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
-  if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
-      (result == compare_value) &&
-      ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
-    ShenandoahBarrierSet::barrier_set()->enqueue(result);
-  }
-  return result;
+  return res;
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
-  oop result = oop_atomic_cmpxchg_in_heap_impl(addr, compare_value, new_value);
-  keep_alive_if_weak(decorators, result);
-  return result;
+  return oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value);
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
-  oop result = oop_atomic_cmpxchg_in_heap_impl(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
-  keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
-  return result;
+  return oop_atomic_cmpxchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
+  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
+  bs->storeval_barrier(new_value);
+
   oop previous = Raw::oop_atomic_xchg(addr, new_value);
-  if (previous != NULL) {
-    return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
-  } else {
-    return previous;
-  }
-}
 
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(T* addr, oop new_value) {
-  ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
-  oop result = oop_atomic_xchg_not_in_heap(addr, new_value);
-  const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
-  if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
-      ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
-    ShenandoahBarrierSet::barrier_set()->enqueue(result);
+  // Note: We don't need a keep-alive-barrier here. We already enqueue any loaded reference for SATB anyway,
+  // because it must be the previous value.
+  if (previous != NULL) {
+    previous = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
+    bs->satb_enqueue(previous);
   }
-  return result;
+  return previous;
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(T* addr, oop new_value) {
-  oop result = oop_atomic_xchg_in_heap_impl(addr, new_value);
-  keep_alive_if_weak(decorators, result);
-  return result;
+  return oop_atomic_xchg_not_in_heap(addr, new_value);
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
 inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
-  oop result = oop_atomic_xchg_in_heap_impl(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
-  keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
-  return result;
+  return oop_atomic_xchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
 }
 
 // Clone barrier support
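The retry loop in oop_atomic_cmpxchg_not_in_heap above handles a Shenandoah-specific wrinkle: a CAS can fail even though the field logically holds the expected object, because the field may still contain the from-space copy while the caller passes the to-space copy. A standalone sketch of that retry pattern, with all types and names invented (not HotSpot code):

    #include <atomic>
    #include <cstdio>

    // Toy object: 'forwardee' points to the to-space copy, or to itself
    // if the object has not been evacuated.
    struct Obj {
      Obj* forwardee;
    };

    Obj* resolve_forwarded(Obj* p) {
      return p == nullptr ? nullptr : p->forwardee;
    }

    // Classic CAS that returns the witnessed previous value.
    Obj* cas_old(std::atomic<Obj*>& field, Obj* compare_value, Obj* new_value) {
      field.compare_exchange_strong(compare_value, new_value);
      return compare_value;   // updated to the current value on failure
    }

    Obj* cas_ref(std::atomic<Obj*>& field, Obj* compare_value, Obj* new_value) {
      Obj* expected = compare_value;
      Obj* res;
      do {
        compare_value = expected;
        res = cas_old(field, compare_value, new_value);
        expected = res;
        // Retry only on a "false" failure: both pointers resolve to the
        // same logical object, just different copies of it.
      } while ((compare_value != expected) &&
               (resolve_forwarded(compare_value) == resolve_forwarded(expected)));
      return res;
    }

    int main() {
      Obj to   = { &to };   // to-space copy, forwards to itself
      Obj from = { &to };   // stale from-space copy, forwarded to 'to'
      Obj nv   = { &nv };
      std::atomic<Obj*> field(&from);

      // The caller holds the to-space pointer while the field still holds
      // from-space: the first CAS fails, the loop retries with the witnessed
      // value, and the second CAS succeeds.
      Obj* prev = cas_ref(field, &to, &nv);
      std::printf("previous value: %p\n", (void*)prev);
    }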
diff -r 8375560db76b -r f9905e7c96aa src/hotspot/share/memory/arena.cpp
--- a/src/hotspot/share/memory/arena.cpp	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/hotspot/share/memory/arena.cpp	Tue Nov 26 19:18:07 2019 +0300
@@ -325,7 +325,7 @@
 // change the size
 void Arena::set_size_in_bytes(size_t size) {
   if (_size_in_bytes != size) {
-    long delta = (long)(size - size_in_bytes());
+    ssize_t delta = size - size_in_bytes();
     _size_in_bytes = size;
     MemTracker::record_arena_size_change(delta, _flags);
   }
diff -r 8375560db76b -r f9905e7c96aa src/hotspot/share/memory/resourceArea.cpp
--- a/src/hotspot/share/memory/resourceArea.cpp	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/hotspot/share/memory/resourceArea.cpp	Tue Nov 26 19:18:07 2019 +0300
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,11 @@
 
 void ResourceArea::bias_to(MEMFLAGS new_flags) {
   if (new_flags != _flags) {
+    size_t size = size_in_bytes();
+    MemTracker::record_arena_size_change(-ssize_t(size), _flags);
     MemTracker::record_arena_free(_flags);
     MemTracker::record_new_arena(new_flags);
+    MemTracker::record_arena_size_change(ssize_t(size), new_flags);
     _flags = new_flags;
   }
 }
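The switch from long (and int) to ssize_t for arena-size deltas matters on LLP64 platforms such as 64-bit Windows, where long is only 32 bits: a single delta of 2 GB or more, which the HugeArenaTracking test later in this patch deliberately provokes, would truncate. The bias_to() change also re-attributes the arena's current size when a ResourceArea changes memory flags, so the per-flag totals stay balanced. A small sketch of the truncation failure mode (assumes the POSIX ssize_t; HotSpot carries its own definition):

    #include <cstdio>
    #include <sys/types.h>   // ssize_t (POSIX)

    int main() {
      size_t old_size = 0;
      size_t new_size = (size_t)3 << 30;               // arena grew by 3 GB

      // On LLP64 (64-bit Windows) long is 32 bits, so this conversion
      // mangles the delta; on LP64 it merely happens to work.
      long    bad  = (long)(new_size - old_size);
      ssize_t good = (ssize_t)(new_size - old_size);   // pointer-sized everywhere

      std::printf("as long:    %ld\n", bad);
      std::printf("as ssize_t: %zd\n", good);
    }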
diff -r 8375560db76b -r f9905e7c96aa src/hotspot/share/prims/whitebox.cpp
--- a/src/hotspot/share/prims/whitebox.cpp	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/hotspot/share/prims/whitebox.cpp	Tue Nov 26 19:18:07 2019 +0300
@@ -797,6 +797,21 @@
   assert(hash_size > 0, "NMT hash_size should be > 0");
   return (jint)hash_size;
 WB_END
+
+WB_ENTRY(jlong, WB_NMTNewArena(JNIEnv* env, jobject o, jlong init_size))
+  Arena* arena = new (mtTest) Arena(mtTest, size_t(init_size));
+  return (jlong)arena;
+WB_END
+
+WB_ENTRY(void, WB_NMTFreeArena(JNIEnv* env, jobject o, jlong arena))
+  Arena* a = (Arena*)arena;
+  delete a;
+WB_END
+
+WB_ENTRY(void, WB_NMTArenaMalloc(JNIEnv* env, jobject o, jlong arena, jlong size))
+  Arena* a = (Arena*)arena;
+  a->Amalloc(size_t(size));
+WB_END
 #endif // INCLUDE_NMT
 
 static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -2244,6 +2259,9 @@
   {CC"NMTReleaseMemory",   CC"(JJ)V",          (void*)&WB_NMTReleaseMemory  },
   {CC"NMTChangeTrackingLevel", CC"()Z",        (void*)&WB_NMTChangeTrackingLevel},
   {CC"NMTGetHashSize",     CC"()I",            (void*)&WB_NMTGetHashSize    },
+  {CC"NMTNewArena",        CC"(J)J",           (void*)&WB_NMTNewArena       },
+  {CC"NMTFreeArena",       CC"(J)V",           (void*)&WB_NMTFreeArena      },
+  {CC"NMTArenaMalloc",     CC"(JJ)V",          (void*)&WB_NMTArenaMalloc    },
 #endif // INCLUDE_NMT
   {CC"deoptimizeFrames",   CC"(Z)I",           (void*)&WB_DeoptimizeFrames  },
   {CC"deoptimizeAll",      CC"()V",            (void*)&WB_DeoptimizeAll     },
diff -r 8375560db76b -r f9905e7c96aa src/hotspot/share/runtime/handshake.cpp
--- a/src/hotspot/share/runtime/handshake.cpp	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/hotspot/share/runtime/handshake.cpp	Tue Nov 26 19:18:07 2019 +0300
@@ -289,20 +289,24 @@
 void HandshakeState::process_self_inner(JavaThread* thread) {
   assert(Thread::current() == thread, "should call from thread");
   assert(!thread->is_terminated(), "should not be a terminated thread");
+  assert(thread->thread_state() != _thread_blocked, "should not be in a blocked state");
+  assert(thread->thread_state() != _thread_in_native, "should not be in native");
 
-  ThreadInVMForHandshake tivm(thread);
-  if (!_semaphore.trywait()) {
-    _semaphore.wait_with_safepoint_check(thread);
-  }
-  HandshakeOperation* op = Atomic::load_acquire(&_operation);
-  if (op != NULL) {
-    HandleMark hm(thread);
-    CautiouslyPreserveExceptionMark pem(thread);
-    // Disarm before execute the operation
-    clear_handshake(thread);
-    op->do_handshake(thread);
-  }
-  _semaphore.signal();
+  do {
+    ThreadInVMForHandshake tivm(thread);
+    if (!_semaphore.trywait()) {
+      _semaphore.wait_with_safepoint_check(thread);
+    }
+    HandshakeOperation* op = Atomic::load_acquire(&_operation);
+    if (op != NULL) {
+      HandleMark hm(thread);
+      CautiouslyPreserveExceptionMark pem(thread);
+      // Disarm before execute the operation
+      clear_handshake(thread);
+      op->do_handshake(thread);
+    }
+    _semaphore.signal();
+  } while (has_operation());
 }
 
 bool HandshakeState::vmthread_can_process_handshake(JavaThread* target) {
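The handshake change converts "process at most one operation" into "process until none is pending": between executing an operation and signalling the semaphore, another handshake may already have been armed for this thread. A minimal standalone sketch of that drain pattern, with invented names and ordinary atomics in place of the VM's semaphore and safepoint machinery:

    #include <atomic>
    #include <cstdio>

    struct Operation {
      void do_handshake() { std::printf("executing op\n"); }
    };

    struct HandshakeState {
      std::atomic<Operation*> _operation{nullptr};

      bool has_operation() {
        return _operation.load(std::memory_order_acquire) != nullptr;
      }

      void process_self() {
        // Loop: a new operation may be armed while we were executing the
        // previous one; returning after a single iteration would leave it
        // pending until the next poll.
        do {
          Operation* op = _operation.exchange(nullptr);  // disarm before executing
          if (op != nullptr) {
            op->do_handshake();
          }
        } while (has_operation());
      }
    };

    int main() {
      HandshakeState hs;
      Operation op;
      hs._operation.store(&op);
      hs.process_self();
    }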
diff -r 8375560db76b -r f9905e7c96aa src/hotspot/share/services/mallocTracker.hpp
--- a/src/hotspot/share/services/mallocTracker.hpp	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/hotspot/share/services/mallocTracker.hpp	Tue Nov 26 19:18:07 2019 +0300
@@ -70,8 +70,9 @@
     }
   }
 
-  inline void resize(long sz) {
+  inline void resize(ssize_t sz) {
     if (sz != 0) {
+      assert(sz >= 0 || _size >= size_t(-sz), "Must be");
       Atomic::add(&_size, size_t(sz));
       DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
     }
@@ -113,7 +114,7 @@
     _arena.deallocate(0);
   }
 
-  inline void record_arena_size_change(long sz) {
+  inline void record_arena_size_change(ssize_t sz) {
     _arena.resize(sz);
   }
 
@@ -207,7 +208,7 @@
     as_snapshot()->by_type(flag)->record_arena_free();
   }
 
-  static inline void record_arena_size_change(long size, MEMFLAGS flag) {
+  static inline void record_arena_size_change(ssize_t size, MEMFLAGS flag) {
     as_snapshot()->by_type(flag)->record_arena_size_change(size);
   }
 
@@ -361,7 +362,7 @@
     MallocMemorySummary::record_arena_free(flags);
   }
 
-  static inline void record_arena_size_change(int size, MEMFLAGS flags) {
+  static inline void record_arena_size_change(ssize_t size, MEMFLAGS flags) {
     MallocMemorySummary::record_arena_size_change(size, flags);
   }
 private:
diff -r 8375560db76b -r f9905e7c96aa src/hotspot/share/services/memTracker.hpp
--- a/src/hotspot/share/services/memTracker.hpp	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/hotspot/share/services/memTracker.hpp	Tue Nov 26 19:18:07 2019 +0300
@@ -63,7 +63,7 @@
 
   static inline void record_new_arena(MEMFLAGS flag) { }
   static inline void record_arena_free(MEMFLAGS flag) { }
-  static inline void record_arena_size_change(int diff, MEMFLAGS flag) { }
+  static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) { }
   static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
                        MEMFLAGS flag = mtNone) { }
   static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
@@ -203,7 +203,7 @@
 
   // Record arena size change. Arena size is the size of all arena
   // chuncks that backing up the arena.
-  static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
+  static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) {
     if (tracking_level() < NMT_summary) return;
     MallocTracker::record_arena_size_change(diff, flag);
   }
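MallocMemorySnapshot keeps _size as an unsigned size_t yet now receives signed deltas. Atomic::add(&_size, size_t(sz)) still works for negative sz because the conversion to size_t wraps modulo 2^N and the unsigned addition wraps back, which is exactly two's-complement subtraction; the only new safety net needed is the assert against underflow. A standalone demonstration of the same arithmetic (POSIX ssize_t assumed):

    #include <cassert>
    #include <cstdio>
    #include <sys/types.h>   // ssize_t (POSIX)

    int main() {
      size_t  counter = 1000;
      ssize_t delta   = -400;

      // Mirror of the patch's assert: a negative delta must not underflow.
      assert(delta >= 0 || counter >= (size_t)(-delta));

      // size_t(-400) is 2^N - 400; adding it wraps around to counter - 400.
      counter += (size_t)delta;

      std::printf("counter = %zu\n", counter);   // prints 600
    }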
diff -r 8375560db76b -r f9905e7c96aa src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkHeader.java
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkHeader.java	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkHeader.java	Tue Nov 26 19:18:07 2019 +0300
@@ -292,4 +292,8 @@
     static long headerSize() {
         return HEADER_SIZE;
     }
+
+    public long getLastNanos() {
+        return getStartNanos() + getDurationNanos();
+    }
 }
diff -r 8375560db76b -r f9905e7c96aa src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkParser.java
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkParser.java	Tue Nov 26 16:09:25 2019 +0300
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkParser.java	Tue Nov 26 19:18:07 2019 +0300
@@ -202,7 +202,7 @@
             long lastValid = absoluteChunkEnd;
             long metadataPoistion = chunkHeader.getMetataPosition();
             long contantPosition = chunkHeader.getConstantPoolPosition();
-            chunkFinished = awaitUpdatedHeader(absoluteChunkEnd);
+            chunkFinished = awaitUpdatedHeader(absoluteChunkEnd, configuration.filterEnd);
             if (chunkFinished) {
                 Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "At chunk end");
                 return null;
@@ -279,11 +279,14 @@
             }
         }
 
-    private boolean awaitUpdatedHeader(long absoluteChunkEnd) throws IOException {
+    private boolean awaitUpdatedHeader(long absoluteChunkEnd, long filterEnd) throws IOException {
         if (Logger.shouldLog(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO)) {
             Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Waiting for more data (streaming). Read so far: " + chunkHeader.getChunkSize() + " bytes");
         }
         while (true) {
+            if (chunkHeader.getLastNanos() > filterEnd) {
+                return true;
+            }
             chunkHeader.refresh();
             if (absoluteChunkEnd != chunkHeader.getEnd()) {
                 return false;
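getLastNanos() gives the chunk's end timestamp (start plus duration), and awaitUpdatedHeader() now checks it before polling: once the chunk is known to extend past the stream's end-time filter, there is nothing left to wait for, which is what lets the new testEmptyStream() case below terminate. The control flow, sketched in C++ with invented stand-in names rather than the JFR classes:

    #include <cstdint>

    // Stand-in for the chunk header state re-read from a live recording file.
    struct MiniChunkHeader {
      int64_t start_nanos;
      int64_t duration_nanos;
      int64_t end_offset;
      int64_t last_nanos() const { return start_nanos + duration_nanos; }
      void refresh() { /* re-read header fields from the file */ }
    };

    // Returns true when the chunk is finished for this consumer, false when
    // new data arrived and parsing should resume.
    bool await_updated_header(MiniChunkHeader& header,
                              int64_t absolute_chunk_end,
                              int64_t filter_end_nanos) {
      while (true) {
        // Early exit: the chunk already spans past the requested end time,
        // so stop waiting even if the recording is still live.
        if (header.last_nanos() > filter_end_nanos) {
          return true;
        }
        header.refresh();
        if (absolute_chunk_end != header.end_offset) {
          return false;   // the chunk grew; go parse the new events
        }
        // (a real implementation sleeps briefly here before polling again)
      }
    }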
diff -r 8375560db76b -r f9905e7c96aa test/hotspot/jtreg/runtime/NMT/HugeArenaTracking.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/NMT/HugeArenaTracking.java	Tue Nov 26 19:18:07 2019 +0300
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @key nmt jcmd
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail HugeArenaTracking
+ */
+
+import java.util.Random;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.JDKToolFinder;
+import sun.hotspot.WhiteBox;
+
+public class HugeArenaTracking {
+    private static final long GB = 1024 * 1024 * 1024;
+
+    public static void main(String args[]) throws Exception {
+        OutputAnalyzer output;
+        final WhiteBox wb = WhiteBox.getWhiteBox();
+
+        // Grab my own PID
+        String pid = Long.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        long arena1 = wb.NMTNewArena(1024);
+        long arena2 = wb.NMTNewArena(1024);
+
+        // Run 'jcmd <pid> VM.native_memory summary'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=2KB, committed=2KB)");
+
+        Random rand = new Random();
+
+        // Allocate 2GB+ from arena
+        long total = 0;
+        while (total < 2 * GB) {
+            // Cap to 10M
+            long inc = rand.nextInt(10 * 1024 * 1024);
+            wb.NMTArenaMalloc(arena1, inc);
+            total += inc;
+        }
+
+        ProcessBuilder pb2 = new ProcessBuilder();
+        // Run 'jcmd <pid> VM.native_memory summary scale=GB'
+        pb2.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=GB"});
+        output = new OutputAnalyzer(pb2.start());
+        output.shouldContain("Test (reserved=2GB, committed=2GB)");
+
+        wb.NMTFreeArena(arena1);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1KB, committed=1KB)");
+        wb.NMTFreeArena(arena2);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved");
+    }
+}
diff -r 8375560db76b -r f9905e7c96aa test/jdk/jdk/jfr/api/consumer/recordingstream/TestSetEndTime.java
--- a/test/jdk/jdk/jfr/api/consumer/recordingstream/TestSetEndTime.java	Tue Nov 26 16:09:25 2019 +0300
+++ b/test/jdk/jdk/jfr/api/consumer/recordingstream/TestSetEndTime.java	Tue Nov 26 19:18:07 2019 +0300
@@ -63,6 +63,14 @@
     public static void main(String... args) throws Exception {
         testEventStream();
         testRecordingStream();
+        testEmptyStream();
+    }
+
+    private static void testEmptyStream() {
+        try (RecordingStream rs = new RecordingStream()) {
+            rs.setEndTime(Instant.now().plusMillis(1100));
+            rs.start();
+        }
     }
 
     private static void testRecordingStream() throws Exception {
@@ -89,10 +97,10 @@
             }
             closed.await();
             System.out.println("Found events: " + count.get());
-            if (count.get() < 50) {
+            if (count.get() > 0 && count.get() < 50) {
                 return;
             }
-            System.out.println("Found 50 events. Retrying");
+            System.out.println("Retrying");
             System.out.println();
         }
     }
diff -r 8375560db76b -r f9905e7c96aa test/lib/sun/hotspot/WhiteBox.java
--- a/test/lib/sun/hotspot/WhiteBox.java	Tue Nov 26 16:09:25 2019 +0300
+++ b/test/lib/sun/hotspot/WhiteBox.java	Tue Nov 26 19:18:07 2019 +0300
@@ -222,6 +222,9 @@
   public native long NMTMallocWithPseudoStackAndType(long size, int index, int type);
   public native boolean NMTChangeTrackingLevel();
   public native int NMTGetHashSize();
+  public native long NMTNewArena(long initSize);
+  public native void NMTFreeArena(long arena);
+  public native void NMTArenaMalloc(long arena, long size);
 
   // Compiler
   public native int matchesMethod(Executable method, String pattern);