diff -r 3054503bad7d -r caa25ab47aca src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp
--- a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp	Fri Sep 13 16:03:31 2019 -0700
+++ b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp	Sat Sep 14 14:40:09 2019 +0200
@@ -26,6 +26,7 @@
 #define SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_INLINE_HPP
 
 #include "jfr/recorder/storage/jfrMemorySpace.hpp"
+#include "runtime/os.hpp"
 
 template <typename T, template <typename> class RetrievalType, typename Callback>
 JfrMemorySpace<T, RetrievalType, Callback>::
@@ -69,6 +70,41 @@
   return true;
 }
 
+// allocations are even multiples of the mspace min size
+static inline size_t align_allocation_size(size_t requested_size, size_t min_elem_size) {
+  assert((int)min_elem_size % os::vm_page_size() == 0, "invariant");
+  u8 alloc_size_bytes = min_elem_size;
+  while (requested_size > alloc_size_bytes) {
+    alloc_size_bytes <<= 1;
+  }
+  assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant");
+  return (size_t)alloc_size_bytes;
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+inline T* JfrMemorySpace<T, RetrievalType, Callback>::allocate(size_t size) {
+  const size_t aligned_size_bytes = align_allocation_size(size, _min_elem_size);
+  void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(T));
+  if (allocation == NULL) {
+    return NULL;
+  }
+  T* const t = new (allocation) T;
+  assert(t != NULL, "invariant");
+  if (!t->initialize(sizeof(T), aligned_size_bytes)) {
+    JfrCHeapObj::free(t, aligned_size_bytes + sizeof(T));
+    return NULL;
+  }
+  return t;
+}
+
+template <typename T, template <typename> class RetrievalType, typename Callback>
+inline void JfrMemorySpace<T, RetrievalType, Callback>::deallocate(T* t) {
+  assert(t != NULL, "invariant");
+  assert(!_free.in_list(t), "invariant");
+  assert(!_full.in_list(t), "invariant");
+  JfrCHeapObj::free(t, t->total_size());
+}
+
 template <typename T, template <typename> class RetrievalType, typename Callback>
 inline void JfrMemorySpace<T, RetrievalType, Callback>::release_full(T* t) {
   assert(is_locked(), "invariant");
@@ -122,6 +158,15 @@
   }
 }
 
+template <typename Mspace, typename Callback>
+static inline Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, Callback* cb) {
+  Mspace* const mspace = new Mspace(buffer_size, limit, cache_count, cb);
+  if (mspace != NULL) {
+    mspace->initialize();
+  }
+  return mspace;
+}
+
 template <typename Mspace>
 inline size_t size_adjustment(size_t size, Mspace* mspace) {
   assert(mspace != NULL, "invariant");
@@ -174,6 +219,15 @@
 }
 
 template <typename Mspace>
+class MspaceLock {
+ private:
+  Mspace* _mspace;
+ public:
+  MspaceLock(Mspace* mspace) : _mspace(mspace) { _mspace->lock(); }
+  ~MspaceLock() { _mspace->unlock(); }
+};
+
+template <typename Mspace>
 inline typename Mspace::Type* mspace_allocate_transient_to_full(size_t size, Mspace* mspace, Thread* thread) {
   typename Mspace::Type* const t = mspace_allocate_transient(size, mspace, thread);
   if (t == NULL) return NULL;
@@ -344,6 +398,20 @@
 }
 
 template <typename Mspace>
+class ReleaseOp : public StackObj {
+ private:
+  Mspace* _mspace;
+  Thread* _thread;
+  bool _release_full;
+ public:
+  typedef typename Mspace::Type Type;
+  ReleaseOp(Mspace* mspace, Thread* thread, bool release_full = true) :
+    _mspace(mspace), _thread(thread), _release_full(release_full) {}
+  bool process(Type* t);
+  size_t processed() const { return 0; }
+};
+
+template <typename Mspace>
 inline bool ReleaseOp<Mspace>::process(typename Mspace::Type* t) {
   assert(t != NULL, "invariant");
   // assumes some means of exclusive access to t