50113
|
1 |
/*
|
|
2 |
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
|
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 |
*
|
|
5 |
* This code is free software; you can redistribute it and/or modify it
|
|
6 |
* under the terms of the GNU General Public License version 2 only, as
|
|
7 |
* published by the Free Software Foundation.
|
|
8 |
*
|
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
13 |
* accompanied this code).
|
|
14 |
*
|
|
15 |
* You should have received a copy of the GNU General Public License version
|
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 |
*
|
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
20 |
* or visit www.oracle.com if you need additional information or have any
|
|
21 |
* questions.
|
|
22 |
*
|
|
23 |
*/
|
|
24 |
|
|
25 |
#include "precompiled.hpp"
|
|
26 |
#include "jfr/jfrEvents.hpp"
|
|
27 |
#include "jfr/jni/jfrJavaSupport.hpp"
|
|
28 |
#include "jfr/recorder/jfrRecorder.hpp"
|
|
29 |
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
|
|
30 |
#include "jfr/recorder/service/jfrOptionSet.hpp"
|
|
31 |
#include "jfr/recorder/service/jfrPostBox.hpp"
|
|
32 |
#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
|
|
33 |
#include "jfr/recorder/storage/jfrStorage.hpp"
|
|
34 |
#include "jfr/recorder/storage/jfrStorageControl.hpp"
|
|
35 |
#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
|
|
36 |
#include "jfr/utilities/jfrIterator.hpp"
|
|
37 |
#include "jfr/utilities/jfrTime.hpp"
|
|
38 |
#include "jfr/writers/jfrNativeEventWriter.hpp"
|
|
39 |
#include "logging/log.hpp"
|
|
40 |
#include "runtime/mutexLocker.hpp"
|
|
41 |
#include "runtime/orderAccess.inline.hpp"
|
|
42 |
#include "runtime/safepoint.hpp"
|
|
43 |
#include "runtime/thread.hpp"
|
|
44 |
|
|
45 |
// Shorthand for the buffer type managed by this storage subsystem.
typedef JfrStorage::Buffer* BufferPtr;

// Process-wide singleton, created by JfrStorage::create() and cleared by
// destroy() / the JfrStorage destructor.
static JfrStorage* _instance = NULL;
// File-scope alias; note JfrStorage::control() below actually reads the
// member _control of the singleton, not this file-static — presumably
// vestigial; verify before removing.
static JfrStorageControl* _control;
|
|
49 |
|
|
50 |
// Returns the singleton; caller must ensure create() has already run,
// since this dereferences _instance unconditionally.
JfrStorage& JfrStorage::instance() {
  return *_instance;
}
|
|
53 |
|
|
54 |
// Allocates the storage singleton. Must be called exactly once before
// instance(); initialize() still has to be invoked on the result.
JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) {
  assert(_instance == NULL, "invariant");
  JfrStorage* const storage = new JfrStorage(chunkwriter, post_box);
  _instance = storage;
  return storage;
}
|
|
59 |
|
|
60 |
void JfrStorage::destroy() {
|
|
61 |
if (_instance != NULL) {
|
|
62 |
delete _instance;
|
|
63 |
_instance = NULL;
|
|
64 |
}
|
|
65 |
}
|
|
66 |
|
|
67 |
// Constructor only wires collaborators; all memory spaces are created
// lazily in initialize() so that failure can be reported to the caller.
JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) :
  _control(NULL),
  _global_mspace(NULL),
  _thread_local_mspace(NULL),
  _transient_mspace(NULL),
  _age_mspace(NULL),
  _chunkwriter(chunkwriter),
  _post_box(post_box) {}
|
|
75 |
|
|
76 |
// Releases all owned subsystems. Each member may be NULL if initialize()
// failed partway through, hence the individual guards.
JfrStorage::~JfrStorage() {
  if (_control != NULL) {
    delete _control;
  }
  if (_global_mspace != NULL) {
    delete _global_mspace;
  }
  if (_thread_local_mspace != NULL) {
    delete _thread_local_mspace;
  }
  if (_transient_mspace != NULL) {
    delete _transient_mspace;
  }
  if (_age_mspace != NULL) {
    delete _age_mspace;
  }
  // Clear the file-level singleton so instance() cannot return a dangling ref.
  _instance = NULL;
}
|
|
94 |
|
|
95 |
static const size_t in_memory_discard_threshold_delta = 2; // start discarding data when only this many free buffers remain
static const size_t unlimited_mspace_size = 0;             // 0 means "no size limit" for a memory space
static const size_t thread_local_cache_count = 8;          // free thread-local buffers kept cached
static const size_t thread_local_scavenge_threshold = thread_local_cache_count / 2; // dead-buffer count triggering a scavenge
static const size_t transient_buffer_size_multiplier = 8;  // transient buffer size, as a multiple of the thread-local buffer size
|
|
100 |
|
|
101 |
template <typename Mspace>
|
|
102 |
static Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrStorage* storage_instance) {
|
|
103 |
Mspace* mspace = new Mspace(buffer_size, limit, cache_count, storage_instance);
|
|
104 |
if (mspace != NULL) {
|
|
105 |
mspace->initialize();
|
|
106 |
}
|
|
107 |
return mspace;
|
|
108 |
}
|
|
109 |
|
|
110 |
// Creates the control object and all four memory spaces from the JFR
// option settings. Returns false on the first allocation failure; the
// destructor cleans up whatever was created. Must be called once, before
// any acquire/flush operation.
bool JfrStorage::initialize() {
  assert(_control == NULL, "invariant");
  assert(_global_mspace == NULL, "invariant");
  assert(_thread_local_mspace == NULL, "invariant");
  assert(_transient_mspace == NULL, "invariant");
  assert(_age_mspace == NULL, "invariant");

  const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
  assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
  const size_t memory_size = (size_t)JfrOptionSet::memory_size();
  const size_t global_buffer_size = (size_t)JfrOptionSet::global_buffer_size();
  const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();

  // Discard threshold: start dropping data when only
  // in_memory_discard_threshold_delta free global buffers remain.
  _control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta);
  if (_control == NULL) {
    return false;
  }
  _global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size, memory_size, num_global_buffers, this);
  if (_global_mspace == NULL) {
    return false;
  }
  _thread_local_mspace = create_mspace<JfrThreadLocalMspace>(thread_buffer_size, unlimited_mspace_size, thread_local_cache_count, this);
  if (_thread_local_mspace == NULL) {
    return false;
  }
  // Transient buffers are larger one-shot allocations; no free-list cache (0).
  _transient_mspace = create_mspace<JfrStorageMspace>(thread_buffer_size * transient_buffer_size_multiplier, unlimited_mspace_size, 0, this);
  if (_transient_mspace == NULL) {
    return false;
  }
  // Age nodes carry no payload beyond the node header.
  _age_mspace = create_mspace<JfrStorageAgeMspace>(0 /* no extra size except header */, unlimited_mspace_size, num_global_buffers, this);
  if (_age_mspace == NULL) {
    return false;
  }
  control().set_scavenge_threshold(thread_local_scavenge_threshold);
  return true;
}
|
|
146 |
|
|
147 |
// Accessor for the control/accounting object of the singleton.
// Dereferences unconditionally: valid only after a successful initialize().
JfrStorageControl& JfrStorage::control() {
  return *instance()._control;
}
|
|
150 |
|
|
151 |
// Emits a warning when a buffer allocation request cannot be satisfied.
static void log_allocation_failure(const char* msg, size_t size) {
  log_warning(jfr)("Unable to allocate " SIZE_FORMAT " bytes of %s.", size, msg);
}
|
|
154 |
|
|
155 |
// Acquires a thread-local buffer for the given thread, large enough to
// hold at least 'size' bytes (size == 0 requests a default-sized buffer).
// Returns NULL (after logging a warning) if the memory space cannot
// satisfy the request. The returned buffer is acquired by the caller.
BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
  BufferPtr buffer = mspace_get_to_full(size, instance()._thread_local_mspace, thread);
  if (buffer == NULL) {
    // Fixed log message: was "thread local_memory" (stray underscore).
    log_allocation_failure("thread local memory", size);
    return NULL;
  }
  assert(buffer->acquired_by_self(), "invariant");
  return buffer;
}
|
|
164 |
|
|
165 |
// Allocates a one-shot ("transient") buffer of at least 'size' bytes.
// Transient buffers are both leased and transient: released via
// release_large(), which retires them and registers them as full.
// Returns NULL (after logging) on allocation failure.
BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
  BufferPtr buffer = mspace_allocate_transient_lease_to_full(size, instance()._transient_mspace, thread);
  if (buffer == NULL) {
    log_allocation_failure("transient memory", size);
    return NULL;
  }
  assert(buffer->acquired_by_self(), "invariant");
  assert(buffer->transient(), "invariant");
  assert(buffer->lease(), "invariant");
  return buffer;
}
|
|
176 |
|
|
177 |
// Attempts to lease a free buffer from 'mspace', retrying up to
// 'retry_count' times. If no buffer is available and the system is at the
// discard threshold, oldest full buffers are discarded and the attempt is
// repeated. Returns NULL when the lease cannot be obtained and no
// discarding is warranted.
static BufferPtr get_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
  for (;;) {
    BufferPtr const buffer = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
    if (buffer != NULL) {
      return buffer;
    }
    if (!storage_instance.control().should_discard()) {
      return buffer; // NULL: give up
    }
    storage_instance.discard_oldest(thread);
  }
}
|
|
188 |
|
|
189 |
// Attempts to take (not lease) a free buffer from 'mspace' to promote
// thread-local data into, retrying up to 'retry_count' times. Falls back
// to discarding the oldest full buffers when the discard threshold is
// reached, then retries. Returns NULL on failure.
static BufferPtr get_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
  for (;;) {
    BufferPtr const buffer = mspace_get_free_with_retry(size, mspace, retry_count, thread);
    if (buffer != NULL) {
      return buffer;
    }
    if (!storage_instance.control().should_discard()) {
      return buffer; // NULL: give up
    }
    storage_instance.discard_oldest(thread);
  }
}
|
|
200 |
|
|
201 |
static const size_t lease_retry = 10; // retry budget when leasing from the global mspace
|
|
202 |
|
|
203 |
// Acquires a buffer for a request too large for the thread-local buffer.
// Prefers a lease from the global system when the request fits a global
// buffer and lease capacity remains; otherwise falls back to allocating a
// transient buffer. May return NULL if the transient allocation also fails.
BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
  JfrStorage& storage_instance = instance();
  const size_t max_elem_size = storage_instance._global_mspace->min_elem_size(); // min is also max
  // if not too large and capacity is still available, ask for a lease from the global system
  if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
    BufferPtr const buffer = get_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
    if (buffer != NULL) {
      assert(buffer->acquired_by_self(), "invariant");
      assert(!buffer->transient(), "invariant");
      assert(buffer->lease(), "invariant");
      // Track outstanding leases so is_global_lease_allowed() can throttle.
      storage_instance.control().increment_leased();
      return buffer;
    }
  }
  return acquire_transient(size, thread);
}
|
|
219 |
|
|
220 |
// Records a DataLoss event into 'buffer' (which must already be emptied)
// and accumulates the per-thread total of lost bytes regardless of
// whether the event itself is enabled.
static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->empty(), "invariant");
  // Always account the loss, even if the event is disabled.
  const u8 total_data_loss = thread->jfr_thread_local()->add_data_lost(unflushed_size);
  if (EventDataLoss::is_enabled()) {
    JfrNativeEventWriter writer(buffer, thread);
    writer.write<u8>(EventDataLoss::eventId);
    writer.write(JfrTicks::now());
    writer.write(unflushed_size);
    writer.write(total_data_loss);
  }
}
|
|
232 |
|
|
233 |
// Discards the unflushed contents of 'buffer' (reinitializing it) and, if
// anything was actually lost, records a DataLoss event in its place.
static void write_data_loss(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  // Capture the size before reinitialization wipes it.
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->concurrent_reinitialization();
  if (unflushed_size == 0) {
    return;
  }
  write_data_loss_event(buffer, unflushed_size, thread);
}
|
|
242 |
|
|
243 |
static const size_t promotion_retry = 100; // retry budget when promoting data to a global buffer
|
|
244 |
|
|
245 |
// Promotes the unflushed contents of a regular (non-lease, non-transient)
// buffer into a free global buffer, then reinitializes 'buffer' for reuse.
// If no promotion buffer can be obtained the data is dropped and a
// DataLoss event recorded; returns false in that case, true otherwise.
// On return 'buffer' is always empty.
bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  if (unflushed_size == 0) {
    // Nothing to promote; just reset the buffer.
    buffer->concurrent_reinitialization();
    assert(buffer->empty(), "invariant");
    return true;
  }
  BufferPtr const promotion_buffer = get_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
  if (promotion_buffer == NULL) {
    // Could not promote: drop the data and account/report the loss.
    write_data_loss(buffer, thread);
    return false;
  }
  assert(promotion_buffer->acquired_by_self(), "invariant");
  assert(promotion_buffer->free_size() >= unflushed_size, "invariant");
  // Move data into the promotion buffer and reset the source in one step.
  buffer->concurrent_move_and_reinitialize(promotion_buffer, unflushed_size);
  assert(buffer->empty(), "invariant");
  return true;
}
|
|
266 |
|
|
267 |
/*
 * 1. If the buffer was a "lease" from the global system, release back.
 * 2. If the buffer is transient (temporal dynamically allocated), retire and register full.
 *
 * The buffer is effectively invalidated for the thread post-return,
 * and the caller should take means to ensure that it is not referenced any longer.
 */
void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->lease(), "invariant");
  assert(buffer->acquired_by_self(), "invariant");
  buffer->clear_lease();
  if (buffer->transient()) {
    // Transient: hand the full buffer to the age list for writing/cleanup.
    buffer->set_retired();
    register_full(buffer, thread);
  } else {
    // Global lease: return the buffer and the lease quota.
    buffer->release();
    control().decrement_leased();
  }
}
|
|
287 |
|
|
288 |
// Allocates a fresh transient age node from the age mspace.
// 'buffer' is only used for the precondition check here; the caller
// attaches it via set_retired_buffer(). May return NULL on OOM.
static JfrAgeNode* new_age_node(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(age_mspace != NULL, "invariant");
  return mspace_allocate_transient(0, age_mspace, thread);
}
|
|
293 |
|
|
294 |
// Logged when a full buffer cannot be tracked (no age node available);
// its contents are cleared as a consequence.
static void log_registration_failure(size_t unflushed_size) {
  log_warning(jfr)("Unable to register a full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
  log_debug(jfr, system)("Cleared 1 full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
}
|
|
298 |
|
|
299 |
// Recovery path when full-buffer registration fails: the retired buffer's
// data is dropped (reinitialize) and the loss is logged.
static void handle_registration_failure(BufferPtr buffer) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->reinitialize();
  log_registration_failure(unflushed_size);
}
|
|
306 |
|
|
307 |
// Detaches a node from the age mspace free list. Caller must hold
// JfrBuffer_lock. Returns NULL when the free list is empty.
static JfrAgeNode* get_free_age_node(JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  return mspace_get_free_with_detach(0, age_mspace, thread);
}
|
|
311 |
|
|
312 |
// Links an age node (already carrying its retired buffer) onto the head of
// the full list. Caller must hold JfrBuffer_lock. Always succeeds.
static bool insert_full_age_node(JfrAgeNode* age_node, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  assert(age_node->retired_buffer()->retired(), "invariant");
  age_mspace->insert_full_head(age_node);
  return true;
}
|
|
318 |
|
|
319 |
// Attaches 'buffer' to an age node and inserts it on the full list, all
// under JfrBuffer_lock. Prefers a recycled free age node; allocates a new
// one on demand. Returns false only if no age node can be obtained.
// Note: the 'control' parameter is currently unused but kept for interface
// stability with callers.
static bool full_buffer_registration(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, JfrStorageControl& control, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  assert(age_mspace != NULL, "invariant");
  MutexLockerEx lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
  JfrAgeNode* age_node = get_free_age_node(age_mspace, thread);
  if (age_node == NULL) {
    age_node = new_age_node(buffer, age_mspace, thread);
    if (age_node == NULL) {
      return false;
    }
  }
  // Check non-NULL before the dereferencing assert (original order was
  // reversed, dereferencing age_node before asserting it non-NULL).
  assert(age_node != NULL, "invariant");
  assert(age_node->acquired_by_self(), "invariant");
  age_node->set_retired_buffer(buffer);
  return insert_full_age_node(age_node, age_mspace, thread);
}
|
|
336 |
|
|
337 |
// Publishes a retired buffer to the full list so the recorder service can
// write it out. On registration failure the data is dropped and the buffer
// released. Posts a MSG_FULLBUFFER message when the full count crosses the
// notification threshold.
void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  if (!full_buffer_registration(buffer, _age_mspace, control(), thread)) {
    handle_registration_failure(buffer);
    buffer->release();
  }
  if (control().should_post_buffer_full_message()) {
    _post_box.post(MSG_FULLBUFFER);
  }
}
|
|
348 |
|
|
349 |
// Acquires JfrBuffer_lock without a safepoint check; must not already be
// held by the calling thread (non-reentrant).
void JfrStorage::lock() {
  assert(!JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->lock_without_safepoint_check();
}
|
|
353 |
|
|
354 |
// Releases JfrBuffer_lock; caller must own it.
void JfrStorage::unlock() {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->unlock();
}
|
|
358 |
|
|
359 |
#ifdef ASSERT
|
|
360 |
bool JfrStorage::is_locked() const {
|
|
361 |
return JfrBuffer_lock->owned_by_self();
|
|
362 |
}
|
|
363 |
#endif
|
|
364 |
|
|
365 |
// don't use buffer on return, it is gone
// Flushes any remaining content, then marks the buffer dead (released +
// retired) so the scavenger can reclaim it. If the flush fails the data is
// dropped via reinitialization.
void JfrStorage::release(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  assert(!buffer->retired(), "invariant");
  if (!buffer->empty()) {
    if (!flush_regular_buffer(buffer, thread)) {
      buffer->concurrent_reinitialization();
    }
  }
  assert(buffer->empty(), "invariant");
  // Dead-buffer accounting feeds the scavenge threshold.
  control().increment_dead();
  buffer->release();
  buffer->set_retired();
}
|
|
381 |
|
|
382 |
// Releases a thread-local buffer (typically on thread exit) and nudges the
// recorder to scavenge when enough dead buffers have accumulated.
void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  JfrStorage& storage_instance = instance();
  storage_instance.release(buffer, thread);
  if (storage_instance.control().should_scavenge()) {
    storage_instance._post_box.post(MSG_DEADBUFFER);
  }
}
|
|
390 |
|
|
391 |
// Debug-logs how many full buffers (and bytes) were discarded, plus the
// current number of remaining full buffers.
static void log_discard(size_t count, size_t amount, size_t current) {
  if (!log_is_enabled(Debug, jfr, system)) {
    return;
  }
  assert(count > 0, "invariant");
  log_debug(jfr, system)("Cleared " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" bytes.", count, amount);
  log_debug(jfr, system)("Current number of full buffers " SIZE_FORMAT "", current);
}
|
|
398 |
|
|
399 |
// Drops the oldest full buffers to relieve memory pressure. Uses try_lock
// so concurrent callers don't pile up: if another thread already handled
// the discard, we bail out early. Transient buffers are fully released;
// the first non-transient (global) buffer found is cleared and returned to
// circulation, which ends the discard round.
void JfrStorage::discard_oldest(Thread* thread) {
  if (JfrBuffer_lock->try_lock()) {
    if (!control().should_discard()) {
      // another thread handled it
      JfrBuffer_lock->unlock(); // BUGFIX: early return previously leaked the lock
      return;
    }
    const size_t num_full_pre_discard = control().full_count();
    size_t num_full_post_discard = 0;
    size_t discarded_size = 0;
    while (true) {
      JfrAgeNode* const oldest_age_node = _age_mspace->full_tail();
      if (oldest_age_node == NULL) {
        break;
      }
      BufferPtr const buffer = oldest_age_node->retired_buffer();
      assert(buffer->retired(), "invariant");
      discarded_size += buffer->unflushed_size();
      num_full_post_discard = control().decrement_full();
      if (buffer->transient()) {
        // Transient buffers are one-shot: release both buffer and node,
        // and keep discarding.
        mspace_release_full(buffer, _transient_mspace);
        mspace_release_full(oldest_age_node, _age_mspace);
        continue;
      } else {
        // A global buffer: clear it and publish it back as free, then stop.
        mspace_release_full(oldest_age_node, _age_mspace);
        buffer->reinitialize();
        buffer->release(); // publish
        break;
      }
    }
    JfrBuffer_lock->unlock();
    const size_t number_of_discards = num_full_pre_discard - num_full_post_discard;
    if (number_of_discards > 0) {
      log_discard(number_of_discards, discarded_size, num_full_post_discard);
    }
  }
}
|
|
435 |
|
|
436 |
#ifdef ASSERT
|
|
437 |
typedef const BufferPtr ConstBufferPtr;
|
|
438 |
|
|
439 |
static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) {
|
|
440 |
assert(t != NULL, "invariant");
|
|
441 |
assert(cur != NULL, "invariant");
|
|
442 |
assert(cur->pos() + used <= cur->end(), "invariant");
|
|
443 |
assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
|
|
444 |
}
|
|
445 |
|
|
446 |
static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) {
|
|
447 |
assert(t != NULL, "invariant");
|
|
448 |
assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
|
|
449 |
assert(cur != NULL, "invariant");
|
|
450 |
assert(!cur->lease(), "invariant");
|
|
451 |
assert(cur_pos != NULL, "invariant");
|
|
452 |
assert(req >= used, "invariant");
|
|
453 |
}
|
|
454 |
|
|
455 |
static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) {
|
|
456 |
assert(cur != NULL, "invariant");
|
|
457 |
assert(t != NULL, "invariant");
|
|
458 |
assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
|
|
459 |
assert(req >= used, "invariant");
|
|
460 |
}
|
|
461 |
|
|
462 |
static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
|
|
463 |
assert(t != NULL, "invariant");
|
|
464 |
assert(cur != NULL, "invariant");
|
|
465 |
assert(cur->lease(), "invariant");
|
|
466 |
assert(cur_pos != NULL, "invariant");
|
|
467 |
assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
|
|
468 |
assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
|
|
469 |
assert(req >= used, "invariant");
|
|
470 |
assert(cur != t->jfr_thread_local()->shelved_buffer(), "invariant");
|
|
471 |
}
|
|
472 |
#endif // ASSERT
|
|
473 |
|
|
474 |
// Entry point for flushing the current thread buffer, dispatching to the
// large (lease) or regular path. 'used' is uncommitted data sitting above
// pos(); 'req' is the additional space wanted. Returns the buffer the
// thread should continue writing into (may differ from 'cur').
BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_precondition(cur, used, native, t);)
  const u1* const cur_pos = cur->pos();
  req += used;
  // requested size now encompass the outstanding used size
  return cur->lease() ? instance().flush_large(cur, cur_pos, used, req, native, t) :
    instance().flush_regular(cur, cur_pos, used, req, native, t);
}
|
|
482 |
|
|
483 |
// Flushes a regular thread-local buffer. If after flushing the buffer can
// hold 'req' bytes, the outstanding 'used' bytes are migrated back in and
// the same buffer is returned; otherwise the buffer is shelved and a
// larger lease is provisioned.
BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
  // A flush is needed before memcpy since a non-large buffer is thread stable
  // (thread local). The flush will not modify memory in addresses above pos()
  // which is where the "used / uncommitted" data resides. It is therefore both
  // possible and valid to migrate data after the flush. This is however only
  // the case for stable thread local buffers; it is not the case for large buffers.
  if (!cur->empty()) {
    flush_regular_buffer(cur, t);
  }
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  if (cur->free_size() >= req) {
    // simplest case, no switching of buffers
    if (used > 0) {
      // Move the uncommitted bytes down to the new pos().
      memcpy(cur->pos(), (void*)cur_pos, used);
    }
    assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
    return cur;
  }
  // Going for a "larger-than-regular" buffer.
  // Shelve the current buffer to make room for a temporary lease.
  t->jfr_thread_local()->shelve_buffer(cur);
  return provision_large(cur, cur_pos, used, req, native, t);
}
|
|
507 |
|
|
508 |
// Installs 'buffer' as the thread's active native- or java-side buffer and
// returns it for convenient chaining.
static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* jfr_thread_local, bool native) {
  assert(buffer != NULL, "invariant");
  native ? jfr_thread_local->set_native_buffer(buffer)
         : jfr_thread_local->set_java_buffer(buffer);
  return buffer;
}
|
|
517 |
|
|
518 |
// Unshelves the thread's regular buffer and reinstates it as the active
// (native or java) buffer. A shelved buffer must exist.
static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
  JfrThreadLocal* const tl = t->jfr_thread_local();
  BufferPtr shelved = tl->shelved_buffer();
  assert(shelved != NULL, "invariant");
  tl->shelve_buffer(NULL);
  // restore shelved buffer back as primary
  return store_buffer_to_thread_local(shelved, tl, native);
}
|
|
526 |
|
|
527 |
// Flushes a large (leased) buffer. If the shelved regular buffer can now
// hold the request, the outstanding data is copied back, the lease is
// released, and the regular buffer is restored; otherwise a new, bigger
// lease is provisioned.
BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
  // Can the "regular" buffer (now shelved) accommodate the requested size?
  BufferPtr shelved = t->jfr_thread_local()->shelved_buffer();
  assert(shelved != NULL, "invariant");
  if (shelved->free_size() >= req) {
    if (req > 0) {
      memcpy(shelved->pos(), (void*)cur_pos, (size_t)used);
    }
    // release and invalidate
    release_large(cur, t);
    return restore_shelved_buffer(native, t);
  }
  // regular too small
  return provision_large(cur, cur_pos, used, req, native, t);
}
|
|
543 |
|
|
544 |
// Failure path when a large buffer cannot be provisioned: release the
// current lease (if any) and fall back to the shelved regular buffer.
static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  if (cur->lease()) {
    storage_instance.release_large(cur, t);
  }
  return restore_shelved_buffer(native, t);
}
|
|
552 |
|
|
553 |
// Always returns a non-null buffer.
// If accommodating the large request fails, the shelved buffer is returned
// even though it might be smaller than the requested size.
// Caller needs to ensure if the size was successfully accommodated.
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_provision_large_precondition(cur, used, req, t);)
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  BufferPtr const buffer = acquire_large(req, t);
  if (buffer == NULL) {
    // unable to allocate and serve the request
    return large_fail(cur, native, *this, t);
  }
  // ok managed to acquire a "large" buffer for the requested size
  assert(buffer->free_size() >= req, "invariant");
  assert(buffer->lease(), "invariant");
  // transfer outstanding data
  memcpy(buffer->pos(), (void*)cur_pos, used);
  if (cur->lease()) {
    release_large(cur, t);
    // don't use current anymore, it is gone
  }
  // The new lease becomes the thread's active buffer.
  return store_buffer_to_thread_local(buffer, t->jfr_thread_local(), native);
}
|
|
576 |
|
|
577 |
// Write-operation flavors over the chunk writer:
typedef UnBufferedWriteToChunk<JfrBuffer> WriteOperation;
typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;          // exclusive access
typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;    // concurrent access
typedef ConcurrentWriteOpExcludeRetired<WriteOperation> ThreadLocalConcurrentWriteOperation; // skips retired (dead) buffers
|
|
581 |
|
|
582 |
// Concurrent write pass: first drains registered full buffers, then the
// live thread-local buffers (excluding retired ones), then the global
// free list. Returns the total number of bytes written to the chunk.
size_t JfrStorage::write() {
  const size_t full_size_processed = write_full();
  WriteOperation wo(_chunkwriter);
  ThreadLocalConcurrentWriteOperation tlwo(wo);
  process_full_list(tlwo, _thread_local_mspace);
  ConcurrentWriteOperation cwo(wo);
  process_free_list(cwo, _global_mspace);
  return full_size_processed + wo.processed();
}
|
|
591 |
|
|
592 |
// Safepoint write pass: all mutators are stopped, so a single mutexed
// write operation can drain thread-local, transient and global buffers
// directly. Returns the number of bytes written.
size_t JfrStorage::write_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // mutexed write mode
  process_full_list(writer, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(writer, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(writer, _global_mspace);
  return wo.processed();
}
|
|
603 |
|
|
604 |
// Operations for clearing data instead of writing it:
typedef DiscardOp<DefaultDiscarder<JfrStorage::Buffer> > DiscardOperation;
typedef ReleaseOp<JfrStorageMspace> ReleaseOperation;
// Write then release, used for full (retired) buffers.
typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> FullOperation;
|
|
607 |
|
|
608 |
// Discards all outstanding data instead of writing it: full buffers first,
// then thread-local, transient and global buffers. Returns the number of
// bytes discarded.
size_t JfrStorage::clear() {
  const size_t full_size_processed = clear_full();
  DiscardOperation discarder(concurrent); // concurrent discard mode
  process_full_list(discarder, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(discarder, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(discarder, _global_mspace);
  return full_size_processed + discarder.processed();
}
|
|
618 |
|
|
619 |
// Returns a detached chain of 'count' reusable age nodes [head..tail] to
// the free list, under JfrBuffer_lock. A NULL tail means there is nothing
// to return.
static void insert_free_age_nodes(JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, JfrAgeNode* tail, size_t count) {
  if (tail != NULL) {
    assert(tail->next() == NULL, "invariant");
    assert(head != NULL, "invariant");
    assert(head->prev() == NULL, "invariant");
    MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
    age_mspace->insert_free_tail(head, tail, count);
  }
}
|
|
628 |
|
|
629 |
// Walks a detached chain of age nodes, applying 'processor' to each
// node's retired buffer. Transient nodes are unlinked from the chain and
// deallocated in place; the surviving (reusable) nodes are returned to
// the free list at the end. NOTE(review): the in-place unlink mutates
// head/last/count as it goes — kept byte-identical, order is significant.
template <typename Processor>
static void process_age_list(Processor& processor, JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, size_t count) {
  assert(age_mspace != NULL, "invariant");
  assert(head != NULL, "invariant");
  JfrAgeNode* node = head;
  JfrAgeNode* last = NULL;
  while (node != NULL) {
    last = node;
    BufferPtr const buffer = node->retired_buffer();
    assert(buffer != NULL, "invariant");
    assert(buffer->retired(), "invariant");
    processor.process(buffer);
    // at this point, buffer is already live or destroyed
    node->clear_identity();
    JfrAgeNode* const next = (JfrAgeNode*)node->next();
    if (node->transient()) {
      // detach
      last = (JfrAgeNode*)last->prev();
      if (last != NULL) {
        last->set_next(next);
      } else {
        head = next;
      }
      if (next != NULL) {
        next->set_prev(last);
      }
      --count;
      age_mspace->deallocate(node);
    }
    node = next;
  }
  // Hand the remaining (non-transient) nodes back for reuse.
  insert_free_age_nodes(age_mspace, head, last, count);
}
|
|
662 |
|
|
663 |
template <typename Processor>
|
|
664 |
static size_t process_full(Processor& processor, JfrStorageControl& control, JfrStorageAgeMspace* age_mspace) {
|
|
665 |
assert(age_mspace != NULL, "invariant");
|
|
666 |
if (age_mspace->is_full_empty()) {
|
|
667 |
// nothing to do
|
|
668 |
return 0;
|
|
669 |
}
|
|
670 |
size_t count;
|
|
671 |
JfrAgeNode* head;;
|
|
672 |
{
|
|
673 |
// fetch age list
|
|
674 |
MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
|
|
675 |
count = age_mspace->full_count();
|
|
676 |
head = age_mspace->clear_full();
|
|
677 |
control.reset_full();
|
|
678 |
}
|
|
679 |
assert(head != NULL, "invariant");
|
|
680 |
process_age_list(processor, age_mspace, head, count);
|
|
681 |
return count;
|
|
682 |
}
|
|
683 |
|
|
684 |
// Debug-logs the outcome of a full-buffer pass; 'clear' selects the
// discard wording over the write wording.
static void log(size_t count, size_t amount, bool clear = false) {
  if (log_is_enabled(Debug, jfr, system)) {
    if (count > 0) {
      log_debug(jfr, system)("%s " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" B of data%s",
        clear ? "Discarded" : "Wrote", count, amount, clear ? "." : " to chunk.");
    }
  }
}
|
|
692 |
|
|
693 |
// full writer
// Assumption is retired only; exclusive access
// MutexedWriter -> ReleaseOp
//
// Writes all registered full buffers to the current chunk and releases the
// transient ones back to their mspace. Returns the number of bytes written.
size_t JfrStorage::write_full() {
  assert(_chunkwriter.is_valid(), "invariant");
  Thread* const thread = Thread::current();
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // a retired buffer implies mutexed access
  ReleaseOperation ro(_transient_mspace, thread);
  FullOperation cmd(&writer, &ro);
  const size_t count = process_full(cmd, control(), _age_mspace);
  log(count, writer.processed());
  return writer.processed();
}
|
|
708 |
|
|
709 |
// Discards all registered full buffers without writing them.
// Returns the number of bytes discarded.
size_t JfrStorage::clear_full() {
  DiscardOperation discarder(mutexed); // a retired buffer implies mutexed access
  const size_t count = process_full(discarder, control(), _age_mspace);
  log(count, discarder.processed(), true);
  return discarder.processed();
}
|
|
715 |
|
|
716 |
// Debug-logs the result of a scavenge pass: buffers reclaimed, bytes, and
// how many dead buffers remain.
static void scavenge_log(size_t count, size_t amount, size_t current) {
  if (count > 0) {
    if (log_is_enabled(Debug, jfr, system)) {
      log_debug(jfr, system)("Released " SIZE_FORMAT " dead buffer(s) of " SIZE_FORMAT" B of data.", count, amount);
      log_debug(jfr, system)("Current number of dead buffers " SIZE_FORMAT "", current);
    }
  }
}
|
|
724 |
|
|
725 |
// Visitor that reclaims dead (retired) buffers encountered while walking a
// memory space's full list, returning them to the free list and updating
// the dead-buffer accounting. Tracks how many buffers / bytes it reclaimed.
template <typename Mspace>
class Scavenger {
 private:
  JfrStorageControl& _control;
  Mspace* _mspace;
  size_t _count;   // buffers reclaimed so far
  size_t _amount;  // total bytes of reclaimed buffer capacity
 public:
  typedef typename Mspace::Type Type;
  Scavenger(JfrStorageControl& control, Mspace* mspace) : _control(control), _mspace(mspace), _count(0), _amount(0) {}
  // Reclaims 't' if it is a dead buffer; always continues the iteration.
  bool process(Type* t) {
    if (t->retired()) {
      // A dead buffer is empty and unowned: see JfrStorage::release().
      assert(!t->transient(), "invariant");
      assert(!t->lease(), "invariant");
      assert(t->empty(), "invariant");
      assert(t->identity() == NULL, "invariant");
      ++_count;
      _amount += t->total_size();
      t->clear_retired();
      _control.decrement_dead();
      mspace_release_full_critical(t, _mspace);
    }
    return true;
  }
  size_t processed() const { return _count; }
  size_t amount() const { return _amount; }
};
|
|
752 |
|
|
753 |
// Reclaims dead thread-local buffers (released on thread exit). Fast-exits
// when the dead count is zero. Returns the number of buffers reclaimed.
size_t JfrStorage::scavenge() {
  JfrStorageControl& ctrl = control();
  if (ctrl.dead_count() == 0) {
    return 0;
  }
  Scavenger<JfrThreadLocalMspace> scavenger(ctrl, _thread_local_mspace);
  process_full_list(scavenger, _thread_local_mspace);
  scavenge_log(scavenger.processed(), scavenger.amount(), ctrl.dead_count());
  return scavenger.processed();
}
|