/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  NOINLINE void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free all but the first n
      // NB: with n == 0 the walk below runs off the end of the list and
      // frees nothing; callers pass n >= 1.
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          // Free all remaining chunks while in ThreadCritical lock
          // so NMT adjustment is stable.
          while (cur != NULL) {
            next = cur->next();
            os::free(cur);
            _num_chunks--;
            cur = next;
          }
        }
      }
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

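// Usage sketch (illustrative only, not part of the VM): a pooled allocation
// and its matching free. Chunk::operator new/delete below are the real
// callers; the requested size must match the pool's fixed _size.
//
//   size_t bytes = Chunk::medium_size + Chunk::aligned_overhead_size();
//   void* p = ChunkPool::medium_pool()->allocate(bytes, AllocFailStrategy::RETURN_NULL);
//   if (p != NULL) {
//     // ... use the chunk ...
//     ChunkPool::medium_pool()->free((Chunk*)p);  // back onto the freelist
//   }
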
ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}

//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // We expect requested_size to be sizeof(Chunk); if it is not the proper size we must align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

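// Illustrative only: the size-class dispatch above means a pooled length is
// served by its dedicated pool, while any other length falls through to
// os::malloc. For example (using the chunk size constants from this file):
//
//   Chunk* c = new (AllocFailStrategy::EXIT_OOM, Chunk::tiny_size) Chunk(Chunk::tiny_size);
//   // -> served by ChunkPool::tiny_pool()
//   Chunk* d = new (AllocFailStrategy::EXIT_OOM, 12345) Chunk(12345);
//   // -> no matching pool; served directly by os::malloc (default case)
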
void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c);  break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c);  break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c);   break;
   default:
     ThreadCritical tc;  // Free chunks under TC lock so that NMT adjustment is stable.
     os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}

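// Sketch of the difference (illustrative only): chop() frees the chunk it is
// called on and everything linked after it, while next_chop() keeps the
// receiver and frees only its successors:
//
//   head->chop();        // frees head, head->next(), ... (the whole chain)
//   head->next_chop();   // frees head->next(), ... ; head survives with _next == NULL
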
void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(init_size);
}

Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(Chunk::init_size);
}

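// Worked example (illustrative only) of the rounding in the first
// constructor: on a 64-bit platform sizeof(char*) is 8, so round_size is 7
// and
//
//   init_size = (init_size + 7) & ~7;   // e.g. 100 -> 104, 104 -> 104
//
// rounds the requested size up to the next pointer-size multiple before the
// first chunk is allocated.
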
Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // workaround rare racing condition, which could double count
  // the arena size by native memory tracking
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  MemTracker::record_arena_free(_flags);
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = os::malloc(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare racing condition
  // that can have total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    long delta = (long)(size - size_in_bytes());
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}

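// Worked example (illustrative only) of the delta computation above:
//
//   size_t old_size = 4096, new_size = 12288;
//   long grow_delta   = (long)(new_size - old_size);  // +8192
//   long shrink_delta = (long)(old_size - new_size);  // -8192: the unsigned
//                                                     // subtraction wraps; the
//                                                     // signed cast recovers it
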
// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  Chunk *k = _first;
  while( k != _chunk) {         // While there are more Chunks in the list
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "%s", whence);
}

// Grow the arena by allocating a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size. Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max  = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}

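// Sketch (illustrative only) of how the allocation fast path relates to
// grow(): allocation is a bump of _hwm while the request fits the current
// chunk, and grow() is entered only on overflow (compare the debug-only
// internal_malloc_4() below, which does exactly this):
//
//   if (_hwm + x > _max) {
//     return grow(x, alloc_failmode);   // chain a new chunk, allocate from it
//   } else {
//     char* old = _hwm;
//     _hwm += x;
//     return old;                       // bump-pointer fast path
//   }
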
// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&             // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {  // Still fits where it sits
    _hwm = c_old+corrected_new_size;          // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}

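// Illustrative only: whether Arealloc() can stay in place depends entirely on
// the block being the most recent allocation in the current chunk ("arena"
// below is a hypothetical Arena*):
//
//   char* p = (char*) arena->Amalloc(64);
//   p = (char*) arena->Arealloc(p, 64, 128);   // p was topmost: grows in place if it fits
//   void* q = arena->Amalloc(16);
//   p = (char*) arena->Arealloc(p, 128, 256);  // p no longer topmost: copy + Afree
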
// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}

#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}

#endif // Non-product