/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/binaryTreeDictionary.inline.hpp"
#include "memory/freeList.inline.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "memory/metaspace/occupancyMap.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

namespace metaspace {

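// Overview: the ChunkManager keeps track of free metaspace chunks. Non-humongous
// chunks (specialized, small, medium) live in per-type freelists (_free_chunks);
// humongous chunks live in a separate dictionary (_humongous_dictionary). Chunks
// returned to the manager are coalesced with free neighbors where possible (see
// attempt_to_coalesce_around_chunk()), and requests which cannot be served from the
// exact-size freelist may split a larger free chunk (see split_chunk() and
// free_chunks_get()). Most operations expect the MetaspaceExpand_lock to be held.
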
ChunkManager::ChunkManager(bool is_class)
      : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
  _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
  _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
  _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk has been removed from the chunks free list, update counters.
  account_for_removed_chunk(chunk);
}

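// Given a chunk that was just returned to its freelist, attempt to merge the chunks
// in the surrounding, target-chunk-sized region into one larger free chunk of the
// given target type. Merging only happens if the whole region lies within the
// committed range of the node, starts and ends on chunk boundaries, and contains no
// in-use chunks. For example, if every chunk inside an aligned, medium-chunk-sized
// region is free, those chunks can be replaced by a single free medium chunk.
// Returns true if a merge happened, false otherwise.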
bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
  assert_lock_strong(MetaspaceExpand_lock);
  assert(chunk != NULL, "invalid chunk pointer");
  // Check for valid merge combinations.
  assert((chunk->get_chunk_type() == SpecializedIndex &&
          (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
         (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
         "Invalid chunk merge combination.");

  const size_t target_chunk_word_size =
    get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());

  // [ prospective merge region )
  MetaWord* const p_merge_region_start =
    (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
  MetaWord* const p_merge_region_end =
    p_merge_region_start + target_chunk_word_size;

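  // Note: non-humongous chunks are aligned to their own chunk size (see
  // split_chunk() below), so aligning the chunk address down to the target
  // chunk size yields the one target-sized region which could contain it.
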
  // We need the VirtualSpaceNode containing this chunk and its occupancy map.
  VirtualSpaceNode* const vsn = chunk->container();
  OccupancyMap* const ocmap = vsn->occupancy_map();

  // The prospective chunk merge range must be completely contained by the
  // committed range of the virtual space node.
  if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
    return false;
  }

  // Only attempt to merge this range if at its start a chunk starts and at its end
  // a chunk ends. If a chunk (can only be humongous) straddles either start or end
  // of that range, we cannot merge.
  if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
    return false;
  }
  if (p_merge_region_end < vsn->top() &&
      !ocmap->chunk_starts_at_address(p_merge_region_end)) {
    return false;
  }

  // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
  if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
    return false;
  }

  // Success! Remove all chunks in this region...
  log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
    (is_class() ? "class space" : "metaspace"),
    p_merge_region_start, p_merge_region_end);

  const int num_chunks_removed =
    remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);

  // ... and create a single new bigger chunk.
  Metachunk* const p_new_chunk =
    ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
  assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
  p_new_chunk->set_origin(origin_merge);

  log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
    (is_class() ? "class space" : "metaspace"),
    p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));

  // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
  ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
  ocmap->set_chunk_starts_at_address(p_merge_region_start, true);

  // Mark chunk as free. Note: it is not necessary to update the occupancy
  // map in-use map, because the old chunks were also free, so nothing
  // should have changed.
  p_new_chunk->set_is_tagged_free(true);

  // Add new chunk to its freelist.
  ChunkList* const list = free_chunks(target_chunk_type);
  list->return_chunk_at_head(p_new_chunk);

  // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
  // should not have changed, because the size of the space is the same).
  _free_chunks_count -= num_chunks_removed;
  _free_chunks_count ++;

  // VirtualSpaceNode::container_count does not have to be modified:
  // it means "number of active (non-free) chunks", so merging free chunks
  // should not affect that count.

  // At the end of a chunk merge, run verification tests.
  if (VerifyMetaspace) {
    DEBUG_ONLY(this->locked_verify());
    DEBUG_ONLY(vsn->verify());
  }

  return true;
}

// Remove all chunks in the given area - the chunks are supposed to be free -
// from their corresponding freelists. Mark them as invalid.
// - This does not correct the occupancy map.
// - This does not adjust the counters in ChunkManager.
// - Does not adjust container count counter in containing VirtualSpaceNode.
// Returns number of chunks removed.
int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
  assert(p != NULL && word_size > 0, "Invalid range.");
  const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
  assert_is_aligned(word_size, smallest_chunk_size);

  Metachunk* const start = (Metachunk*) p;
  const Metachunk* const end = (Metachunk*)(p + word_size);
  Metachunk* cur = start;
  int num_removed = 0;
  while (cur < end) {
    Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
    DEBUG_ONLY(do_verify_chunk(cur));
    assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
    assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
    log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
      (is_class() ? "class space" : "metaspace"),
      cur, cur->word_size() * sizeof(MetaWord));
    cur->remove_sentinel();
    // Note: cannot call ChunkManager::remove_chunk, because that
    // modifies the counters in ChunkManager, which we do not want. So
    // we call remove_chunk on the freelist directly (see also the
    // splitting function which does the same).
    ChunkList* const list = free_chunks(list_index(cur->word_size()));
    list->remove_chunk(cur);
    num_removed ++;
    cur = next;
  }
  return num_removed;
}

size_t ChunkManager::free_chunks_total_words() {
  return _free_chunks_total;
}

size_t ChunkManager::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

// Update internal accounting after a chunk was added.
void ChunkManager::account_for_added_chunk(const Metachunk* c) {
  assert_lock_strong(MetaspaceExpand_lock);
  _free_chunks_count ++;
  _free_chunks_total += c->word_size();
}

// Update internal accounting after a chunk was removed.
void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
  assert_lock_strong(MetaspaceExpand_lock);
  assert(_free_chunks_count >= 1,
    "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
  assert(_free_chunks_total >= c->word_size(),
    "ChunkManager::_free_chunks_total: about to go negative"
    " (now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
  _free_chunks_count --;
  _free_chunks_total -= c->word_size();
}

size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !MetaspaceExpand_lock->is_locked()) {
    MutexLockerEx cl(MetaspaceExpand_lock,
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks.
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}

ChunkIndex ChunkManager::list_index(size_t size) {
  return get_chunk_type_by_size(size, is_class());
}

size_t ChunkManager::size_by_index(ChunkIndex index) const {
  index_bounds_check(index);
  assert(index != HumongousIndex, "Do not call for humongous chunks.");
  return get_size_for_nonhumongous_chunktype(index, is_class());
}

void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(MetaspaceExpand_lock);
  assert(sum_free_chunks() == _free_chunks_total,
         "_free_chunks_total " SIZE_FORMAT " is not the"
         " same as sum " SIZE_FORMAT, _free_chunks_total,
         sum_free_chunks());
}

void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  assert(sum_free_chunks_count() == _free_chunks_count,
         "_free_chunks_count " SIZE_FORMAT " is not the"
         " same as sum " SIZE_FORMAT, _free_chunks_count,
         sum_free_chunks_count());
}

void ChunkManager::verify() {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  locked_verify();
}

void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);
    if (list != NULL) {
      Metachunk* chunk = list->head();
      while (chunk) {
        DEBUG_ONLY(do_verify_chunk(chunk);)
        assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
        chunk = chunk->next();
      }
    }
  }
}

void ChunkManager::locked_print_free_chunks(outputStream* st) {
  assert_lock_strong(MetaspaceExpand_lock);
  st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
               _free_chunks_total, _free_chunks_count);
}

void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  assert_lock_strong(MetaspaceExpand_lock);
  st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
               sum_free_chunks(), sum_free_chunks_count());
}

ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
         "Bad index: %d", (int)index);

  return &_free_chunks[index];
}

// These methods, which sum the free chunk lists, are used by printing
// methods that run in product builds.
size_t ChunkManager::sum_free_chunks() {
  assert_lock_strong(MetaspaceExpand_lock);
  size_t result = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);

    if (list == NULL) {
      continue;
    }

    result = result + list->count() * list->size();
  }
  result = result + humongous_dictionary()->total_size();
  return result;
}

size_t ChunkManager::sum_free_chunks_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  size_t count = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);
    if (list == NULL) {
      continue;
    }
    count = count + list->count();
  }
  count = count + humongous_dictionary()->total_free_blocks();
  return count;
}

ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  ChunkIndex index = list_index(word_size);
  assert(index < HumongousIndex, "No humongous list");
  return free_chunks(index);
}

// Helper for chunk splitting: given a target chunk size and a larger free chunk,
// split up the larger chunk into n smaller chunks, at least one of which should be
// the target chunk of target chunk size. The smaller chunks, including the target
// chunk, are returned to the freelist. The pointer to the target chunk is returned.
// Note that this chunk is supposed to be removed from the freelist right away.
Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
  assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");

  const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
  const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());

  MetaWord* const region_start = (MetaWord*)larger_chunk;
  const size_t region_word_len = larger_chunk->word_size();
  MetaWord* const region_end = region_start + region_word_len;
  VirtualSpaceNode* const vsn = larger_chunk->container();
  OccupancyMap* const ocmap = vsn->occupancy_map();

  // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
  // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
  // at an address suitable to place the smaller target chunk.
  assert_is_aligned(region_start, target_chunk_word_size);

  // Remove old chunk.
  free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
  larger_chunk->remove_sentinel();

  // Prevent access to the old chunk from here on.
  larger_chunk = NULL;
  // ... and wipe it.
  DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));

  // In its place create first the target chunk...
  MetaWord* p = region_start;
  Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
  assert(target_chunk == (Metachunk*)p, "Sanity");
  target_chunk->set_origin(origin_split);

  // Note: we do not need to mark its start in the occupancy map
  // because it coincides with the old chunk start.

  // Mark chunk as free and return to the freelist.
  do_update_in_use_info_for_chunk(target_chunk, false);
  free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);

  // This chunk should now be valid and can be verified.
  DEBUG_ONLY(do_verify_chunk(target_chunk));

  // In the remaining space create the remainder chunks.
  p += target_chunk->word_size();
  assert(p < region_end, "Sanity");

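  // The rest of the region is carved into free remainder chunks, each as large
  // as the alignment at p allows. Illustrative example (actual word sizes depend
  // on the configured chunk geometry): splitting a medium chunk to obtain a
  // specialized chunk yields the target chunk, further specialized chunks up to
  // the next small-chunk boundary, and small chunks for the remainder.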
  while (p < region_end) {

    // Find the largest chunk size which fits the alignment requirements at address p.
    ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
    size_t this_chunk_word_size = 0;
    for(;;) {
      this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
      if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
        break;
      } else {
        this_chunk_index = prev_chunk_index(this_chunk_index);
        assert(this_chunk_index >= target_chunk_index, "Sanity");
      }
    }

    assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
    assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
    assert(p + this_chunk_word_size <= region_end, "Sanity");

    // Create splitting chunk.
    Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
    assert(this_chunk == (Metachunk*)p, "Sanity");
    this_chunk->set_origin(origin_split);
    ocmap->set_chunk_starts_at_address(p, true);
    do_update_in_use_info_for_chunk(this_chunk, false);

    // This chunk should be valid and can be verified.
    DEBUG_ONLY(do_verify_chunk(this_chunk));

    // Return this chunk to freelist and correct counter.
    free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
    _free_chunks_count ++;

    log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
        SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
        p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
        p2i(region_start), p2i(region_end));

    p += this_chunk_word_size;

  }

  return target_chunk;
}

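// Fetch a free chunk of the given word size: first look in the matching freelist;
// if that is empty, try to split a larger free chunk. Humongous requests are served
// from the humongous dictionary. The returned chunk is accounted for as removed and
// marked as in use; NULL is returned if no suitable chunk is available.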
Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  assert_lock_strong(MetaspaceExpand_lock);

  slow_locked_verify();

  Metachunk* chunk = NULL;
  bool we_did_split_a_chunk = false;

  if (list_index(word_size) != HumongousIndex) {

    ChunkList* free_list = find_free_chunks_list(word_size);
    assert(free_list != NULL, "Sanity check");

    chunk = free_list->head();

    if (chunk == NULL) {
      // Split large chunks into smaller chunks if there are no smaller chunks, just large chunks.
      // This is the counterpart of the coalescing-upon-chunk-return.

      ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());

      // Is there a larger chunk we could split?
      Metachunk* larger_chunk = NULL;
      ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
      while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
        larger_chunk = free_chunks(larger_chunk_index)->head();
        if (larger_chunk == NULL) {
          larger_chunk_index = next_chunk_index(larger_chunk_index);
        }
      }

      if (larger_chunk != NULL) {
        assert(larger_chunk->word_size() > word_size, "Sanity");
        assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");

        // We found a larger chunk. Let's split it up:
        // - remove old chunk
        // - in its place, create new smaller chunks, with at least one chunk
        //   being of target size, the others sized as large as possible. This
        //   is to make sure the resulting chunks are "as coalesced as possible"
        //   (similar to VirtualSpaceNode::retire()).
        // Note: during this operation both ChunkManager and VirtualSpaceNode
        //   are temporarily invalid, so be careful with asserts.

        log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
           ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
          (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
          chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));

        chunk = split_chunk(word_size, larger_chunk);

        // This should have worked.
        assert(chunk != NULL, "Sanity");
        assert(chunk->word_size() == word_size, "Sanity");
        assert(chunk->is_tagged_free(), "Sanity");

        we_did_split_a_chunk = true;

      }
    }

    if (chunk == NULL) {
      return NULL;
    }

    // Remove the chunk as the head of the list.
    free_list->remove_chunk(chunk);

    log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
                                       p2i(free_list), free_list->count());

  } else {
    chunk = humongous_dictionary()->get_chunk(word_size);

    if (chunk == NULL) {
      return NULL;
    }

    log_trace(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
                                    chunk->word_size(), word_size, chunk->word_size() - word_size);
  }

  // Chunk has been removed from the chunk manager; update counters.
  account_for_removed_chunk(chunk);
  do_update_in_use_info_for_chunk(chunk, true);
  chunk->container()->inc_container_count();
  chunk->inc_use_count();

  // Remove it from the links to this freelist.
  chunk->set_next(NULL);
  chunk->set_prev(NULL);

  // Run some verifications (some more if we did a chunk split).
#ifdef ASSERT
  if (VerifyMetaspace) {
    locked_verify();
    VirtualSpaceNode* const vsn = chunk->container();
    vsn->verify();
    if (we_did_split_a_chunk) {
      vsn->verify_free_chunks_are_ideally_merged();
    }
  }
#endif

  return chunk;
}

Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  assert_lock_strong(MetaspaceExpand_lock);
  slow_locked_verify();

  // Take from the beginning of the list.
  Metachunk* chunk = free_chunks_get(word_size);
  if (chunk == NULL) {
    return NULL;
  }

  assert((word_size <= chunk->word_size()) ||
         (list_index(chunk->word_size()) == HumongousIndex),
         "Non-humongous variable sized chunk");
  LogTarget(Trace, gc, metaspace, freelist) lt;
  if (lt.is_enabled()) {
    size_t list_count;
    if (list_index(word_size) < HumongousIndex) {
      ChunkList* list = find_free_chunks_list(word_size);
      list_count = list->count();
    } else {
      list_count = humongous_dictionary()->total_count();
    }
    LogStream ls(lt);
    ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
             p2i(this), p2i(chunk), chunk->word_size(), list_count);
    ResourceMark rm;
    locked_print_free_chunks(&ls);
  }

  return chunk;
}

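// Return a single chunk to the ChunkManager: mark it free, hand it back to its
// freelist (or, for humongous chunks, to the humongous dictionary), update the
// counters, then try to coalesce it with neighboring free chunks into a larger chunk.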
void ChunkManager::return_single_chunk(Metachunk* chunk) {
  const ChunkIndex index = chunk->get_chunk_type();
  assert_lock_strong(MetaspaceExpand_lock);
  DEBUG_ONLY(do_verify_chunk(chunk);)
  assert(chunk != NULL, "Expected chunk.");
  assert(chunk->container() != NULL, "Container should have been set.");
  assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
  index_bounds_check(index);

  // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
  // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
  // keeps tree node pointers in the chunk payload area which mangle will overwrite.
  DEBUG_ONLY(chunk->mangle(badMetaWordVal);)

  if (index != HumongousIndex) {
    // Return non-humongous chunk to freelist.
    ChunkList* list = free_chunks(index);
    assert(list->size() == chunk->word_size(), "Wrong chunk type.");
    list->return_chunk_at_head(chunk);
    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
        chunk_size_name(index), p2i(chunk));
  } else {
    // Return humongous chunk to dictionary.
    assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
    assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
           "Humongous chunk has wrong alignment.");
    _humongous_dictionary.return_chunk(chunk);
    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
        chunk_size_name(index), p2i(chunk), chunk->word_size());
  }
  chunk->container()->dec_container_count();
  do_update_in_use_info_for_chunk(chunk, false);

  // Chunk has been added; update counters.
  account_for_added_chunk(chunk);

  // Attempt to coalesce the returned chunk with its neighboring chunks:
  // if this chunk is small or special, attempt to coalesce to a medium chunk.
  if (index == SmallIndex || index == SpecializedIndex) {
    if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
      // This did not work. But if this chunk is special, we may still be able to form a small chunk.
      if (index == SpecializedIndex) {
        if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
          // give up.
        }
      }
    }
  }

}

void ChunkManager::return_chunk_list(Metachunk* chunks) {
  if (chunks == NULL) {
    return;
  }
  LogTarget(Trace, gc, metaspace, freelist) log;
  if (log.is_enabled()) { // tracing
    log.print("returning list of chunks...");
  }
  unsigned num_chunks_returned = 0;
  size_t size_chunks_returned = 0;
  Metachunk* cur = chunks;
  while (cur != NULL) {
    // Capture the next link before it is changed
    // by the call to return_chunk_at_head();
    Metachunk* next = cur->next();
    if (log.is_enabled()) { // tracing
      num_chunks_returned ++;
      size_chunks_returned += cur->word_size();
    }
    return_single_chunk(cur);
    cur = next;
  }
  if (log.is_enabled()) { // tracing
    log.print("returned %u chunks to freelist, total word size " SIZE_FORMAT ".",
              num_chunks_returned, size_chunks_returned);
  }
}

void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
  }
}

} // namespace metaspace