 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/classLoaderMetaspace.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceEnums.hpp"
#include "memory/metaspace/metaspaceReport.hpp"
#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
#include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/settings.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/vmError.hpp"

using metaspace::ChunkManager;
using metaspace::ClassLoaderMetaspace;
using metaspace::CommitLimiter;
using metaspace::MetaspaceType;
using metaspace::MetadataType;
using metaspace::MetaspaceReporter;
using metaspace::RunningCounters;
using metaspace::VirtualSpaceList;
using namespace metaspace;

MetaWord* last_allocated = 0;

////// Metaspace methods /////

MetaWord* Metaspace::_compressed_class_space_base = NULL;
size_t Metaspace::_compressed_class_space_size = 0;
const MetaspaceTracer* Metaspace::_tracer = NULL;
bool Metaspace::_initialized = false;

DEBUG_ONLY(bool Metaspace::_frozen = false;)

static const char* space_type_name(Metaspace::MetaspaceType t) {
  const char* s = NULL;
  switch (t) {
  case Metaspace::StandardMetaspaceType: s = "Standard"; break;
  case Metaspace::BootMetaspaceType: s = "Boot"; break;
  case Metaspace::UnsafeAnonymousMetaspaceType: s = "UnsafeAnonymous"; break;
  case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
  default: ShouldNotReachHere();
  }
  return s;
}

// Used by MetaspaceCounters
size_t MetaspaceUtils::free_chunks_total_words(MetadataType mdtype) {
  return is_class(mdtype) ? RunningCounters::free_chunks_words_class() : RunningCounters::free_chunks_words_nonclass();
}

size_t MetaspaceUtils::used_words() {
  return RunningCounters::used_words();
}

size_t MetaspaceUtils::used_words(MetadataType mdtype) {
  return is_class(mdtype) ? RunningCounters::used_words_class() : RunningCounters::used_words_nonclass();
}

size_t MetaspaceUtils::reserved_words() {
  return RunningCounters::reserved_words();
}

size_t MetaspaceUtils::reserved_words(MetadataType mdtype) {
  return is_class(mdtype) ? RunningCounters::reserved_words_class() : RunningCounters::reserved_words_nonclass();
}

size_t MetaspaceUtils::committed_words() {
  return RunningCounters::committed_words();
}

size_t MetaspaceUtils::committed_words(MetadataType mdtype) {
  return is_class(mdtype) ? RunningCounters::committed_words_class() : RunningCounters::committed_words_nonclass();
}
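
// The accessors above are thin wrappers around metaspace::RunningCounters,
// which does the actual bookkeeping; this keeps them cheap enough for callers
// such as MetaspaceCounters (see the note above) that poll them frequently.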

// MetaspaceGC methods

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed. If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata. With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces. The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs. New flags can be implemented
// if really needed. MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon. If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
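
// Illustrative example (not from the source; assumes for concreteness
// MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M and a commit
// alignment of 64K): a 100K request yields a delta of 256K, a 1M request
// yields 4M, and a 6M request yields 6M + 256K, all multiples of 64K.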

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != NULL) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != NULL) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}
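
// Typical call-site pattern (a sketch only, not the actual allocation path):
// a false return with can_retry == true just means another thread won the CAS,
// so the caller can recompute and try again; can_retry == false means the
// MaxMetaspaceSize ceiling was hit and only a GC can help:
//
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, NULL, NULL, &can_retry)) {
//     if (!can_retry) {
//       break; // limit reached - caller must trigger a collection
//     }
//   }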

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}
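
// Worked example (made-up numbers): with committed_bytes() == 20M,
// capacity_until_GC() == 24M and MaxMetaspaceSize == 32M, the result is
// MIN2(24M - 20M, 32M - 20M) / BytesPerWord, i.e. 4M worth of words.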

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity than the minimum desired capacity,
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase. So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}
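
// Illustrative shrink sequence: if each GC leaves the same excess capacity,
// consecutive calls apply 0%, 10%, 40% and finally 100% (capped) of the
// computed excess, since _shrink_factor progresses 0 -> 10 -> 40 -> 100;
// a single call that does not shrink resets the factor back to 0.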

// MetaspaceUtils
size_t MetaspaceUtils::_capacity_words[Metaspace::MetadataTypeCount] = {0, 0};
size_t MetaspaceUtils::_overhead_words[Metaspace::MetadataTypeCount] = {0, 0};
volatile size_t MetaspaceUtils::_used_words[Metaspace::MetadataTypeCount] = {0, 0};

// Collect used metaspace statistics. This involves walking the CLDG. The resulting
// output will be the accumulated values for all live metaspaces.
// Note: method does not do any locking.
void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
  out->reset();
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    ClassLoaderMetaspace* msp = iter.get_next();
    if (msp != NULL) {
      msp->add_to_statistics(out);
    }
  }
}

size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->free_bytes();
}

size_t MetaspaceUtils::free_in_vs_bytes() {
  return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
}

static void inc_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  (*pstat) += words;
}

static void dec_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  *pstat = size_now - words;
}

static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
  Atomic::add(words, pstat);
}

static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  Atomic::sub(words, pstat);
}
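
// The split above is deliberate: capacity and overhead only change while the
// MetaspaceExpand_lock is held (hence the assert_lock_strong and plain
// arithmetic), whereas "used" is bumped by allocations running concurrently
// and therefore needs the Atomic:: variants.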

void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_overhead_words[mdtype], words);
}
void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_overhead_words[mdtype], words);
}

size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->reserved_bytes();
}

size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->committed_bytes();
}

size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }

size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  if (chunk_manager == NULL) {
    return 0;
  }
  return chunk_manager->free_chunks_total_words();
}

size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  return free_chunks_total_words(mdtype) * BytesPerWord;
}

size_t MetaspaceUtils::free_chunks_total_words() {
  return free_chunks_total_words(Metaspace::ClassType) +
         free_chunks_total_words(Metaspace::NonClassType);
}

size_t MetaspaceUtils::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
  return Metaspace::get_chunk_manager(mdtype) != NULL;
}

MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
  if (!has_chunk_free_list(mdtype)) {
    return MetaspaceChunkFreeListSummary();
  }

  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
  return cm->chunk_free_list_summary();
}

void MetaspaceUtils::print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values) {
  const metaspace::MetaspaceSizesSnapshot meta_values;

  // We print used and committed since these are the most useful at-a-glance vitals for Metaspace:
  // - used tells you how much memory is actually used for metadata
  // - committed tells you how much memory is committed for the purpose of metadata
  // The difference between those two would be waste, which can have various forms (freelists,
  // unused parts of committed chunks etc)
  //
  // Left out is reserved, since this is not as exciting as the first two values: for class space,
  // it is a constant (to uninformed users, often confusingly large). For non-class space, it would
  // be interesting since free chunks can be uncommitted, but for now it is left out.

  if (Metaspace::using_class_space()) {
    log_info(gc, metaspace)(HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT" "
                            HEAP_CHANGE_FORMAT,
                            meta_values.used(),
                            meta_values.committed()));
  }
}

// Prints an ASCII representation of the given space.
void MetaspaceUtils::print_metaspace_map(outputStream* out, MetadataType mdtype) {
  out->print_cr("-- not yet implemented ---");
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
  MetaspaceReporter::print_basic_report(out, scale);
}

// Prints a report about the current metaspace state.
// Optional parts can be enabled via flags.
// Function will walk the CLDG and will lock the expand lock; if that is not
// convenient, use print_basic_report() instead.
void MetaspaceUtils::print_full_report(outputStream* out, size_t scale) {
  const int flags =
      MetaspaceReporter::rf_show_loaders |
      MetaspaceReporter::rf_break_down_by_chunktype |
      MetaspaceReporter::rf_show_classes;
  MetaspaceReporter::print_report(out, scale, flags);
}
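
// Example (hypothetical caller, not from the source): a report with per-loader
// detail broken down by chunk type, but without the per-class listing, could
// be produced with
//   MetaspaceReporter::print_report(out, scale,
//       MetaspaceReporter::rf_show_loaders | MetaspaceReporter::rf_break_down_by_chunktype);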

// Used from all GCs. It first prints out totals, then, separately, the class space portion.
void MetaspaceUtils::print_on(outputStream* out) {

  out->print_cr(" Metaspace       "
                "used "      SIZE_FORMAT "K, "
                "capacity "  SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved "  SIZE_FORMAT "K",
                used_bytes()/K,
                capacity_bytes()/K,
                committed_bytes()/K,
                reserved_bytes()/K);

  if (Metaspace::using_class_space()) {
    const Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space    "
                  "used "      SIZE_FORMAT "K, "
                  "capacity "  SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved "  SIZE_FORMAT "K",
                  used_bytes(ct)/K,
                  capacity_bytes(ct)/K,
                  committed_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}

#ifdef ASSERT
void MetaspaceUtils::verify(bool slow) {
  if (Metaspace::initialized()) {

    // Verify non-class chunkmanager...
    ChunkManager* cm = ChunkManager::chunkmanager_nonclass();
    cm->verify(slow);

    // ... and space list.
    VirtualSpaceList* vsl = VirtualSpaceList::vslist_nonclass();
    vsl->verify(slow);

    if (Metaspace::using_class_space()) {
      // If we use compressed class pointers, verify class chunkmanager...
      cm = ChunkManager::chunkmanager_class();
      assert(cm != NULL, "Sanity");
      cm->verify(slow);

      // ... and class spacelist.
      vsl = VirtualSpaceList::vslist_class();
      assert(vsl != NULL, "Sanity");
      vsl->verify(slow);
    }

  }
}
#endif

void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
  const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  {
    if (Metaspace::using_class_space()) {
      out->print("  Non-class space:  ");
    }
    print_scaled_words(out, reserved_nonclass_words, scale, 7);
    out->print(" reserved, ");
    print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
    out->print_cr(" committed ");

    if (Metaspace::using_class_space()) {
      const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      out->print("      Class space:  ");
      print_scaled_words(out, reserved_class_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
      out->print_cr(" committed ");

      const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
      const size_t committed_words = committed_nonclass_words + committed_class_words;
      out->print("             Both:  ");
      print_scaled_words(out, reserved_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
      out->print_cr(" committed ");
    }
  }
}
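
// Illustrative print_vs() output (the numbers are made up):
//   Non-class space:  328.00 MB reserved,      21.25 MB (  6%) committed
//       Class space:    1.00 GB reserved,       3.12 MB ( <1%) committed
//              Both:    1.32 GB reserved,      24.37 MB (  2%) committed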

static void print_basic_switches(outputStream* out, size_t scale) {
  out->print("MaxMetaspaceSize: ");
  if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) {
    // aka "very big". Default is max_uintx, but due to rounding in arg parsing the real
    // value is smaller.
    out->print("unlimited");
  } else {
    print_human_readable_size(out, MaxMetaspaceSize, scale);
  }
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print("CompressedClassSpaceSize: ");
    print_human_readable_size(out, CompressedClassSpaceSize, scale);
  }
  out->cr();
}
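
// Illustrative output with default settings (CompressedClassSpaceSize defaults
// to 1G; exact formatting depends on print_human_readable_size):
//   MaxMetaspaceSize: unlimited
//   CompressedClassSpaceSize: 1.00 GB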

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {

  if (!Metaspace::initialized()) {
    out->print_cr("Metaspace not yet initialized.");
    return;
  }

  out->cr();
  out->print_cr("Usage:");

  if (Metaspace::using_class_space()) {
    out->print("  Non-class:  ");
  }

  // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
  // MetaspaceUtils.
  const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
  const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
  const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
  const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;

  print_scaled_words(out, cap_nc, scale, 5);
  out->print(" capacity, ");
  print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
  out->print(" used, ");
  print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
  out->print(" free+waste, ");
  print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
  out->print(" overhead. ");
  out->cr();

  if (Metaspace::using_class_space()) {
    const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
    const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
    const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
    const size_t free_and_waste_c = cap_c - overhead_c - used_c;
    out->print("      Class:  ");
    print_scaled_words(out, cap_c, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
    out->print(" overhead. ");
    out->cr();

    out->print("       Both:  ");
    const size_t cap = cap_nc + cap_c;

    print_scaled_words(out, cap, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
    out->print(" overhead. ");
    out->cr();
  }

  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  out->cr();
  out->print_cr("Chunk freelists:");

  if (Metaspace::using_class_space()) {
    out->print("   Non-Class:  ");
  }
  print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print("       Class:  ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes(), scale);
    out->cr();
    out->print("        Both:  ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes() +
                                   Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
    out->cr();
  }

  out->cr();

  // Print basic settings
  print_basic_switches(out, scale);

  out->cr();

}
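
// Reading the "Usage" block above: for each space, capacity = used +
// free+waste + overhead, so (made-up figures) 10.00 MB capacity would break
// down into e.g. 8.00 MB used, 1.50 MB free+waste and 512.00 KB overhead.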

void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {

  if (!Metaspace::initialized()) {
    out->print_cr("Metaspace not yet initialized.");
    return;
  }

  const bool print_loaders = (flags & rf_show_loaders) > 0;
  const bool print_classes = (flags & rf_show_classes) > 0;
  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
  const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;

  // Some report options require walking the class loader data graph.
  PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype);
  if (print_loaders) {
    out->cr();
    out->print_cr("Usage per loader:");
    out->cr();
  }

  ClassLoaderDataGraph::loaded_cld_do(&cl); // collect data and optionally print

  // Print totals, broken up by space type.
  if (print_by_spacetype) {
    out->cr();
    out->print_cr("Usage per space type:");
    out->cr();
    for (int space_type = (int)Metaspace::ZeroMetaspaceType;
         space_type < (int)Metaspace::MetaspaceTypeCount; space_type++)
    {
      uintx num_loaders = cl._num_loaders_by_spacetype[space_type];
      uintx num_classes = cl._num_classes_by_spacetype[space_type];
      out->print("%s - " UINTX_FORMAT " %s",
                 space_type_name((Metaspace::MetaspaceType)space_type),
                 num_loaders, loaders_plural(num_loaders));
      if (num_classes > 0) {
        out->print(", ");
        print_number_of_classes(out, num_classes, cl._num_classes_shared_by_spacetype[space_type]);
        out->print(":");
        cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
      } else {
        out->print(".");
        out->cr();
      }
      out->cr();
    }
  }

  // Print totals for in-use data:
  out->cr();
  {
    uintx num_loaders = cl._num_loaders;
    out->print("Total Usage - " UINTX_FORMAT " %s, ",
               num_loaders, loaders_plural(num_loaders));
    print_number_of_classes(out, cl._num_classes, cl._num_classes_shared);
    out->print(":");
    cl._stats_total.print_on(out, scale, print_by_chunktype);
    out->cr();
  }

  // -- Print Virtual space.
  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  // -- Print VirtualSpaceList details.
  if ((flags & rf_show_vslist) > 0) {
    out->cr();
    out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");

    if (Metaspace::using_class_space()) {
750 out->print_cr(" Non-Class:"); |
|
751 } |
|
752 Metaspace::space_list()->print_on(out, scale); |
|
753 if (Metaspace::using_class_space()) { |
|
754 out->print_cr(" Class:"); |
|
755 Metaspace::class_space_list()->print_on(out, scale); |
|
756 } |
|
757 } |
|
758 out->cr(); |
|
759 |
|
760 // -- Print VirtualSpaceList map. |
|
761 if ((flags & rf_show_vsmap) > 0) { |
|
762 out->cr(); |
|
763 out->print_cr("Virtual space map:"); |
|
764 |
|
765 if (Metaspace::using_class_space()) { |
|
766 out->print_cr(" Non-Class:"); |
|
767 } |
|
768 Metaspace::space_list()->print_map(out); |
|
769 if (Metaspace::using_class_space()) { |
|
770 out->print_cr(" Class:"); |
|
771 Metaspace::class_space_list()->print_map(out); |
|
772 } |
|
773 } |
|
774 out->cr(); |
|
775 |
|
776 // -- Print Freelists (ChunkManager) details |
|
777 out->cr(); |
|
778 out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : ""); |
|
779 |
|
780 ChunkManagerStatistics non_class_cm_stat; |
|
781 Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat); |
|
782 |
|
783 if (Metaspace::using_class_space()) { |
|
784 out->print_cr(" Non-Class:"); |
|
785 } |
|
786 non_class_cm_stat.print_on(out, scale); |
|
787 |
|
788 if (Metaspace::using_class_space()) { |
|
789 ChunkManagerStatistics class_cm_stat; |
|
790 Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat); |
|
791 out->print_cr(" Class:"); |
|
792 class_cm_stat.print_on(out, scale); |
|
793 } |
|
794 |
|
795 // As a convenience, print a summary of common waste. |
|
796 out->cr(); |
|
797 out->print("Waste "); |
|
798 // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace. |
|
799 const size_t committed_words = committed_bytes() / BytesPerWord; |
|
800 |
|
801 out->print("(percentages refer to total committed size "); |
|
-  print_scaled_words(out, committed_words, scale);
-  out->print_cr("):");
-
-  // Print space committed but not yet used by any class loader
-  const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
-  out->print(" Committed unused: ");
-  print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
-  out->cr();
-
-  // Print waste for in-use chunks.
-  UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
-  UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
-  UsedChunksStatistics ucs_all;
-  ucs_all.add(ucs_nonclass);
-  ucs_all.add(ucs_class);
-
-  out->print(" Waste in chunks in use: ");
-  print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
-  out->cr();
-  out->print(" Free in chunks in use: ");
-  print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
-  out->cr();
-  out->print(" Overhead in chunks in use: ");
-  print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
-  out->cr();
-
-  // Print waste in free chunks.
-  const size_t total_capacity_in_free_chunks =
-      Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
-      (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
-  out->print(" In free chunks: ");
-  print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
-  out->cr();
-
-  // Print waste in deallocated blocks.
-  const uintx free_blocks_num =
-      cl._stats_total.nonclass_sm_stats().free_blocks_num() +
-      cl._stats_total.class_sm_stats().free_blocks_num();
-  const size_t free_blocks_cap_words =
-      cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
-      cl._stats_total.class_sm_stats().free_blocks_cap_words();
-  out->print("Deallocated from chunks in use: ");
-  print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
-  out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
-  out->cr();
-
-  // Print total waste.
-  const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
-                             + free_blocks_cap_words + unused_words_in_vs;
-  out->print(" -total-: ");
-  print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
-  out->cr();
-
-  // Print internal statistics.
-#ifdef ASSERT
-  out->cr();
-  out->cr();
-  out->print_cr("Internal statistics:");
-  out->cr();
-  out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
-  out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
-  out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
-  out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
-  out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
-  out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
-  out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
-  out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
-  out->print_cr("Number of chunks added to freelist: " UINTX_FORMAT ".",
-                g_internal_statistics.num_chunks_added_to_freelist);
-  out->print_cr("Number of chunks removed from freelist: " UINTX_FORMAT ".",
-                g_internal_statistics.num_chunks_removed_from_freelist);
-  out->print_cr("Number of chunk merges: " UINTX_FORMAT ", split-ups: " UINTX_FORMAT ".",
-                g_internal_statistics.num_chunk_merges, g_internal_statistics.num_chunk_splits);
-
-  out->cr();
-#endif
-
-  // Print some interesting settings.
-  out->cr();
-  out->cr();
-  print_basic_switches(out, scale);
-
-  out->cr();
-  out->print("InitialBootClassLoaderMetaspaceSize: ");
-  print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
-
-  out->cr();
-  out->cr();
-} // MetaspaceUtils::print_report()
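// Usage sketch (illustrative, not part of this file): the report above backs
// diagnostic commands such as jcmd VM.metaspace. A caller with an outputStream
// could print it scaled to KB via something like:
//
//   MetaspaceUtils::print_report(tty, K);
//
// where the scale argument (here the HotSpot constant K = 1024) selects the
// display unit used by print_scaled_words() throughout the report.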
|
-
-// Prints an ASCII representation of the given space.
-void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-  const bool for_class = (mdtype == Metaspace::ClassType);
-  VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
-  if (vsl != NULL) {
-    if (for_class) {
-      if (!Metaspace::using_class_space()) {
-        out->print_cr("No Class Space.");
-        return;
-      }
-      out->print_raw("---- Metaspace Map (Class Space) ----");
-    } else {
-      out->print_raw("---- Metaspace Map (Non-Class Space) ----");
-    }
-    // Print legend:
-    out->cr();
-    out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
-    out->cr();
-    vsl->print_map(out);
-    out->cr();
-  }
-}
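// Usage sketch (illustrative): the map printer takes MetaspaceExpand_lock itself,
// so a caller only needs a stream and a metadata type, e.g.:
//
//   MetaspaceUtils::print_metaspace_map(tty, Metaspace::NonClassType);  // non-class map
//   MetaspaceUtils::print_metaspace_map(tty, Metaspace::ClassType);     // class space map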
|
-
-void MetaspaceUtils::verify_free_chunks() {
-#ifdef ASSERT
-  Metaspace::chunk_manager_metadata()->verify(false);
-  if (Metaspace::using_class_space()) {
-    Metaspace::chunk_manager_class()->verify(false);
-  }
-#endif
-}
-
-void MetaspaceUtils::verify_metrics() {
-#ifdef ASSERT
-  // Please note: there are time windows where the internal counters are out of sync with
-  // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
-  // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
-  // not be counted when iterating the CLDG. So be careful when you call this method.
-  ClassLoaderMetaspaceStatistics total_stat;
-  collect_statistics(&total_stat);
-  UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
-  UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
-
-  bool mismatch = false;
-  for (int i = 0; i < Metaspace::MetadataTypeCount; i++) {
-    Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
-    UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
-    if (capacity_words(mdtype) != chunk_stat.cap() ||
-        used_words(mdtype) != chunk_stat.used() ||
-        overhead_words(mdtype) != chunk_stat.overhead()) {
-      mismatch = true;
-      tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
-      tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
-                    capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
-      tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
-                    chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
-      tty->flush();
-    }
-  }
-  assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
-#endif
-}
-
-// Metaspace methods
-
-size_t Metaspace::_first_chunk_word_size = 0;
-size_t Metaspace::_first_class_chunk_word_size = 0;
-
|
size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

-VirtualSpaceList* Metaspace::_space_list = NULL;
-VirtualSpaceList* Metaspace::_class_space_list = NULL;
-
-ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
-ChunkManager* Metaspace::_chunk_manager_class = NULL;
-
-bool Metaspace::_initialized = false;
-
-#define VIRTUALSPACEMULTIPLIER 2
+DEBUG_ONLY(bool Metaspace::_frozen = false;)

#ifdef _LP64
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);

void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {

[ ... old lines 980-1080 / new lines 514-623 omitted ... ]
#if INCLUDE_CDS
      if (UseSharedSpaces
          && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
        // We failed to find an aligned base that will reach. Fall
        // back to using our requested addr.
-       metaspace_rs = ReservedSpace(compressed_class_space_size(),
+       rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment,
                                     large_pages,
                                     requested_addr);
        break;
      }
#endif

-     metaspace_rs = ReservedSpace(compressed_class_space_size(),
+     rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment,
                                   large_pages,
                                   a);
-     if (metaspace_rs.is_reserved())
+     if (rs.is_reserved())
        break;
    }
  }

#endif // AARCH64

- if (!metaspace_rs.is_reserved()) {
+ if (!rs.is_reserved()) {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      size_t increment = align_up(1*G, _reserve_alignment);

      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
-     while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
+     while (!rs.is_reserved() && (addr + increment > addr) &&
             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
        addr = addr + increment;
-       metaspace_rs = ReservedSpace(compressed_class_space_size(),
+       rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment, large_pages, addr);
      }
    }
#endif
    // If no successful allocation then try to allocate the space anywhere. If
    // that fails then OOM doom. At this point we cannot try allocating the
    // metaspace as if UseCompressedClassPointers is off because too much
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
-   if (!metaspace_rs.is_reserved()) {
-     metaspace_rs = ReservedSpace(compressed_class_space_size(),
-                                  _reserve_alignment, large_pages);
-     if (!metaspace_rs.is_reserved()) {
+   if (!rs.is_reserved()) {
+     rs = ReservedSpace(compressed_class_space_size(),
+                        _reserve_alignment, large_pages);
+     if (!rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              compressed_class_space_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
- MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
+ MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);
+
+ _compressed_class_space_base = (MetaWord*)rs.base();

#if INCLUDE_CDS
  // Verify that we can use shared spaces. Otherwise, turn off CDS.
- if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
+ if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }
#endif
- set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
+ set_narrow_klass_base_and_shift((address)rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

- initialize_class_space(metaspace_rs);
+ initialize_class_space(rs);

  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    print_compressed_class_space(&ls, requested_addr);
  }
}

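// The CDS fall-back above probes upward in 1 GB steps until a reservation succeeds
// or the candidate address stops being reachable for compressed class pointers. A
// self-contained sketch of that probing pattern (plain C++; try_reserve and
// reachable are hypothetical stand-ins for ReservedSpace and
// can_use_cds_with_metaspace_addr):
//
//   char* probe_upward(char* addr, size_t increment,
//                      bool (*try_reserve)(char*), bool (*reachable)(char*)) {
//     // "addr + increment > addr" guards against pointer wrap-around, as above.
//     while (!try_reserve(addr) && (addr + increment > addr) &&
//            reachable(addr + increment)) {
//       addr += increment;
//     }
//     return addr;
//   }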
-void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
-  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
-               p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
-  if (_class_space_list != NULL) {
-    address base = (address)_class_space_list->current_virtual_space()->bottom();
-    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
-              compressed_class_space_size(), p2i(base));
-    if (requested_addr != 0) {
-      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
-    }
-    st->cr();
-  }
-}
-
|
// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap. The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");
- _class_space_list = new VirtualSpaceList(rs);
- _chunk_manager_class = new ChunkManager(true/*is_class*/);
-
- if (!_class_space_list->initialization_succeeded()) {
-   vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
- }
-}
+
+ VirtualSpaceList* vsl = new VirtualSpaceList("class space list", rs, CommitLimiter::globalLimiter());
+ VirtualSpaceList::set_vslist_class(vsl);
+ ChunkManager* cm = new ChunkManager("class space chunk manager", vsl);
+ ChunkManager::set_chunkmanager_class(cm);
+}
+
+void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
+  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
+               p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
+  if (Metaspace::using_class_space()) {
+    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
+              compressed_class_space_size(), p2i(compressed_class_space_base()));
+    if (requested_addr != 0) {
+      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
+    }
+    st->cr();
+  }
+}

#endif
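// Design note on the new version: instead of storing the class-space list and chunk
// manager in Metaspace member pointers (the old _class_space_list and
// _chunk_manager_class), the objects register themselves globally via
// VirtualSpaceList::set_vslist_class() and ChunkManager::set_chunkmanager_class();
// the explicit initialization_succeeded() check from the old version has no
// counterpart here.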
void Metaspace::ergo_initialize() {
+
+  // Must happen before using any setting from Settings.
+  metaspace::Settings::strategy_t strat = metaspace::Settings::strategy_balanced_reclaim;
+  if (strcmp(MetaspaceReclaimStrategy, "balanced") == 0) {
+    strat = metaspace::Settings::strategy_balanced_reclaim;
+  } else if (strcmp(MetaspaceReclaimStrategy, "aggressive") == 0) {
+    strat = metaspace::Settings::strategy_aggressive_reclaim;
+  } else if (strcmp(MetaspaceReclaimStrategy, "none") == 0) {
+    strat = metaspace::Settings::strategy_no_reclaim;
+  } else {
+    vm_exit_during_initialization(err_msg("Invalid value for MetaspaceReclaimStrategy: \"%s\".", MetaspaceReclaimStrategy));
+  }
+  metaspace::Settings::initialize(strat, MetaspaceAlwaysUseClassSpace);
+
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

-  _commit_alignment  = page_size;
-  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+  // Commit alignment: an implementation detail we would rather hide, but it is
+  // needed when calculating the GC threshold.
+  _commit_alignment = metaspace::Settings::commit_granule_bytes();
+
+  // Reserve alignment: all Metaspace memory mappings are to be aligned to the size of a root chunk.
+  _reserve_alignment = MAX2(page_size, (size_t)metaspace::chklvl::MAX_CHUNK_BYTE_SIZE);
+
+  assert(is_aligned(_reserve_alignment, os::vm_allocation_granularity()),
+         "root chunk size must be a multiple of alloc granularity");

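// A self-contained sketch of the alignment arithmetic above (plain C++; the 4 MB
// root chunk and 64 KB allocation granularity are illustrative assumptions, not the
// actual Settings values):
//
//   #include <algorithm>
//   #include <cstddef>
//
//   constexpr size_t K = 1024, M = K * K;
//   constexpr size_t page_size         = 4 * K;
//   constexpr size_t root_chunk_bytes  = 4 * M;   // stands in for MAX_CHUNK_BYTE_SIZE
//   constexpr size_t alloc_granularity = 64 * K;
//   constexpr size_t reserve_alignment = std::max(page_size, root_chunk_bytes);  // 4 MB
//   static_assert(reserve_alignment % alloc_granularity == 0,
//                 "root chunk size must be a multiple of alloc granularity");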
  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
  // override if MaxMetaspaceSize was set on the command line or not.
  // This information is needed later to conform to the specification of the
  // java.lang.management.MemoryUsage API.

[ ... old lines 1209-1450 / new lines 776-981 omitted ... ]
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
- if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
+ if (using_class_space() && VirtualSpaceList::vslist_class()->contains((MetaWord*)ptr)) {
    return true;
  }

- return get_space_list(NonClassType)->contains(ptr);
+ return VirtualSpaceList::vslist_nonclass()->contains((MetaWord*)ptr);
}
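// Note: with compressed class pointers enabled, Klass structures live in the class
// space checked first; all other metadata (methods, constant pools, etc.) lives in
// the non-class list checked on the fall-through path.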
-
-// ClassLoaderMetaspace
-
-ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
-  : _space_type(type)
-  , _lock(lock)
-  , _vsm(NULL)
-  , _class_vsm(NULL)
-{
-  initialize(lock, type);
-}
-
-ClassLoaderMetaspace::~ClassLoaderMetaspace() {
-  Metaspace::assert_not_frozen();
-  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
-  delete _vsm;
-  if (Metaspace::using_class_space()) {
-    delete _class_vsm;
-  }
-}
-
-void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
-  Metachunk* chunk = get_initialization_chunk(type, mdtype);
-  if (chunk != NULL) {
-    // Add to this manager's list of chunks in use and make it the current_chunk().
-    get_space_manager(mdtype)->add_chunk(chunk, true);
-  }
-}
-
-Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
-  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
-
-  // Get a chunk from the chunk freelist
-  Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
-
-  if (chunk == NULL) {
-    chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
-                                                             get_space_manager(mdtype)->medium_chunk_bunch());
-  }
-
-  return chunk;
-}
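// Design note: the first chunk is taken freelist-first; only if no suitably sized
// free chunk exists does the loader pay for carving a new chunk out of the virtual
// space list.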
|
-
-void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
-  Metaspace::verify_global_initialization();
-
-  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));
-
-  // Allocate SpaceManager for metadata objects.
-  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
-
-  if (Metaspace::using_class_space()) {
-    // Allocate SpaceManager for classes.
-    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
-  }
-
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-
-  // Allocate chunk for metadata objects
-  initialize_first_chunk(type, Metaspace::NonClassType);
-
-  // Allocate chunk for class metadata objects
-  if (Metaspace::using_class_space()) {
-    initialize_first_chunk(type, Metaspace::ClassType);
-  }
-}
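// Note: both SpaceManagers are created before MetaspaceExpand_lock is taken; only
// the first-chunk allocations that follow need the expand lock.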
|
-
-MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
-  Metaspace::assert_not_frozen();
-
-  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
-
-  // Don't use class_vsm() unless UseCompressedClassPointers is true.
-  if (Metaspace::is_class_space_allocation(mdtype)) {
-    return class_vsm()->allocate(word_size);
-  } else {
-    return vsm()->allocate(word_size);
-  }
-}
-
-MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
-  Metaspace::assert_not_frozen();
-  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
-  assert(delta_bytes > 0, "Must be");
-
-  size_t before = 0;
-  size_t after = 0;
-  bool can_retry = true;
-  MetaWord* res;
-  bool incremented;
-
-  // Each thread increments the HWM at most once. Even if the thread fails to increment
-  // the HWM, an allocation is still attempted. This is because another thread must then
-  // have incremented the HWM and therefore the allocation might still succeed.
-  do {
-    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
-    res = allocate(word_size, mdtype);
-  } while (!incremented && res == NULL && can_retry);
-
-  if (incremented) {
-    Metaspace::tracer()->report_gc_threshold(before, after,
-                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
-    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
-  }
-
-  return res;
-}
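// The retry protocol above, in isolation: each thread bumps the high-water mark at
// most once, but keeps allocating while another thread may have raised it. A minimal
// sketch (plain C++; try_raise_hwm and try_allocate are hypothetical stand-ins for
// MetaspaceGC::inc_capacity_until_GC and allocate):
//
//   void* alloc_with_hwm_retry(size_t word_size) {
//     bool can_retry = true;
//     bool incremented;
//     void* res;
//     do {
//       incremented = try_raise_hwm(&can_retry);  // loop exits once this succeeds
//       res = try_allocate(word_size);
//     } while (!incremented && res == NULL && can_retry);
//     return res;
//   }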
|
-
-size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
-  return (vsm()->used_words() +
-          (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
-}
-
-size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
-  return (vsm()->capacity_words() +
-          (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
-}
-
-void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
-  Metaspace::assert_not_frozen();
-  assert(!SafepointSynchronize::is_at_safepoint()
-         || Thread::current()->is_VM_thread(), "should be the VM thread");
-
-  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
-
-  MutexLocker ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
-
-  if (is_class && Metaspace::using_class_space()) {
-    class_vsm()->deallocate(ptr, word_size);
-  } else {
-    vsm()->deallocate(ptr, word_size);
-  }
-}
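// Note: deallocated blocks are not returned to the OS here; they are parked in the
// owning SpaceManager's freelist and may satisfy later allocations (compare the
// "Allocations from deallocated blocks" counter in the internal statistics above).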
|
-
-size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
-  assert(Metaspace::using_class_space(), "Has to use class space");
-  return class_vsm()->calc_chunk_size(word_size);
-}
-
-void ClassLoaderMetaspace::print_on(outputStream* out) const {
-  // Print both class virtual space counts and metaspace.
-  if (Verbose) {
-    vsm()->print_on(out);
-    if (Metaspace::using_class_space()) {
-      class_vsm()->print_on(out);
-    }
-  }
-}
-
-void ClassLoaderMetaspace::verify() {
-  vsm()->verify();
-  if (Metaspace::using_class_space()) {
-    class_vsm()->verify();
-  }
-}
-
-void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
-  assert_lock_strong(lock());
-  vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
-  if (Metaspace::using_class_space()) {
-    class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
-  }
-}
-
-void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
-  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
-  add_to_statistics_locked(out);
-}
-
-/////////////// Unit tests ///////////////
-
-struct chunkmanager_statistics_t {
-  int num_specialized_chunks;
-  int num_small_chunks;
-  int num_medium_chunks;
-  int num_humongous_chunks;
-};
-
-extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
-  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
-  ChunkManagerStatistics stat;
-  chunk_manager->collect_statistics(&stat);
-  out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
-  out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
-  out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
-  out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
-}
-
-struct chunk_geometry_t {
-  size_t specialized_chunk_word_size;
-  size_t small_chunk_word_size;
-  size_t medium_chunk_word_size;
-};
-
-extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
-  if (mdType == Metaspace::NonClassType) {
-    out->specialized_chunk_word_size = SpecializedChunk;
-    out->small_chunk_word_size = SmallChunk;
-    out->medium_chunk_word_size = MediumChunk;
-  } else {
-    out->specialized_chunk_word_size = ClassSpecializedChunk;
-    out->small_chunk_word_size = ClassSmallChunk;
-    out->medium_chunk_word_size = ClassMediumChunk;
-  }
-}
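// Usage sketch (illustrative): these extern hooks let native tests query chunk state
// without including HotSpot-internal headers, e.g. from a gtest that declares
// matching prototypes:
//
//   chunkmanager_statistics_t stat;
//   test_metaspace_retrieve_chunkmanager_statistics(Metaspace::NonClassType, &stat);
//   const int total_chunks = stat.num_specialized_chunks + stat.num_small_chunks +
//                            stat.num_medium_chunks + stat.num_humongous_chunks;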
|