|
1 /* |
|
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #include "precompiled.hpp" |
|
26 #include "gc/parallel/mutableNUMASpace.hpp" |
|
27 #include "gc/parallel/parallelScavengeHeap.hpp" |
|
28 #include "gc/parallel/psMarkSweepDecorator.hpp" |
|
29 #include "gc/parallel/psScavenge.hpp" |
|
30 #include "gc/parallel/psYoungGen.hpp" |
|
31 #include "gc/shared/gcUtil.hpp" |
|
32 #include "gc/shared/spaceDecorator.hpp" |
|
33 #include "logging/log.hpp" |
|
34 #include "oops/oop.inline.hpp" |
|
35 #include "runtime/java.hpp" |
|
36 #include "utilities/align.hpp" |
|
37 |
|
38 PSYoungGen::PSYoungGen(size_t initial_size, |
|
39 size_t min_size, |
|
40 size_t max_size) : |
|
41 _init_gen_size(initial_size), |
|
42 _min_gen_size(min_size), |
|
43 _max_gen_size(max_size) |
|
44 {} |
|
45 |
|
46 void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) { |
|
47 assert(_init_gen_size != 0, "Should have a finite size"); |
|
48 _virtual_space = new PSVirtualSpace(rs, alignment); |
|
49 if (!virtual_space()->expand_by(_init_gen_size)) { |
|
50 vm_exit_during_initialization("Could not reserve enough space for " |
|
51 "object heap"); |
|
52 } |
|
53 } |
|
54 |
|
55 void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) { |
|
56 initialize_virtual_space(rs, alignment); |
|
57 initialize_work(); |
|
58 } |
|
59 |
|
// One-time setup of the young generation after the virtual space has been
// committed: records the reserved region, resizes the barrier set's covered
// region, allocates eden/from/to spaces and their mark-sweep decorators,
// creates the performance counters, and computes the initial space layout.
void PSYoungGen::initialize_work() {

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  // The barrier set must cover the currently committed portion of the
  // generation.
  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);

  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply that after the new
    // spaces have been computed.
    SpaceMangler::mangle_region(cmr);
  }

  // Eden may be NUMA-aware; the survivor spaces are plain MutableSpaces.
  if (UseNUMA) {
    _eden_space = new MutableNUMASpace(virtual_space()->alignment());
  } else {
    _eden_space = new MutableSpace(virtual_space()->alignment());
  }
  _from_space = new MutableSpace(virtual_space()->alignment());
  _to_space = new MutableSpace(virtual_space()->alignment());

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a young gen space");
  }

  // Allocate the mark sweep views of spaces
  _eden_mark_sweep =
      new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
  _from_mark_sweep =
      new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
  _to_mark_sweep =
      new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);

  if (_eden_mark_sweep == NULL ||
      _from_mark_sweep == NULL ||
      _to_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation"
                                  " of the young generation");
  }

  // Generation Counters - generation 0, 3 subspaces
  _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
                                           _max_gen_size, _virtual_space);

  // Compute maximum space sizes for performance counters
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  size_t alignment = heap->space_alignment();
  size_t size = virtual_space()->reserved_size();

  size_t max_survivor_size;
  size_t max_eden_size;

  if (UseAdaptiveSizePolicy) {
    max_survivor_size = size / MinSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the minimum survivor size. The minimum survivor
    // size for UseAdaptiveSizePolicy is one alignment.
    max_eden_size = size - 2 * alignment;
  } else {
    max_survivor_size = size / InitialSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the survivor size when the generation is 100%
    // committed. The minimum survivor size for -UseAdaptiveSizePolicy
    // is dependent on the committed portion (current capacity) of the
    // generation - the less space committed, the smaller the survivor
    // space, possibly as small as an alignment. However, we are interested
    // in the case where the young generation is 100% committed, as this
    // is the point where eden reaches its maximum size. At this point,
    // the size of a survivor space is max_survivor_size.
    max_eden_size = size - 2 * max_survivor_size;
  }

  _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
                                     _gen_counters);
  _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
                                     _gen_counters);
  _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
                                   _gen_counters);

  compute_initial_space_boundaries();
}
|
156 |
|
157 void PSYoungGen::compute_initial_space_boundaries() { |
|
158 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); |
|
159 |
|
160 // Compute sizes |
|
161 size_t alignment = heap->space_alignment(); |
|
162 size_t size = virtual_space()->committed_size(); |
|
163 assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors"); |
|
164 |
|
165 size_t survivor_size = size / InitialSurvivorRatio; |
|
166 survivor_size = align_down(survivor_size, alignment); |
|
167 // ... but never less than an alignment |
|
168 survivor_size = MAX2(survivor_size, alignment); |
|
169 |
|
170 // Young generation is eden + 2 survivor spaces |
|
171 size_t eden_size = size - (2 * survivor_size); |
|
172 |
|
173 // Now go ahead and set 'em. |
|
174 set_space_boundaries(eden_size, survivor_size); |
|
175 space_invariants(); |
|
176 |
|
177 if (UsePerfData) { |
|
178 _eden_counters->update_capacity(); |
|
179 _from_counters->update_capacity(); |
|
180 _to_counters->update_capacity(); |
|
181 } |
|
182 } |
|
183 |
|
184 void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) { |
|
185 assert(eden_size < virtual_space()->committed_size(), "just checking"); |
|
186 assert(eden_size > 0 && survivor_size > 0, "just checking"); |
|
187 |
|
188 // Initial layout is Eden, to, from. After swapping survivor spaces, |
|
189 // that leaves us with Eden, from, to, which is step one in our two |
|
190 // step resize-with-live-data procedure. |
|
191 char *eden_start = virtual_space()->low(); |
|
192 char *to_start = eden_start + eden_size; |
|
193 char *from_start = to_start + survivor_size; |
|
194 char *from_end = from_start + survivor_size; |
|
195 |
|
196 assert(from_end == virtual_space()->high(), "just checking"); |
|
197 assert(is_object_aligned(eden_start), "checking alignment"); |
|
198 assert(is_object_aligned(to_start), "checking alignment"); |
|
199 assert(is_object_aligned(from_start), "checking alignment"); |
|
200 |
|
201 MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start); |
|
202 MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start); |
|
203 MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end); |
|
204 |
|
205 eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea); |
|
206 to_space()->initialize(to_mr , true, ZapUnusedHeapArea); |
|
207 from_space()->initialize(from_mr, true, ZapUnusedHeapArea); |
|
208 } |
|
209 |
|
#ifndef PRODUCT
// Debug-only sanity checks: verifies minimum space sizes, the non-overlapping
// ordering of eden and the survivor spaces within the virtual space, and the
// consistency of committed/reserved sizes and space tops.
void PSYoungGen::space_invariants() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t alignment = heap->space_alignment();

  // Currently, our eden size cannot shrink to zero
  guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
  guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
  guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");

  // Relationship of spaces to each other
  char* eden_start = (char*)eden_space()->bottom();
  char* eden_end = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end = (char*)from_space()->end();
  char* to_start = (char*)to_space()->bottom();
  char* to_end = (char*)to_space()->end();

  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
  guarantee(eden_start < eden_end, "eden space consistency");
  guarantee(from_start < from_end, "from space consistency");
  guarantee(to_start < to_end, "to space consistency");

  // Check whether from space is below to space
  // (the survivor spaces trade places after each scavenge, so either
  // ordering is legal).
  if (from_start < to_start) {
    // Eden, from, to
    guarantee(eden_end <= from_start, "eden/from boundary");
    guarantee(from_end <= to_start, "from/to boundary");
    guarantee(to_end <= virtual_space()->high(), "to end");
  } else {
    // Eden, to, from
    guarantee(eden_end <= to_start, "eden/to boundary");
    guarantee(to_end <= from_start, "to/from boundary");
    guarantee(from_end <= virtual_space()->high(), "from end");
  }

  // More checks that the virtual space is consistent with the spaces
  assert(virtual_space()->committed_size() >=
         (eden_space()->capacity_in_bytes() +
          to_space()->capacity_in_bytes() +
          from_space()->capacity_in_bytes()), "Committed size is inconsistent");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
  char* eden_top = (char*)eden_space()->top();
  char* from_top = (char*)from_space()->top();
  char* to_top = (char*)to_space()->top();
  assert(eden_top <= virtual_space()->high(), "eden top");
  assert(from_top <= virtual_space()->high(), "from top");
  assert(to_top <= virtual_space()->high(), "to top");

  virtual_space()->verify();
}
#endif
|
263 |
|
264 void PSYoungGen::resize(size_t eden_size, size_t survivor_size) { |
|
265 // Resize the generation if needed. If the generation resize |
|
266 // reports false, do not attempt to resize the spaces. |
|
267 if (resize_generation(eden_size, survivor_size)) { |
|
268 // Then we lay out the spaces inside the generation |
|
269 resize_spaces(eden_size, survivor_size); |
|
270 |
|
271 space_invariants(); |
|
272 |
|
273 log_trace(gc, ergo)("Young generation size: " |
|
274 "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT |
|
275 " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT |
|
276 " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT, |
|
277 eden_size, survivor_size, used_in_bytes(), capacity_in_bytes(), |
|
278 _max_gen_size, min_gen_size()); |
|
279 } |
|
280 } |
|
281 |
|
282 |
|
283 bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) { |
|
284 const size_t alignment = virtual_space()->alignment(); |
|
285 size_t orig_size = virtual_space()->committed_size(); |
|
286 bool size_changed = false; |
|
287 |
|
288 // There used to be this guarantee there. |
|
289 // guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments"); |
|
290 // Code below forces this requirement. In addition the desired eden |
|
291 // size and desired survivor sizes are desired goals and may |
|
292 // exceed the total generation size. |
|
293 |
|
294 assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking"); |
|
295 |
|
296 // Adjust new generation size |
|
297 const size_t eden_plus_survivors = |
|
298 align_up(eden_size + 2 * survivor_size, alignment); |
|
299 size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()), |
|
300 min_gen_size()); |
|
301 assert(desired_size <= max_size(), "just checking"); |
|
302 |
|
303 if (desired_size > orig_size) { |
|
304 // Grow the generation |
|
305 size_t change = desired_size - orig_size; |
|
306 assert(change % alignment == 0, "just checking"); |
|
307 HeapWord* prev_high = (HeapWord*) virtual_space()->high(); |
|
308 if (!virtual_space()->expand_by(change)) { |
|
309 return false; // Error if we fail to resize! |
|
310 } |
|
311 if (ZapUnusedHeapArea) { |
|
312 // Mangle newly committed space immediately because it |
|
313 // can be done here more simply that after the new |
|
314 // spaces have been computed. |
|
315 HeapWord* new_high = (HeapWord*) virtual_space()->high(); |
|
316 MemRegion mangle_region(prev_high, new_high); |
|
317 SpaceMangler::mangle_region(mangle_region); |
|
318 } |
|
319 size_changed = true; |
|
320 } else if (desired_size < orig_size) { |
|
321 size_t desired_change = orig_size - desired_size; |
|
322 assert(desired_change % alignment == 0, "just checking"); |
|
323 |
|
324 desired_change = limit_gen_shrink(desired_change); |
|
325 |
|
326 if (desired_change > 0) { |
|
327 virtual_space()->shrink_by(desired_change); |
|
328 reset_survivors_after_shrink(); |
|
329 |
|
330 size_changed = true; |
|
331 } |
|
332 } else { |
|
333 if (orig_size == gen_size_limit()) { |
|
334 log_trace(gc)("PSYoung generation size at maximum: " SIZE_FORMAT "K", orig_size/K); |
|
335 } else if (orig_size == min_gen_size()) { |
|
336 log_trace(gc)("PSYoung generation size at minium: " SIZE_FORMAT "K", orig_size/K); |
|
337 } |
|
338 } |
|
339 |
|
340 if (size_changed) { |
|
341 post_resize(); |
|
342 log_trace(gc)("PSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K", |
|
343 orig_size/K, virtual_space()->committed_size()/K); |
|
344 } |
|
345 |
|
346 guarantee(eden_plus_survivors <= virtual_space()->committed_size() || |
|
347 virtual_space()->committed_size() == max_size(), "Sanity"); |
|
348 |
|
349 return true; |
|
350 } |
|
351 |
|
352 #ifndef PRODUCT |
|
353 // In the numa case eden is not mangled so a survivor space |
|
354 // moving into a region previously occupied by a survivor |
|
355 // may find an unmangled region. Also in the PS case eden |
|
356 // to-space and from-space may not touch (i.e., there may be |
|
357 // gaps between them due to movement while resizing the |
|
358 // spaces). Those gaps must be mangled. |
|
359 void PSYoungGen::mangle_survivors(MutableSpace* s1, |
|
360 MemRegion s1MR, |
|
361 MutableSpace* s2, |
|
362 MemRegion s2MR) { |
|
363 // Check eden and gap between eden and from-space, in deciding |
|
364 // what to mangle in from-space. Check the gap between from-space |
|
365 // and to-space when deciding what to mangle. |
|
366 // |
|
367 // +--------+ +----+ +---+ |
|
368 // | eden | |s1 | |s2 | |
|
369 // +--------+ +----+ +---+ |
|
370 // +-------+ +-----+ |
|
371 // |s1MR | |s2MR | |
|
372 // +-------+ +-----+ |
|
373 // All of survivor-space is properly mangled so find the |
|
374 // upper bound on the mangling for any portion above current s1. |
|
375 HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end()); |
|
376 MemRegion delta1_left; |
|
377 if (s1MR.start() < delta_end) { |
|
378 delta1_left = MemRegion(s1MR.start(), delta_end); |
|
379 s1->mangle_region(delta1_left); |
|
380 } |
|
381 // Find any portion to the right of the current s1. |
|
382 HeapWord* delta_start = MAX2(s1->end(), s1MR.start()); |
|
383 MemRegion delta1_right; |
|
384 if (delta_start < s1MR.end()) { |
|
385 delta1_right = MemRegion(delta_start, s1MR.end()); |
|
386 s1->mangle_region(delta1_right); |
|
387 } |
|
388 |
|
389 // Similarly for the second survivor space except that |
|
390 // any of the new region that overlaps with the current |
|
391 // region of the first survivor space has already been |
|
392 // mangled. |
|
393 delta_end = MIN2(s2->bottom(), s2MR.end()); |
|
394 delta_start = MAX2(s2MR.start(), s1->end()); |
|
395 MemRegion delta2_left; |
|
396 if (s2MR.start() < delta_end) { |
|
397 delta2_left = MemRegion(s2MR.start(), delta_end); |
|
398 s2->mangle_region(delta2_left); |
|
399 } |
|
400 delta_start = MAX2(s2->end(), s2MR.start()); |
|
401 MemRegion delta2_right; |
|
402 if (delta_start < s2MR.end()) { |
|
403 s2->mangle_region(delta2_right); |
|
404 } |
|
405 |
|
406 // s1 |
|
407 log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") " |
|
408 "New region: [" PTR_FORMAT ", " PTR_FORMAT ")", |
|
409 p2i(s1->bottom()), p2i(s1->end()), |
|
410 p2i(s1MR.start()), p2i(s1MR.end())); |
|
411 log_develop_trace(gc)(" Mangle before: [" PTR_FORMAT ", " |
|
412 PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")", |
|
413 p2i(delta1_left.start()), p2i(delta1_left.end()), |
|
414 p2i(delta1_right.start()), p2i(delta1_right.end())); |
|
415 |
|
416 // s2 |
|
417 log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") " |
|
418 "New region: [" PTR_FORMAT ", " PTR_FORMAT ")", |
|
419 p2i(s2->bottom()), p2i(s2->end()), |
|
420 p2i(s2MR.start()), p2i(s2MR.end())); |
|
421 log_develop_trace(gc)(" Mangle before: [" PTR_FORMAT ", " |
|
422 PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")", |
|
423 p2i(delta2_left.start()), p2i(delta2_left.end()), |
|
424 p2i(delta2_right.start()), p2i(delta2_right.end())); |
|
425 } |
|
426 #endif // NOT PRODUCT |
|
427 |
|
// Re-lay-out eden and the survivor spaces inside the (already resized)
// generation toward the requested sizes. The requested sizes are goals and
// may not be attainable: from-space holds live data and is never moved, so
// the achievable eden size is bounded by from-space's position. Requires
// eden and to-space to be empty (returns immediately otherwise).
void PSYoungGen::resize_spaces(size_t requested_eden_size,
                               size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0 && requested_survivor_size > 0,
         "just checking");

  // We require eden and to space to be empty
  if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
    return;
  }

  log_trace(gc, ergo)("PSYoungGen::resize_spaces(requested_eden_size: " SIZE_FORMAT ", requested_survivor_size: " SIZE_FORMAT ")",
                      requested_eden_size, requested_survivor_size);
  log_trace(gc, ergo)(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
                      p2i(eden_space()->bottom()),
                      p2i(eden_space()->end()),
                      pointer_delta(eden_space()->end(),
                                    eden_space()->bottom(),
                                    sizeof(char)));
  log_trace(gc, ergo)(" from: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
                      p2i(from_space()->bottom()),
                      p2i(from_space()->end()),
                      pointer_delta(from_space()->end(),
                                    from_space()->bottom(),
                                    sizeof(char)));
  log_trace(gc, ergo)(" to: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
                      p2i(to_space()->bottom()),
                      p2i(to_space()->end()),
                      pointer_delta( to_space()->end(),
                                     to_space()->bottom(),
                                     sizeof(char)));

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    log_trace(gc, ergo)(" capacities are the right sizes, returning");
    return;
  }

  char* eden_start = (char*)eden_space()->bottom();
  char* eden_end = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end = (char*)from_space()->end();
  char* to_start = (char*)to_space()->bottom();
  char* to_end = (char*)to_space()->end();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t alignment = heap->space_alignment();
  // If the total request is below the generation minimum, we may grow
  // spaces beyond their requested sizes to keep the minimum covered.
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to
    eden_from_to_order = true;
    log_trace(gc, ergo)(" Eden, from, to:");

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable. "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden. from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may too large for representation in 32bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from_space()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    log_trace(gc, ergo)(" [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)(" [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
    log_trace(gc, ergo)(" [ to_start .. to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta( to_end, to_start, sizeof(char)));
  } else {
    // Eden, to, from
    log_trace(gc, ergo)(" Eden, to, from:");

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end = (char*)pointer_delta(virtual_space()->high(),
                                  (char*)requested_survivor_size,
                                  sizeof(char));
    to_end = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // if the space sizes are to be increased by several times then
    // 'to_start' will point beyond the young generation. In this case
    // 'to_start' should be adjusted.
    to_start = MAX2(to_start, eden_start + alignment);

    // Compute how big eden can be, then adjust end.
    // See comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Could choose to not let eden shrink
    // to_start = MAX2(to_start, eden_end);

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    to_start = MAX2(to_start, eden_end);

    log_trace(gc, ergo)(" [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)(" [ to_start .. to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta( to_end, to_start, sizeof(char)));
    log_trace(gc, ergo)(" [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
  }


  // From-space must not have moved left nor lost live data.
  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned(eden_start), "checking alignment");
  assert(is_object_aligned(from_start), "checking alignment");
  assert(is_object_aligned(to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  HeapWord* old_from_top = from_space()->top();

  // For logging block below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case because a numa space is not mangled
    // in order to not prematurely bind its address to memory to
    // the wrong memory (i.e., don't want the GC thread to first
    // touch the memory). The survivor spaces are not numa
    // spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // If not mangling the spaces, do some checking to verify that
    // the spaces are already mangled.
    // The spaces should be correctly mangled at this point so
    // do some checking here. Note that they are not being mangled
    // in the calls to initialize().
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.
    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
    to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  to_space()->initialize(toMR,
                         SpaceDecorator::Clear,
                         SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  assert(from_space()->top() == old_from_top, "from top changed!");

  log_trace(gc, ergo)("AdaptiveSizePolicy::survivor space sizes: collection: %d (" SIZE_FORMAT ", " SIZE_FORMAT ") -> (" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      old_from, old_to,
                      from_space()->capacity_in_bytes(),
                      to_space()->capacity_in_bytes());
}
|
678 |
|
679 void PSYoungGen::swap_spaces() { |
|
680 MutableSpace* s = from_space(); |
|
681 _from_space = to_space(); |
|
682 _to_space = s; |
|
683 |
|
684 // Now update the decorators. |
|
685 PSMarkSweepDecorator* md = from_mark_sweep(); |
|
686 _from_mark_sweep = to_mark_sweep(); |
|
687 _to_mark_sweep = md; |
|
688 |
|
689 assert(from_mark_sweep()->space() == from_space(), "Sanity"); |
|
690 assert(to_mark_sweep()->space() == to_space(), "Sanity"); |
|
691 } |
|
692 |
|
693 size_t PSYoungGen::capacity_in_bytes() const { |
|
694 return eden_space()->capacity_in_bytes() |
|
695 + from_space()->capacity_in_bytes(); // to_space() is only used during scavenge |
|
696 } |
|
697 |
|
698 |
|
699 size_t PSYoungGen::used_in_bytes() const { |
|
700 return eden_space()->used_in_bytes() |
|
701 + from_space()->used_in_bytes(); // to_space() is only used during scavenge |
|
702 } |
|
703 |
|
704 |
|
705 size_t PSYoungGen::free_in_bytes() const { |
|
706 return eden_space()->free_in_bytes() |
|
707 + from_space()->free_in_bytes(); // to_space() is only used during scavenge |
|
708 } |
|
709 |
|
710 size_t PSYoungGen::capacity_in_words() const { |
|
711 return eden_space()->capacity_in_words() |
|
712 + from_space()->capacity_in_words(); // to_space() is only used during scavenge |
|
713 } |
|
714 |
|
715 |
|
716 size_t PSYoungGen::used_in_words() const { |
|
717 return eden_space()->used_in_words() |
|
718 + from_space()->used_in_words(); // to_space() is only used during scavenge |
|
719 } |
|
720 |
|
721 |
|
722 size_t PSYoungGen::free_in_words() const { |
|
723 return eden_space()->free_in_words() |
|
724 + from_space()->free_in_words(); // to_space() is only used during scavenge |
|
725 } |
|
726 |
|
727 void PSYoungGen::object_iterate(ObjectClosure* blk) { |
|
728 eden_space()->object_iterate(blk); |
|
729 from_space()->object_iterate(blk); |
|
730 to_space()->object_iterate(blk); |
|
731 } |
|
732 |
|
733 void PSYoungGen::precompact() { |
|
734 eden_mark_sweep()->precompact(); |
|
735 from_mark_sweep()->precompact(); |
|
736 to_mark_sweep()->precompact(); |
|
737 } |
|
738 |
|
739 void PSYoungGen::adjust_pointers() { |
|
740 eden_mark_sweep()->adjust_pointers(); |
|
741 from_mark_sweep()->adjust_pointers(); |
|
742 to_mark_sweep()->adjust_pointers(); |
|
743 } |
|
744 |
|
745 void PSYoungGen::compact() { |
|
746 eden_mark_sweep()->compact(ZapUnusedHeapArea); |
|
747 from_mark_sweep()->compact(ZapUnusedHeapArea); |
|
748 // Mark sweep stores preserved markOops in to space, don't disturb! |
|
749 to_mark_sweep()->compact(false); |
|
750 } |
|
751 |
|
752 void PSYoungGen::print() const { print_on(tty); } |
|
753 void PSYoungGen::print_on(outputStream* st) const { |
|
754 st->print(" %-15s", "PSYoungGen"); |
|
755 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
|
756 capacity_in_bytes()/K, used_in_bytes()/K); |
|
757 virtual_space()->print_space_boundaries_on(st); |
|
758 st->print(" eden"); eden_space()->print_on(st); |
|
759 st->print(" from"); from_space()->print_on(st); |
|
760 st->print(" to "); to_space()->print_on(st); |
|
761 } |
|
762 |
|
// Note: unlike print_on(), the log line below begins directly with the
// generation name (no leading space is printed before it).
|
764 void PSYoungGen::print_used_change(size_t prev_used) const { |
|
765 log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", |
|
766 name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K); |
|
767 } |
|
768 |
|
// Expansion is not supported through this base implementation; callers
// must never reach it. Presumably a subclass overrides this where
// expansion is meaningful — TODO(review): confirm against subclasses.
size_t PSYoungGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}
|
773 |
|
// Contraction is not supported through this base implementation;
// callers must never reach it. Presumably a subclass overrides this
// where contraction is meaningful — TODO(review): confirm.
size_t PSYoungGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}
|
778 |
|
779 size_t PSYoungGen::available_to_min_gen() { |
|
780 assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant"); |
|
781 return virtual_space()->committed_size() - min_gen_size(); |
|
782 } |
|
783 |
|
784 // This method assumes that from-space has live data and that |
|
785 // any shrinkage of the young gen is limited by location of |
|
786 // from-space. |
|
787 size_t PSYoungGen::available_to_live() { |
|
788 size_t delta_in_survivor = 0; |
|
789 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); |
|
790 const size_t space_alignment = heap->space_alignment(); |
|
791 const size_t gen_alignment = heap->generation_alignment(); |
|
792 |
|
793 MutableSpace* space_shrinking = NULL; |
|
794 if (from_space()->end() > to_space()->end()) { |
|
795 space_shrinking = from_space(); |
|
796 } else { |
|
797 space_shrinking = to_space(); |
|
798 } |
|
799 |
|
800 // Include any space that is committed but not included in |
|
801 // the survivor spaces. |
|
802 assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(), |
|
803 "Survivor space beyond high end"); |
|
804 size_t unused_committed = pointer_delta(virtual_space()->high(), |
|
805 space_shrinking->end(), sizeof(char)); |
|
806 |
|
807 if (space_shrinking->is_empty()) { |
|
808 // Don't let the space shrink to 0 |
|
809 assert(space_shrinking->capacity_in_bytes() >= space_alignment, |
|
810 "Space is too small"); |
|
811 delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment; |
|
812 } else { |
|
813 delta_in_survivor = pointer_delta(space_shrinking->end(), |
|
814 space_shrinking->top(), |
|
815 sizeof(char)); |
|
816 } |
|
817 |
|
818 size_t delta_in_bytes = unused_committed + delta_in_survivor; |
|
819 delta_in_bytes = align_down(delta_in_bytes, gen_alignment); |
|
820 return delta_in_bytes; |
|
821 } |
|
822 |
|
823 // Return the number of bytes available for resizing down the young |
|
824 // generation. This is the minimum of |
|
825 // input "bytes" |
|
826 // bytes to the minimum young gen size |
|
827 // bytes to the size currently being used + some small extra |
|
828 size_t PSYoungGen::limit_gen_shrink(size_t bytes) { |
|
829 // Allow shrinkage into the current eden but keep eden large enough |
|
830 // to maintain the minimum young gen size |
|
831 bytes = MIN3(bytes, available_to_min_gen(), available_to_live()); |
|
832 return align_down(bytes, virtual_space()->alignment()); |
|
833 } |
|
834 |
|
// Not supported through this base implementation; callers must never
// reach it. Presumably overridden by a subclass that reshapes the
// generation after a boundary change — TODO(review): confirm.
void PSYoungGen::reset_after_change() {
  ShouldNotReachHere();
}
|
838 |
|
839 void PSYoungGen::reset_survivors_after_shrink() { |
|
840 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(), |
|
841 (HeapWord*)virtual_space()->high_boundary()); |
|
842 PSScavenge::reference_processor()->set_span(_reserved); |
|
843 |
|
844 MutableSpace* space_shrinking = NULL; |
|
845 if (from_space()->end() > to_space()->end()) { |
|
846 space_shrinking = from_space(); |
|
847 } else { |
|
848 space_shrinking = to_space(); |
|
849 } |
|
850 |
|
851 HeapWord* new_end = (HeapWord*)virtual_space()->high(); |
|
852 assert(new_end >= space_shrinking->bottom(), "Shrink was too large"); |
|
853 // Was there a shrink of the survivor space? |
|
854 if (new_end < space_shrinking->end()) { |
|
855 MemRegion mr(space_shrinking->bottom(), new_end); |
|
856 space_shrinking->initialize(mr, |
|
857 SpaceDecorator::DontClear, |
|
858 SpaceDecorator::Mangle); |
|
859 } |
|
860 } |
|
861 |
|
862 // This method currently does not expect to expand into eden (i.e., |
|
863 // the virtual space boundaries is expected to be consistent |
|
864 // with the eden boundaries.. |
|
865 void PSYoungGen::post_resize() { |
|
866 assert_locked_or_safepoint(Heap_lock); |
|
867 assert((eden_space()->bottom() < to_space()->bottom()) && |
|
868 (eden_space()->bottom() < from_space()->bottom()), |
|
869 "Eden is assumed to be below the survivor spaces"); |
|
870 |
|
871 MemRegion cmr((HeapWord*)virtual_space()->low(), |
|
872 (HeapWord*)virtual_space()->high()); |
|
873 ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr); |
|
874 space_invariants(); |
|
875 } |
|
876 |
|
877 |
|
878 |
|
879 void PSYoungGen::update_counters() { |
|
880 if (UsePerfData) { |
|
881 _eden_counters->update_all(); |
|
882 _from_counters->update_all(); |
|
883 _to_counters->update_all(); |
|
884 _gen_counters->update_all(); |
|
885 } |
|
886 } |
|
887 |
|
888 void PSYoungGen::verify() { |
|
889 eden_space()->verify(); |
|
890 from_space()->verify(); |
|
891 to_space()->verify(); |
|
892 } |
|
893 |
|
894 #ifndef PRODUCT |
|
895 void PSYoungGen::record_spaces_top() { |
|
896 assert(ZapUnusedHeapArea, "Not mangling unused space"); |
|
897 eden_space()->set_top_for_allocations(); |
|
898 from_space()->set_top_for_allocations(); |
|
899 to_space()->set_top_for_allocations(); |
|
900 } |
|
901 #endif |