/*
 * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parGCAllocBuffer.cpp.incl"

ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  assert(min_size() > AlignmentReserve, "Inconsistency!");
}
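
// The filler used to pad unallocated space is an int array;
// FillerHeaderSize is its object-aligned header size in HeapWords.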
const size_t ParGCAllocBuffer::FillerHeaderSize =
       align_object_size(arrayOopDesc::header_size(T_INT));

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
const size_t ParGCAllocBuffer::AlignmentReserve =
  oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
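// (Illustration: with a two-word object header but one-word object
// alignment, allocation could leave a one-word shard at the end of the
// buffer; too small even for a filler header, hence the reserve.)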

void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained, shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    SharedHeap::fill_region_with_object(_retained_filler);
    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    _wasted += _retained_filler.word_size();
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}
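
// Accumulate this buffer's allocation counts into the shared PLABStats;
// adjust_desired_plab_sz() consumes them at the end of the scavenge.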
void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
  assert(ResizePLAB, "Wasted work");
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_unused(pointer_delta(_end, _top));
}

// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  assert(ResizePLAB, "Not set");
  if (_allocated == 0) {
    assert(_unused == 0, "Inconsistency in PLAB stats");
    _allocated = 1;
  }
  double wasted_frac = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
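  // For example: if a quarter of all allocated words went unused
  // (wasted_frac == 0.25), then with the usual flag defaults
  // (TargetSurvivorRatio == 50, TargetPLABWastePct == 10) the formula
  // above gives target_refills == (size_t)(0.25 * 50 / 10) == 1.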
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
  if (ResizePLAB) {
    _desired_plab_sz = plab_sz;
  }
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}

#ifndef PRODUCT
void ParGCAllocBuffer::print() {
  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p _top: %p _end: %p _hard_end: %p "
             "_retained: %c _retained_filler: [%p,%p)\n",
             _bottom, _top, _end, _hard_end,
             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT

const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
     ((size_t)Generation::GenGrain)/HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
     (size_t)Generation::GenGrain);
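// ChunkSizeInBytes is the same quantity expressed in bytes (GenGrain is a
// multiple of HeapWordSize). Buffer growth in allocate_slow() below
// proceeds in these chunk-sized units so that the exposed portion of the
// buffer stays aligned with the chunks used for parallel card scanning.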

ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}

// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  SharedHeap::fill_region_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}
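
// Slow path used when the buffer is exhausted: if this buffer was retained
// and more space remains between _hard_end and _true_end, expose one more
// chunk (updating the BOT and laying down fillers), then retry the
// allocation.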
HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    SharedHeap::fill_region_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top = _retained_filler.end();
    _hard_end = next_hard_end;
    _end = _hard_end - AlignmentReserve;
    res = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}

void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}

void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // We're about to make the retained_filler into a block.
    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
                                      _retained_filler.end());
  }
  // Reset _hard_end to _true_end (and update _end)
  if (retain && _hard_end != NULL) {
    assert(_hard_end <= _true_end, "Invariant.");
    _hard_end = _true_end;
    _end = MAX2(_top, _hard_end - AlignmentReserve);
    assert(_end <= _hard_end, "Invariant.");
  }
  _true_end = _hard_end;
  HeapWord* pre_top = _top;
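  // pre_top remembers how far allocation had progressed; the card-boundary
  // surgery below treats [pre_top, _hard_end) as the candidate area to
  // retain for the next scavenge.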

  ParGCAllocBuffer::retire(end_of_gc, retain);
  // Now any old _retained_filler is cut back to size, the free part is
  // filled with a filler object, and top is past the header of that
  // object.

  if (retain && _top < _end) {
    assert(end_of_gc && retain, "Or else retain should be false.");
    // If the lab does not start on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support. So we fill
    // the first card with a garbage object.
    size_t first_card_index = _bsa->index_for(pre_top);
    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
    if (first_card_start < pre_top) {
      HeapWord* second_card_start =
        _bsa->address_for_index(first_card_index + 1);

      // Ensure enough room to fill with the smallest block
      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);

      // If the end is already in the first card, don't go beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_hard_end < second_card_start ||
          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
        second_card_start = _hard_end;
      }
      if (pre_top < second_card_start) {
        MemRegion first_card_suffix(pre_top, second_card_start);
        fill_region_with_block(first_card_suffix, true);
      }
      pre_top = second_card_start;
      _top = pre_top;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
    }

    // If the lab does not end on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support. So we fill
    // the last card with a garbage object.
    size_t last_card_index = _bsa->index_for(_hard_end);
    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
    if (last_card_start < _hard_end) {

      // Ensure enough room to fill with the smallest block
      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);

      // If the top is already in the last card, don't go back beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_top > last_card_start ||
          pointer_delta(last_card_start, _top) < AlignmentReserve) {
        last_card_start = _top;
      }
      if (last_card_start < _hard_end) {
        MemRegion last_card_prefix(last_card_start, _hard_end);
        fill_region_with_block(last_card_prefix, false);
      }
      _hard_end = last_card_start;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
      _true_end = _hard_end;
      assert(_end <= _hard_end, "Invariant.");
    }

    // At this point:
    // 1) we had a filler object from the original top to hard_end.
    // 2) We've filled in any partial cards at the front and back.
    if (pre_top < _hard_end) {
      // Now we can reset the _bt to do allocation in the given area.
      MemRegion new_filler(pre_top, _hard_end);
      fill_region_with_block(new_filler, false);
      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
      // If there's no space left, don't retain.
      if (_top >= _end) {
        _retained = false;
        invalidate();
        return;
      }
      _retained_filler = MemRegion(pre_top, _top);
      _bt.set_region(MemRegion(_top, _hard_end));
      _bt.initialize_threshold();
      assert(_bt.threshold() > _top, "initialize_threshold failed!");

      // There may be other reasons for queries into the middle of the
      // filler object. When such queries are done in parallel with
      // allocation, bad things can happen, if the query involves object
      // iteration. So we ensure that such queries do not involve object
      // iteration, by putting another filler object on the boundaries of
      // such queries. One such is the object spanning a parallel card
      // chunk boundary.

      // "chunk_boundary" is the address of the first chunk boundary less
      // than "hard_end".
      HeapWord* chunk_boundary =
        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
      assert(chunk_boundary < _hard_end, "Or else above did not work.");
      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");

      if (_top <= chunk_boundary) {
        assert(_true_end == _hard_end, "Invariant.");
        while (_top <= chunk_boundary) {
          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                 "Consequence of last card handling above.");
          MemRegion chunk_portion(chunk_boundary, _hard_end);
          _bt.BlockOffsetArray::alloc_block(chunk_portion.start(),
                                            chunk_portion.end());
          SharedHeap::fill_region_with_object(chunk_portion);
          _hard_end = chunk_portion.start();
          chunk_boundary -= ChunkSizeInWords;
        }
        _end = _hard_end - AlignmentReserve;
        assert(_top <= _end, "Invariant.");
        // Now reset the initial filler chunk so it doesn't overlap with
        // the one(s) inserted above.
        MemRegion new_filler(pre_top, _hard_end);
        fill_region_with_block(new_filler, false);
      }
    } else {
      _retained = false;
      invalidate();
    }
  } else {
    assert(!end_of_gc ||
           (!_retained && _true_end == _hard_end), "Checking.");
  }
  assert(_end <= _hard_end, "Invariant.");
  assert(_top < _end || _top == _hard_end, "Invariant");
}