/*
 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_space.cpp.incl"

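// DirtyCardToOopClosure turns a dirty-card MemRegion from the card table
// into oop iterations over the objects that intersect it: get_actual_top()
// below pins down the real top of the region, and walk_mem_region() then
// visits the blocks inside it.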
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card. Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done. That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta. XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last   = mr.last();
  HeapWord* top    = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  mr     = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!mr.is_empty()) {
    walk_mem_region(mr, bottom_obj, top);
  }

  _min_done = bottom;
}
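
// Commentary on the protocol above: with ObjHeadPreciseArray precision,
// callers present regions in decreasing address order (hence the
// "Not decreasing" assert), and _min_done records how far a previous
// call already scanned so that overlapping work is skipped.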

DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

void FilteringClosure::do_oop(oop* p) {
  do_oop_nv(p);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card. Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}
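
// Illustrative sketch (not part of this file): constructing and driving
// one of these closures by hand, given an existing OopClosure "cl" and
// a dirty region "mr" (both hypothetical here):
//
//   DirtyCardToOopClosure* dcto =
//     sp->new_dcto_cl(&cl, CardTableModRefBS::ObjHeadPreciseArray, NULL);
//   dcto->do_MemRegion(mr);
//
// In practice the card-table scanning code creates and drives these.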

void Space::initialize(MemRegion mr, bool clear_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear();
}

void Space::clear() {
  if (ZapUnusedHeapArea) mangle_unused_area();
}

void ContiguousSpace::initialize(MemRegion mr, bool clear_space)
{
  CompactibleSpace::initialize(mr, clear_space);
  _concurrent_iteration_safe_limit = top();
}

void ContiguousSpace::clear() {
  set_top(bottom());
  set_saved_mark();
  Space::clear();
}

bool Space::is_in(const void* p) const {
  HeapWord* b = block_start(p);
  return b != NULL && block_is_obj(b);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear() {
  ContiguousSpace::clear();
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

void ContiguousSpace::mangle_unused_area() {
  // to-space is used for storing marks during mark-sweep
  mangle_region(MemRegion(top(), end()));
}

void ContiguousSpace::mangle_region(MemRegion mr) {
  debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
}

void CompactibleSpace::initialize(MemRegion mr, bool clear_space) {
  Space::initialize(mr, clear_space);
  _compaction_top = bottom();
  _next_compaction_space = NULL;
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  debug_only(MarkSweep::register_live_oop(q, size));
  compact_top += size;

  // we need to update the offset table so that the beginnings of objects
  // can be found during scavenge. Note that we are updating the offset
  // table based on where the object will be once the compaction phase
  // finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}
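
// Worked sketch of the forwarding step above (commentary): a live
// object q that is not already at compact_top gets a forwarding pointer
// to compact_top encoded in its mark word; an object already sitting at
// compact_top keeps a freshly initialized mark instead, which later
// phases read back as "forwarded to NULL", i.e. the object does not
// move. Either way, compact_top then advances by the object's size.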


bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
    const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
    if (deadlength >= min_int_array_size) {
      oop(q)->set_klass(Universe::intArrayKlassObj());
      typeArrayOop(q)->set_length((int)((deadlength - min_int_array_size)
                                        * (HeapWordSize/sizeof(jint))));
    } else {
      assert((int) deadlength == instanceOopDesc::header_size(),
             "size for smallest fake dead object doesn't match");
      oop(q)->set_klass(SystemDictionary::object_klass());
    }
    assert((int) deadlength == oop(q)->size(),
           "make sure size for fake dead object matches");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}
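
// Worked example of the dead-space encoding above (commentary): a dead
// run of d heap words with d >= min_int_array_size is disguised as an
// int array of (d - min_int_array_size) * (HeapWordSize/sizeof(jint))
// elements, so that oop(q)->size() again reports exactly d; a run of
// exactly instanceOopDesc::header_size() words becomes a plain
// java.lang.Object instead. Either way, later phases can iterate over
// the run as a normal (marked) object rather than compacting it away.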

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}
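
// SCAN_AND_FORWARD, SCAN_AND_ADJUST_POINTERS and SCAN_AND_COMPACT are
// macro-generated loops shared by the mark-compact phases (defined in
// space.hpp in this source tree). The macros just above parameterize
// them: a contiguous space knows every block is an object
// (block_is_always_obj) and can use the raw oop size (obj_size), which
// is what makes the ContiguousSpace variant the "faster object search".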

void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      debug_only(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      debug_only(MarkSweep::check_interior_pointers());

      debug_only(prev_q = q);
      debug_only(MarkSweep::validate_live_oop(oop(q), size));

      q += size;
    } else {
      // q is not a live object, but since we're not in a compactible
      // space there are no live ranges to skip over.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // First check if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify(bool allow_dirty) const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start(end()-1) &&
              top() == block_start(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(OopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}


void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();  // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    assert(blk_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();  // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See the comment in the more general Space::object_iterate_mem()
  // above in case you happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}

#ifndef SERIALGC
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
    }                                                                       \
  }

ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // SERIALGC

void ContiguousSpace::oop_iterate(OopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::oop_iterate(MemRegion mr, OopClosure* blk) {
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), top());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(blk);
    return;
  }
  assert(mr.end() <= top(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Handle first object specially.
  oop obj = oop(obj_addr);
  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
  obj_addr += obj->oop_iterate(&smr_blk);
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than "t" (the end of the region),
    // then the entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}
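
// Illustrative sketch (not part of this file): visiting every object in
// a contiguous space "cs" with a closure, using the standard
// ObjectClosure interface:
//
//   class CountObjects : public ObjectClosure {  // hypothetical helper
//    public:
//     int count;
//     CountObjects() : count(0) {}
//     void do_object(oop obj) { count++; }
//   };
//
//   CountObjects blk;
//   cs->object_iterate(&blk);  // walks [bottom(), top())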

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
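
// Note on the loop shape above (commentary): applying the closure can
// itself allocate into this space (e.g. when scavenged objects are
// copied here), so the outer do/while re-reads top() until no new
// objects have appeared past the point already scanned; the saved mark
// then advances past everything visited.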

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur  = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(), "Should be an object start");
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  HeapWord* current_top = top();
  assert(p <= current_top, "p is not a block start");
  assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
  if (p < current_top)
    return oop(p)->size();
  else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value, in which case the exchange succeeded, or
      //   the top value installed by a concurrent allocation, in which
      //   case we loop and retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}
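
// Illustrative sketch (not part of this file): a caller of the
// lock-free path retries elsewhere or falls back when the space is
// exhausted:
//
//   HeapWord* mem = cs->par_allocate(word_size);
//   if (mem == NULL) {
//     // no room: take a slow path (e.g. grab Heap_lock, expand, or GC)
//   }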

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // allocate temporary type array decreasing free size with factor 'factor'
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= min_int_array_size) {
    size_t length = (size - min_int_array_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert((int) size == instanceOopDesc::header_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass(SystemDictionary::object_klass());
  }
}
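
// Worked example of the filler sizing above (commentary): with 100 free
// words and factor == 4, size becomes 100 - 100/4 = 75 words, so the
// filler consumes three quarters of the remaining space and roughly 25
// words stay allocatable; with factor == 0 the subtraction is skipped
// and the filler takes (essentially) all remaining space.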

void EdenSpace::clear() {
  ContiguousSpace::clear();
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // top() must be read before the (soft) end: top() can never exceed
    // end(), but if _soft_end were read first and then updated before
    // top() is read, top() could have grown up to the new end, and the
    // bounds check below would spuriously fail. The
    // OrderAccess::loadload() after the top() read enforces this load
    // order.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value, in which case the exchange succeeded, or
      //   the top value installed by a concurrent allocation, in which
      //   case we loop and retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}


HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, true);
}


class VerifyOldOopClosure : public OopClosure {
 public:
  oop  the_obj;
  bool allow_dirty;
  void do_oop(oop* p) {
    the_obj->verify_old_oop(p, allow_dirty);
  }
};

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

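// With the sampling intervals above, verify() below checks every object
// (OBJ_SAMPLE_INTERVAL is 0) but cross-checks the block offset table
// only once per BLOCK_SAMPLE_INTERVAL-sized run of objects, keeping
// verification affordable.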
void OffsetTableContigSpace::verify(bool allow_dirty) const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyOldOopClosure blk;      // Does this do anything?
  blk.allow_dirty = allow_dirty;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start(p + (size/2)), "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      blk.the_obj = oop(p);
      oop(p)->oop_iterate(&blk);
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

void OffsetTableContigSpace::serialize_block_offset_array_offsets(
                                                      SerializeOopClosure* soc) {
  _offsets.serialize(soc);
}


int TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}


int ContigPermSpace::allowed_dead_ratio() const {
  return PermMarkSweepDeadRatio;
}