/*
 * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_cardTableExtension.cpp.incl"

// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
  PSYoungGen* _young_gen;
  CardTableExtension* _card_table;
  HeapWord* _unmarked_addr;
  jbyte* _unmarked_card;

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p) {
    if (_young_gen->is_in_reserved(*p) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
  PSYoungGen* _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mishmash of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
  PSYoungGen* _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p) {
    if (_young_gen->is_in_reserved(*p)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.
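//
// In outline, scavenge_contents below works as follows (a reading aid
// added to this copy, not part of the original comments):
//   1. walk the card array for the space from bottom() up to space_top;
//   2. at the first non-clean card, use the ObjectStartArray to back up
//      to the start of the object covering that card;
//   3. extend the run of cards forward until a genuinely clean card is
//      found, re-checking objects whose tails span past that card;
//   4. clear the cards in the run, then scan the oops of every object in
//      the corresponding address range.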
void CardTableExtension::scavenge_contents(ObjectStartArray* start_array,
                                           MutableSpace* sp,
                                           HeapWord* space_top,
                                           PSPromotionManager* pm)
{
  assert(start_array != NULL && sp != NULL && pm != NULL, "Sanity");
  assert(start_array->covered_region().contains(sp->used_region()),
         "ObjectStartArray does not cover space");
  bool depth_first = pm->depth_first();

  if (sp->not_empty()) {
    oop* sp_top = (oop*)space_top;
    oop* prev_top = NULL;
    jbyte* current_card = byte_for(sp->bottom());
    jbyte* end_card = byte_for(sp_top - 1);    // sp_top is exclusive
    // scan card marking array
    while (current_card <= end_card) {
      jbyte value = *current_card;
      // skip clean cards
      if (card_is_clean(value)) {
        current_card++;
      } else {
        // we found a non-clean card
        jbyte* first_nonclean_card = current_card++;
        oop* bottom = (oop*)addr_for(first_nonclean_card);
        // find object starting on card
        oop* bottom_obj = (oop*)start_array->object_start((HeapWord*)bottom);
        assert(bottom_obj <= bottom, "just checking");
        // make sure we don't scan oops we already looked at
        if (bottom < prev_top) bottom = prev_top;
        // figure out when to stop scanning
        jbyte* first_clean_card;
        oop* top;
        bool restart_scanning;
        do {
          restart_scanning = false;
          // find a clean card
          while (current_card <= end_card) {
            value = *current_card;
            if (card_is_clean(value)) break;
            current_card++;
          }
          // check if we reached the end, if so we are done
          if (current_card >= end_card) {
            first_clean_card = end_card + 1;
            current_card++;
            top = sp_top;
          } else {
            // we have a clean card, find object starting on that card
            first_clean_card = current_card++;
            top = (oop*)addr_for(first_clean_card);
            oop* top_obj = (oop*)start_array->object_start((HeapWord*)top);
            assert(top_obj <= top, "just checking");
            if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
              // an arrayOop is starting on the clean card - since we do exact store
              // checks for objArrays we are done
            } else {
              // otherwise, it is possible that the object starting on the clean card
              // spans the entire card, and that the store happened on a later card.
              // figure out where the object ends
              top = top_obj + oop(top_obj)->size();
              jbyte* top_card = CardTableModRefBS::byte_for(top - 1);   // top is exclusive
              if (top_card > first_clean_card) {
                // object ends on a different card
                current_card = top_card + 1;
                if (card_is_clean(*top_card)) {
                  // the ending card is clean, we are done
                  first_clean_card = top_card;
                } else {
                  // the ending card is not clean, continue scanning at start of do-while
                  restart_scanning = true;
                }
              } else {
                // object ends on the clean card, we are done.
                assert(first_clean_card == top_card, "just checking");
              }
            }
          }
        } while (restart_scanning);
        // we know which cards to scan, now clear them
        while (first_nonclean_card < first_clean_card) {
          *first_nonclean_card++ = clean_card;
        }
        // scan oops in objects
        // hoisted the if (depth_first) check out of the loop
        if (depth_first) {
          do {
            oop(bottom_obj)->push_contents(pm);
            bottom_obj += oop(bottom_obj)->size();
            assert(bottom_obj <= sp_top, "just checking");
          } while (bottom_obj < top);
          pm->drain_stacks_cond_depth();
        } else {
          do {
            oop(bottom_obj)->copy_contents(pm);
            bottom_obj += oop(bottom_obj)->size();
            assert(bottom_obj <= sp_top, "just checking");
          } while (bottom_obj < top);
        }
        // remember top oop* scanned
        prev_top = top;
      }
    }
  }
}

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number) {
  int ssize = 128; // Naked constant! Work unit = 64k.
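  // With the default 512-byte cards (card_shift == 9), 128 cards cover
  // 128 * 512 = 64k of heap, which is where the "64k" above comes from.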
  int dirty_card_count = 0;
  bool depth_first = pm->depth_first();

  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
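  // Striping, by example (a reading aid added to this copy): with
  // ParallelGCThreads == 4, each slice below spans 4 * 128 = 512 cards.
  // Within every slice, the worker with stripe_number 0 takes cards
  // [0, 128), stripe 1 takes [128, 256), and so on, so the workers cover
  // disjoint stripes of the card table.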
  for (jbyte* slice = start_card; slice < end_card; slice += ssize*ParallelGCThreads) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card"). Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
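        // Prefetch::write(p, interval) hints that the memory "interval"
        // bytes beyond p will be written soon, keeping memory traffic
        // ahead of the scan; interval == 0 disables prefetching and is
        // handled by the else branch below.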
        // scan all objects in the range
        if (interval != 0) {
          // hoisted the if (depth_first) check out of the loop
          if (depth_first) {
            while (p < to) {
              Prefetch::write(p, interval);
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->push_contents(pm);
              p += m->size();
            }
            pm->drain_stacks_cond_depth();
          } else {
            while (p < to) {
              Prefetch::write(p, interval);
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->copy_contents(pm);
              p += m->size();
            }
          }
        } else {
          // hoisted the if (depth_first) check out of the loop
          if (depth_first) {
            while (p < to) {
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->push_contents(pm);
              p += m->size();
            }
            pm->drain_stacks_cond_depth();
          } else {
            while (p < to) {
              oop m = oop(p);
              assert(m->is_oop_or_null(), "check for header");
              m->copy_contents(pm);
              p += m->size();
            }
          }
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // the current_card is >= the worker_end_card so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  old_gen->object_iterate(&check);
  perm_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());

  old_gen->oop_iterate(&check);
  perm_gen->oop_iterate(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
  verify_all_young_refs_precise_helper(perm_gen->object_space()->used_region());
}

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
  // FIX ME ASSERT HERE

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}
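
// Card values seen in this file, as a summary inferred from the predicates
// used above and below (the authoritative definitions live in the card
// table headers): a clean card records no old-to-young stores; a dirty
// card records an imprecise mutator store made through the write barrier;
// a newgen (youngergen) card is a precise mark set by the GC; verify_card
// is a transient value used only by the precise verification pass above,
// which the helper converts back to youngergen_card.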

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
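//
// Dispatch summary (a reading aid added to this copy): a covered region
// whose start matches new_region is resized via
// resize_covered_region_by_start(); one whose end matches is resized at
// its low end via resize_covered_region_by_end(); if neither matches,
// new_region is installed as a brand-new covered region.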
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region. The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  resize_commit_uncommit(changed_region, new_region);

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Set the new start of the committed region
  resize_update_committed_table(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                           "  _covered[%d].start(): " INTPTR_FORMAT
                           "  _covered[%d].last(): " INTPTR_FORMAT,
                           ind, _covered[ind].start(),
                           ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                           "  _committed[%d].start(): " INTPTR_FORMAT
                           "  _committed[%d].last(): " INTPTR_FORMAT,
                           ind, _committed[ind].start(),
                           ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                           "  byte_for(start): " INTPTR_FORMAT
                           "  byte_for(last): " INTPTR_FORMAT,
                           byte_for(_covered[ind].start()),
                           byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                           "  addr_for(start): " INTPTR_FORMAT
                           "  addr_for(last): " INTPTR_FORMAT,
                           addr_for((jbyte*) _committed[ind].start()),
                           addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

void CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start) but assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                        |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    MemRegion new_committed =
      MemRegion(new_start_aligned, new_end_for_commit);
    if (!new_committed.is_empty()) {
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size())) {
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    }
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        vm_exit_out_of_memory(uncommit_region.byte_size(),
                              "card table contraction");
      }
    }
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // reorder regions. There should only be at most 1 out
  // of order.
  for (int i = _cur_covered_regions-1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region. If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//                ------------
//                | target   |
//                ------------
//                         -------------
//                         |           |
//                         -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                     ------------
//                     | target   |
//                     ------------
//                                  -------------
//                                  |           |
//                                  -------------
//                     ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}