/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
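
// Overview: CardTableExtension is the ParallelScavenge-specific card table.
// It provides parallel striped scanning of dirty cards during a scavenge,
// closures that verify young-generation references against the card marks,
// and covered-region resizing that can move the low end of a generation
// (e.g. when the old/young boundary moves under UseAdaptiveGCBoundary).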

// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;
  jbyte*              _unmarked_card;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mishmash of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work.  If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant!  Work unit = 64k.
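  // (Assuming the usual 512-byte card size, 128 cards span 128 * 512 = 64K
  // of heap, which is the "64k" work unit referred to above.)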
  int dirty_card_count = 0;

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card   = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The width of the stripe ssize*stripe_total must be
  // consistent with the number of stripes so that the complete slice
  // is covered.
  size_t slice_width = ssize * stripe_total;
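  // Illustrative example: with ssize == 128 and stripe_total == 4,
  // slice_width is 512 cards; stripe 0 scans card ranges [0,128),
  // [512,640), ..., stripe 1 scans [128,256), [640,768), ..., so the
  // stripes tile the card table with no gaps or overlap.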
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay 1 worker so that it proceeds after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card").  Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

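        // Note: the object-scanning loop below appears twice, with and
        // without the prefetch, so that the interval test is made once
        // per run of cards rather than once per object.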
        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // the current_card is >= the worker_end_card so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated.  If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}
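
// scavenge_contents_parallel() is driven by the parallel scavenge tasks
// (see psTasks.hpp): each GC worker is handed its own stripe_number along
// with the common stripe_total, so the workers walk the space cooperatively.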

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}
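
// Helper for the precise verification above. CheckForPreciseMarks tags each
// verified card via set_card_newgen(), which here appears to write
// verify_card (the walk below accepts only clean_card or verify_card);
// verify_card entries are then restored to youngergen_card so later
// scavenges still treat them as young-gen cards.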

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
  // FIX ME ASSERT HERE

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes.  This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region.  The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start), but the assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table.  That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                        |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions.  One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it.  Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
  return result;
}
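
// resize_commit_uncommit() returns true only when the committed region grew
// at the low end; the caller uses that to decide whether the committed-region
// table needs updating (see resize_covered_region_by_end() above).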

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                           MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries.  Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder regions.  There should only be at most 1 out
  // of order.
  for (int i = _cur_covered_regions - 1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region.  If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//              ------------
//              | target   |
//              ------------
//                               -------------
//                               |           |
//                               -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//              ------------
//              | target   |
//              ------------
//                               -------------
//                               |           |
//                               -------------
//              ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}