/*
 * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"

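// Explanatory note (added; not part of the original comments): each parallel GC
// worker enters this routine and repeatedly claims strides of the card table
// covering 'mr' from the space's SequentialSubTasksDone, so that every chunk of
// cards is processed by exactly one thread. The "LNC" (lowest_non_clean) array,
// one entry per chunk, records the least non-clean card of a chunk before its
// cards are cleared; it is used to coordinate scanning of objects that straddle
// chunk boundaries and is reset once all strides are done.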
void CardTableRS::
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                     OopsInGenClosure* cl,
                                     CardTableRS* ct,
                                     uint n_threads) {
  assert(n_threads > 0, "expected n_threads > 0");
  assert(n_threads <= ParallelGCThreads,
         "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);

  // Make sure the LNC array is valid for the space.
  jbyte** lowest_non_clean;
  uintptr_t lowest_non_clean_base_chunk_index;
  size_t lowest_non_clean_chunk_size;
  get_LNC_array_for_space(sp, lowest_non_clean,
                          lowest_non_clean_base_chunk_index,
                          lowest_non_clean_chunk_size);

  uint n_strides = n_threads * ParGCStridesPerThread;
  SequentialSubTasksDone* pst = sp->par_seq_tasks();
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks(n_strides);

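  // Added clarification: each worker repeatedly claims an unclaimed stride
  // index from 'pst' and processes it; the loop below exits once every stride
  // has been claimed by some thread.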
  uint stride = 0;
  while (!pst->is_task_claimed(/* reference */ stride)) {
    process_stride(sp, mr, stride, n_strides,
                   cl, ct,
                   lowest_non_clean,
                   lowest_non_clean_base_chunk_index,
                   lowest_non_clean_chunk_size);
  }
  if (pst->all_tasks_completed()) {
    // Clear lowest_non_clean array for next time.
    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
    uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
             "Bounds error");
      lowest_non_clean[ind] = NULL;
    }
  }
}

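// Explanatory note (added; not part of the original comments): a "stride" is the
// set of card-table chunks, each ParGCCardsPerStrideChunk cards wide, whose chunk
// index is congruent to 'stride' modulo 'n_strides'. This routine visits every
// chunk of its stride that intersects the 'used' region, delegating boundary
// coordination to process_chunk_boundaries() and the scan-and-clear work to
// ClearNoncleanCardWrapper / DirtyCardToOopClosure.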
void
CardTableRS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               OopsInGenClosure* cl,
               CardTableRS* ct,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t lowest_non_clean_chunk_size) {
  // We go from lower to higher addresses here; going from higher to lower
  // wouldn't help that much because of the strided parallelism pattern
  // used here.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card = byte_for(used.start());
  jbyte*    end_card   = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                ParGCCardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                ParGCCardsPerStrideChunk);
  }
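  // Illustrative example (added, using hypothetical values): with
  // n_strides == 4 and start_chunk_stride_num == 2, stride 3 begins
  // (3 - 2) == 1 chunk after start_card, while stride 1 wraps to the next
  // chunk group and begins (4 - 2 + 1) == 3 chunks after start_card.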

  while (chunk_card_start < end_card) {
    // Even though we go from lower to higher addresses below, the
    // strided parallelism can interleave the actual processing of the
    // dirty pages in various ways. For a specific chunk within this
    // stride, we take care to avoid double scanning or missing a card
    // by suitably initializing the "min_done" field in process_chunk_boundaries()
    // below, together with the dirty region extension accomplished in
    // DirtyCardToOopClosure::do_MemRegion().
    jbyte*    chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
    // Invariant: chunk_mr should be fully contained within the "used" region.
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    // This function is used by the parallel card table iteration.
    const bool parallel = true;

    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                     cl->gen_boundary(),
                                                     parallel);
    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    // We want the LNC array updates above in process_chunk_boundaries
    // to be visible before any of the card table value changes as a
    // result of the dirty card iteration below.
    OrderAccess::storestore();

    // We want to clear the cards: clear_cl here does the work of finding
    // contiguous dirty ranges of cards to process and clear.
    clear_cl.do_MemRegion(chunk_mr);

    // Find the next chunk of the stride.
    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
  }
}

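// Explanatory note (added; not part of the original comments): because a
// non-array object can straddle a chunk boundary, neighboring chunks must agree
// on who scans it and how far. This routine publishes this chunk's lowest
// non-clean card for the benefit of the chunk to its left, and computes
// 'max_to_do', the exclusive bound past the right end of this chunk beyond
// which the current thread will not scan.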
void
CardTableRS::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t lowest_non_clean_chunk_size)
{
  // We must worry about non-array objects that cross chunk boundaries,
  // because such objects are both precisely and imprecisely marked:
  // .. if the head of such an object is dirty, the entire object
  //    needs to be scanned, under the interpretation that this
  //    was an imprecise mark
  // .. if the head of such an object is not dirty, we can assume
  //    precise marking and it's efficient to scan just the dirty
  //    cards.
  // In either case, each scanned reference must be scanned precisely
  // once so as to avoid cloning of a young referent. For efficiency,
  // our closures depend on this property and do not protect against
  // double scans.

  uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start());
  assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
  uintptr_t cur_chunk_index   = start_chunk_index - lowest_non_clean_base_chunk_index;

  // First, set "our" lowest_non_clean entry, which would be
  // used by the thread scanning an adjoining left chunk with
  // a non-array object straddling the mutual boundary.
  // Find the object that spans our boundary, if one exists.
  // first_block is the block possibly straddling our left boundary.
  HeapWord* first_block = sp->block_start(chunk_mr.start());
  assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
         "First chunk should always have a co-initial block");
  // Does the block straddle the chunk's left boundary, and is it
  // a non-array object?
  if (first_block < chunk_mr.start()        // first block straddles left bdry
      && sp->block_is_obj(first_block)      // first block is an object
      && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
           || oop(first_block)->is_typeArray())) {
    // Find our least non-clean card, so that a left neighbor
    // does not scan an object straddling the mutual boundary
    // too far to the right, and attempt to scan a portion of
    // that object twice.
    jbyte* first_dirty_card = NULL;
    jbyte* last_card_of_first_obj =
        byte_for(first_block + sp->block_size(first_block) - 1);
    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
    jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
    jbyte* last_card_to_check =
      (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
                    (intptr_t) last_card_of_first_obj);
    // Note that this does not need to go beyond our last card
    // if our first object completely straddles this chunk.
    for (jbyte* cur = first_card_of_cur_chunk;
         cur <= last_card_to_check; cur++) {
      jbyte val = *cur;
      if (card_will_be_scanned(val)) {
        first_dirty_card = cur; break;
      } else {
        assert(!card_may_have_been_dirty(val), "Error");
      }
    }
    if (first_dirty_card != NULL) {
      assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
      assert(lowest_non_clean[cur_chunk_index] == NULL,
             "Write exactly once : value should be stable hereafter for this round");
      lowest_non_clean[cur_chunk_index] = first_dirty_card;
    }
  } else {
    // In this case we can help our neighbor by just asking them
    // to stop at our first card (even though it may not be dirty).
    assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
    lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
  }

  // Next, set our own max_to_do, which will strictly/exclusively bound
  // the highest address that we will scan past the right end of our chunk.
  HeapWord* max_to_do = NULL;
  if (chunk_mr.end() < used.end()) {
    // This is not the last chunk in the used region.
    // What is our last block? We check the first block of
    // the next (right) chunk rather than strictly check our last block
    // because it's potentially more efficient to do so.
    HeapWord* const last_block = sp->block_start(chunk_mr.end());
    assert(last_block <= chunk_mr.end(), "In case this property changes.");
    if ((last_block == chunk_mr.end())    // our last block does not straddle boundary
        || !sp->block_is_obj(last_block)  // last_block isn't an object
        || oop(last_block)->is_objArray() // last_block is an array (precisely marked)
        || oop(last_block)->is_typeArray()) {
      max_to_do = chunk_mr.end();
    } else {
      assert(last_block < chunk_mr.end(), "Tautology");
      // It is a non-array object that straddles the right boundary of this chunk.
      // last_obj_card is the card corresponding to the start of the last object
      // in the chunk.  Note that the last object may not start in
      // the chunk.
      jbyte* const last_obj_card = byte_for(last_block);
      const jbyte val = *last_obj_card;
      if (!card_will_be_scanned(val)) {
        assert(!card_may_have_been_dirty(val), "Error");
        // The card containing the head is not dirty.  Any marks on
        // subsequent cards still in this chunk must have been made
        // precisely; we can cap processing at the end of our chunk.
        max_to_do = chunk_mr.end();
      } else {
        // The last object must be considered dirty, and extends onto the
        // following chunk.  Look for a dirty card in that chunk that will
        // bound our processing.
        jbyte* limit_card = NULL;
        const size_t last_block_size = sp->block_size(last_block);
        jbyte* const last_card_of_last_obj =
          byte_for(last_block + last_block_size - 1);
        jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
        // This search potentially goes a long distance looking
        // for the next card that will be scanned, terminating
        // at the end of the last_block, if no earlier dirty card
        // is found.
        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
               "last card of next chunk may be wrong");
        for (jbyte* cur = first_card_of_next_chunk;
             cur <= last_card_of_last_obj; cur++) {
          const jbyte val = *cur;
          if (card_will_be_scanned(val)) {
            limit_card = cur; break;
          } else {
            assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
          }
        }
        if (limit_card != NULL) {
          max_to_do = addr_for(limit_card);
          assert(limit_card != NULL && max_to_do != NULL, "Error");
        } else {
          // The following is a pessimistic value, because it's possible
          // that a dirty card on a subsequent chunk has been cleared by
          // the time we get to look at it; we'll correct for that further below,
          // using the LNC array which records the least non-clean card
          // before cards were cleared in a particular chunk.
          limit_card = last_card_of_last_obj;
          max_to_do = last_block + last_block_size;
          assert(limit_card != NULL && max_to_do != NULL, "Error");
        }
        assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
               "Bounds error.");
        // It is possible that a dirty card for the last object may have been
        // cleared before we had a chance to examine it. In that case, the value
        // will have been logged in the LNC for that chunk.
        // We need to examine as many chunks to the right as this object
        // covers. However, we need to bound this checking to the largest
        // entry in the LNC array: this is because the heap may expand
        // after the LNC array has been created but before we reach this point,
        // and the last block in our chunk may have been expanded to include
        // the expansion delta (and possibly subsequently allocated from, so
        // it wouldn't be sufficient to check whether that last block was
        // or was not an object at this point).
        uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
          - lowest_non_clean_base_chunk_index;
        const uintptr_t last_chunk_index    = addr_to_chunk_index(used.last())
          - lowest_non_clean_base_chunk_index;
        if (last_chunk_index_to_check > last_chunk_index) {
          assert(last_block + last_block_size > used.end(),
                 "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
                 " does not exceed used.end() = " PTR_FORMAT ","
                 " yet last_chunk_index_to_check " INTPTR_FORMAT
                 " exceeds last_chunk_index " INTPTR_FORMAT,
                 p2i(last_block), p2i(last_block + last_block_size),
                 p2i(used.end()),
                 last_chunk_index_to_check, last_chunk_index);
          assert(sp->used_region().end() > used.end(),
                 "Expansion did not happen: "
                 "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
                 p2i(sp->used_region().start()), p2i(sp->used_region().end()),
                 p2i(used.start()), p2i(used.end()));
          last_chunk_index_to_check = last_chunk_index;
        }
        for (uintptr_t lnc_index = cur_chunk_index + 1;
             lnc_index <= last_chunk_index_to_check;
             lnc_index++) {
          jbyte* lnc_card = lowest_non_clean[lnc_index];
          if (lnc_card != NULL) {
            // we can stop at the first non-NULL entry we find
            if (lnc_card <= limit_card) {
              limit_card = lnc_card;
              max_to_do = addr_for(limit_card);
              assert(limit_card != NULL && max_to_do != NULL, "Error");
            }
            // In any case, we break now
            break;
          }  // else continue to look for a non-NULL entry if any
        }
        assert(limit_card != NULL && max_to_do != NULL, "Error");
      }
      assert(max_to_do != NULL, "OOPS 1 !");
    }
    assert(max_to_do != NULL, "OOPS 2!");
  } else {
    max_to_do = used.end();
  }
  assert(max_to_do != NULL, "OOPS 3!");
  // Now we can set the closure we're using so that it does not scan
  // beyond max_to_do.
  dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
  dcto_cl->set_last_bottom(max_to_do);
#endif
}

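// Explanatory note (added; not part of the original comments): returns, via the
// reference parameters, the lowest_non_clean ("LNC") array for the covered
// region containing 'sp', (re)allocating it at most once per collection. The
// array has one entry per card-table chunk and is shared by all worker threads
// scanning that region.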
void
CardTableRS::
get_LNC_array_for_space(Space* sp,
                        jbyte**& lowest_non_clean,
                        uintptr_t& lowest_non_clean_base_chunk_index,
                        size_t& lowest_non_clean_chunk_size) {

  int i = find_covering_region_containing(sp->bottom());
  MemRegion covered = _covered[i];
  size_t n_chunks = chunks_to_cover(covered);

  // Only the first thread to obtain the lock will resize the
  // LNC array for the covered region.  Any later expansion can't affect
  // the used_at_save_marks region.
  // (I observed a bug in which the first thread to execute this would
  // resize, and then it would cause "expand_and_allocate" that would
  // increase the number of chunks in the covered region.  Then a second
  // thread would come and execute this, see that the size didn't match,
  // and free and allocate again.  So the first thread would be using a
  // freed "_lowest_non_clean" array.)

  // Do a dirty read here. If we pass the conditional then take the rare
  // event lock and do the read again in case some other thread had already
  // succeeded and done the resize.
  int cur_collection = CMSHeap::heap()->total_collections();
  // Updated _last_LNC_resizing_collection[i] must not be visible before
  // _lowest_non_clean and friends are visible. Therefore use acquire/release
  // to guarantee this on non-TSO architectures.
  if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
    MutexLocker x(ParGCRareEvent_lock);
    // This load_acquire is here for clarity only. The MutexLocker already fences.
    if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
      if (_lowest_non_clean[i] == NULL ||
          n_chunks != _lowest_non_clean_chunk_size[i]) {

        // Should we delete the old?
        if (_lowest_non_clean[i] != NULL) {
          assert(n_chunks != _lowest_non_clean_chunk_size[i],
                 "logical consequence");
          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
          _lowest_non_clean[i] = NULL;
        }
        // Now allocate a new one if necessary.
        if (_lowest_non_clean[i] == NULL) {
          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
          _lowest_non_clean_chunk_size[i]       = n_chunks;
          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
          for (int j = 0; j < (int)n_chunks; j++)
            _lowest_non_clean[i][j] = NULL;
        }
      }
      // Make sure this gets visible only after _lowest_non_clean* was initialized.
      OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
    }
  }
  // In any case, now do the initialization.
  lowest_non_clean                  = _lowest_non_clean[i];
  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
  lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
}