/*
 * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)

// As it currently stands, this barrier is *imprecise*: when a ref field in
// an object "o" is modified, the card table entry for the card containing
// the head of "o" is dirtied, not necessarily the card containing the
// modified field itself. For object arrays, however, the barrier *is*
// precise; only the card containing the modified element is dirtied.
// Any MemRegionClosures used to scan dirty cards should take these
// considerations into account.
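// For example, a closure processing a dirty card must be prepared to
// scan refs in an object whose header lies on that card even when the
// modified field itself lies on a later, still-clean card.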

class Generation;
class OopsInGenClosure;
class DirtyCardToOopClosure;

class CardTableModRefBS: public ModRefBarrierSet {
  // Some classes get to look at some private stuff.
  friend class BytecodeInterpreter;
  friend class VMStructs;
  friend class CardTableRS;
  friend class CheckForUnmarkedOops; // Needs access to raw card bytes.
#ifndef PRODUCT
  // For debugging.
  friend class GuaranteeNotModClosure;
#endif
protected:

  enum CardValues {
    clean_card             = -1,
    dirty_card             =  0,
    precleaned_card        =  1,
    last_card              =  4,
    CT_MR_BS_last_reserved = 10
  };
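  // A card byte goes from clean_card to dirty_card when a reference in
  // the region it covers is updated, and from dirty_card to
  // precleaned_card once a precleaning pass has scanned it (see
  // preclean_dirty_cards below). last_card is the guard value stored in
  // the table's final entry (see _guard_index below); values up to
  // CT_MR_BS_last_reserved are presumably reserved for use by subtypes.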

  // dirty and precleaned are equivalent wrt younger_refs_iter.
  static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
    return cv == dirty_card || cv == precleaned_card;
  }

  // Returns "true" iff the value "cv" will cause the card containing it
  // to be scanned in the current traversal. May be overridden by
  // subtypes.
  virtual bool card_will_be_scanned(jbyte cv) {
    return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
  }

  // Returns "true" iff the value "cv" may have represented a dirty card at
  // some point.
  virtual bool card_may_have_been_dirty(jbyte cv) {
    return card_is_dirty_wrt_gen_iter(cv);
  }

  // The declaration order of these const fields is important; see the
  // constructor before changing.
  const MemRegion _whole_heap;       // the region covered by the card table
  const size_t    _guard_index;      // index of the very last element in the
                                     // card table; it is set to a guard value
                                     // (last_card) and should never be modified
  const size_t    _last_valid_index; // index of the last valid element
  const size_t    _page_size;        // page size used when mapping _byte_map
  const size_t    _byte_map_size;    // in bytes
  jbyte*          _byte_map;         // the card marking array

  int _cur_covered_regions;
  // The covered regions should be in address order.
  MemRegion* _covered;
  // The committed regions correspond one-to-one to the covered regions.
  // They represent the card-table memory that has been committed to service
  // the corresponding covered region. The committed region for one covered
  // region may correspond to a larger region because of page-size roundings.
  // Thus, a committed region for one covered region may actually extend onto
  // the card-table space for the next covered region.
  MemRegion* _committed;
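  // (For example, with a 4 KB _page_size, a covered region whose
  // card-table bytes end partway into a page has its committed region
  // rounded up to the page boundary, overlapping the first card-table
  // bytes of the next covered region.)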

  // The last card is a guard card, and we commit the page for it so
  // we can use the card for verification purposes. We make sure we never
  // uncommit the MemRegion for that page.
  MemRegion _guard_region;

protected:
  // Initialization utilities; covered_words is the size of the covered
  // region in words.
  inline size_t cards_required(size_t covered_words);
  inline size_t compute_byte_map_size();

  // Finds and returns the index of the region, if any, to which the given
  // region would be contiguous. If none exists, assigns a new region and
  // returns its index. Requires that no more than the maximum number of
  // covered regions defined in the constructor are ever in use.
  int find_covering_region_by_base(HeapWord* base);

  // Same as above, but finds the region containing the given address
  // instead of starting at a given base address.
  int find_covering_region_containing(HeapWord* addr);

  // Resize one of the regions covered by the remembered set.
  void resize_covered_region(MemRegion new_region);

  // Returns the leftmost end of a committed region corresponding to a
  // covered region before covered region "ind", or else NULL if "ind" is
  // the first covered region.
  HeapWord* largest_prev_committed_end(int ind) const;

  // Returns the part of the region "mr" that doesn't intersect with
  // any committed region other than "self". Used to prevent uncommitting
  // regions that are also committed by other regions. Also protects
  // against uncommitting the guard region.
  MemRegion committed_unique_to_self(int self, MemRegion mr) const;

  // Mapping from address to card marking array entry.
  jbyte* byte_for(const void* p) const {
    assert(_whole_heap.contains(p),
           "out of bounds access to card marking array");
    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
           "out of bounds accessor for card marking array");
    return result;
  }

  // The card table byte one after the card marking array entry for the
  // argument address. Typically used as an upper bound for loops
  // iterating through the card table.
  jbyte* byte_after(const void* p) const {
    return byte_for(p) + 1;
  }

  // Mapping from card marking array entry to the address of the first
  // word it covers.
  HeapWord* addr_for(const jbyte* p) const {
    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
           "out of bounds access to card marking array");
    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << card_shift);
    assert(_whole_heap.contains(result),
           "out of bounds accessor from card marking array");
    return result;
  }
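  // For instance, if the heap began at 0x20000000 (an illustrative
  // address only), byte_map_base would be
  // _byte_map - (0x20000000 >> card_shift). Then for p = 0x20000400,
  // one kilobyte into the heap, byte_for(p) is
  // byte_map_base + 0x100002 == _byte_map + 2, the third card byte,
  // and addr_for inverts this, since 0x100002 << 9 == 0x20000400.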

  // Iterate over the portion of the card table which covers the given
  // region "mr" in the given space and apply "cl" to any dirty sub-regions
  // of "mr". "cl" and "dcto_cl" must either be the same closure or "cl"
  // must wrap "dcto_cl". Both are required - neither may be NULL. Also,
  // "dcto_cl" may be modified. Note that this function will operate in a
  // parallel mode if worker threads are available.
  void non_clean_card_iterate(Space* sp, MemRegion mr,
                              DirtyCardToOopClosure* dcto_cl,
                              MemRegionClosure* cl,
                              bool clear);

  // Utility function used to implement the other versions below.
  void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl,
                                   bool clear);

  void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
                                       DirtyCardToOopClosure* dcto_cl,
                                       MemRegionClosure* cl,
                                       bool clear,
                                       int n_threads);

  // Dirty the bytes corresponding to "mr" (not all of which must be
  // covered.)
  void dirty_MemRegion(MemRegion mr);

  // Clear (to clean_card) the bytes entirely contained within "mr" (not
  // all of which must be covered.)
  void clear_MemRegion(MemRegion mr);

  // *** Support for parallel card scanning.

  enum SomeConstantsForParallelism {
    StridesPerThread    = 2,
    CardsPerStrideChunk = 256
  };

  // This is an array, one element per covered region of the card table.
  // Each entry is itself an array, with one element per chunk in the
  // covered region. Each entry of these arrays is the lowest non-clean
  // card of the corresponding chunk containing part of an object from the
  // previous chunk, or else NULL.
  typedef jbyte* CardPtr;
  typedef CardPtr* CardArr;
  CardArr*   _lowest_non_clean;
  size_t*    _lowest_non_clean_chunk_size;
  uintptr_t* _lowest_non_clean_base_chunk_index;
  int*       _last_LNC_resizing_collection;

  // Initializes "lowest_non_clean" to point to the array for the region
  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
  // index corresponding to the first element of that array.
  // Ensures that these arrays are of sufficient size, allocating if
  // necessary. May be called by several threads concurrently.
  void get_LNC_array_for_space(Space* sp,
                               jbyte**& lowest_non_clean,
                               uintptr_t& lowest_non_clean_base_chunk_index,
                               size_t& lowest_non_clean_chunk_size);

  // Returns the number of chunks necessary to cover "mr".
  size_t chunks_to_cover(MemRegion mr) {
    return (size_t)(addr_to_chunk_index(mr.last()) -
                    addr_to_chunk_index(mr.start()) + 1);
  }

  // Returns the index of the chunk in a stride which
  // covers the given address.
  uintptr_t addr_to_chunk_index(const void* addr) {
    uintptr_t card = (uintptr_t) byte_for(addr);
    return card / CardsPerStrideChunk;
  }
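  // With CardsPerStrideChunk = 256 and 512-byte cards, each chunk covers
  // 256 * 512 bytes = 128 KB of heap. Note that the index is computed
  // from the address of the card byte, so chunk indices are absolute;
  // _lowest_non_clean_base_chunk_index records the index of the first
  // chunk of each covered region so indices can be rebased per region.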

  // Apply "cl", which must either itself apply "dcto_cl" or be "dcto_cl",
  // to the cards in the stride (of n_strides) within the given space.
  void process_stride(Space* sp,
                      MemRegion used,
                      jint stride, int n_strides,
                      DirtyCardToOopClosure* dcto_cl,
                      MemRegionClosure* cl,
                      bool clear,
                      jbyte** lowest_non_clean,
                      uintptr_t lowest_non_clean_base_chunk_index,
                      size_t lowest_non_clean_chunk_size);

  // Makes sure that chunk boundaries are handled appropriately, by
  // adjusting the min_done of "dcto_cl", and by using a special card-table
  // value to indicate how min_done should be set.
  void process_chunk_boundaries(Space* sp,
                                DirtyCardToOopClosure* dcto_cl,
                                MemRegion chunk_mr,
                                MemRegion used,
                                jbyte** lowest_non_clean,
                                uintptr_t lowest_non_clean_base_chunk_index,
                                size_t lowest_non_clean_chunk_size);

public:
  // Constants
  enum SomePublicConstants {
    card_shift         = 9,
    card_size          = 1 << card_shift,
    card_size_in_words = card_size / sizeof(HeapWord)
  };
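  // With card_shift = 9, each card byte covers 2^9 = 512 bytes of heap;
  // on a 64-bit VM (8-byte HeapWords) that is 64 words per card, and the
  // table costs one byte per 512 heap bytes, about 0.2% of the heap.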

  // For RTTI simulation.
  BarrierSet::Name kind() { return BarrierSet::CardTableModRef; }
  bool is_a(BarrierSet::Name bsn) {
    return bsn == BarrierSet::CardTableModRef || bsn == BarrierSet::ModRef;
  }

  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);

  // *** Barrier set functions.

  inline bool write_ref_needs_barrier(oop* field, oop new_val) {
    // Note that this assumes the perm gen is the highest generation
    // in the address space.
    return new_val != NULL && !new_val->is_perm();
  }

  // Record a reference update. Note that these versions are precise!
  // The scanning code has to handle the fact that the write barrier may be
  // either precise or imprecise. We make non-virtual inline variants of
  // these functions here for performance.
protected:
  void write_ref_field_work(oop obj, size_t offset, oop newVal);
  void write_ref_field_work(oop* field, oop newVal);
public:

  bool has_write_ref_array_opt() { return true; }
  bool has_write_region_opt() { return true; }

  inline void inline_write_region(MemRegion mr) {
    dirty_MemRegion(mr);
  }
protected:
  void write_region_work(MemRegion mr) {
    inline_write_region(mr);
  }
public:

  inline void inline_write_ref_array(MemRegion mr) {
    dirty_MemRegion(mr);
  }
protected:
  void write_ref_array_work(MemRegion mr) {
    inline_write_ref_array(mr);
  }
public:

  bool is_aligned(HeapWord* addr) {
    return is_card_aligned(addr);
  }

  // *** Card-table-barrier-specific things.

  inline void inline_write_ref_field(oop* field, oop newVal) {
    jbyte* byte = byte_for(field);
    *byte = dirty_card;
  }

  // Card marking array base (adjusted for heap low boundary).
  // This would be the 0th element of _byte_map, if the heap started at 0x0.
  // But since the heap starts at some higher address, this points to
  // somewhere before the beginning of the actual _byte_map.
  jbyte* byte_map_base;
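  // Because byte_map_base is pre-biased by the heap's low boundary, the
  // inline barrier above needs no base subtraction at runtime; it reduces
  // to a shift and a byte store:
  //
  //   byte_map_base[(uintptr_t)field >> card_shift] = 0; // dirty_card
  //
  // the classic two-instruction card mark.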

  // Return true if "p" is at the start of a card.
  bool is_card_aligned(HeapWord* p) {
    jbyte* pcard = byte_for(p);
    return (addr_for(pcard) == p);
  }

  // The kinds of precision a CardTableModRefBS may offer.
  enum PrecisionStyle {
    Precise,
    ObjHeadPreciseArray
  };

  // Tells what style of precision this card table offers.
  PrecisionStyle precision() {
    return ObjHeadPreciseArray; // Only one supported for now.
  }

  // ModRefBS functions.
  void invalidate(MemRegion mr);
  void clear(MemRegion mr);
  void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
                                bool clear = false,
                                bool before_save_marks = false);

  // *** Card-table-RemSet-specific things.

  // Invoke "cl.do_MemRegion" on a set of MemRegions that collectively
  // includes all the modified cards (expressing each card as a
  // MemRegion). Thus, several modified cards may be lumped into one
  // region. The regions are non-overlapping, and are visited in
  // *decreasing* address order. (This order aids with imprecise card
  // marking, where a dirty card may cause scanning, and summarization
  // marking, of objects that extend onto subsequent cards.)
  // If "clear" is true, the card is (conceptually) marked unmodified before
  // applying the closure.
  void mod_card_iterate(MemRegionClosure* cl, bool clear = false) {
    non_clean_card_iterate_work(_whole_heap, cl, clear);
  }
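  // A minimal usage sketch (the closure is hypothetical, for illustration
  // only):
  //
  //   class CountDirtyClosure: public MemRegionClosure {
  //     size_t _cards;
  //   public:
  //     CountDirtyClosure() : _cards(0) {}
  //     // Each visited region is a maximal run of non-clean cards.
  //     void do_MemRegion(MemRegion mr) {
  //       _cards += mr.word_size() / card_size_in_words;
  //     }
  //     size_t cards() const { return _cards; }
  //   };
  //
  //   CountDirtyClosure blk;
  //   bs->mod_card_iterate(&blk); // regions visited high-to-low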

  // Like the "mod_card_iterate" above, except only invokes the closure
  // for cards within the MemRegion "mr" (which is required to be
  // card-aligned and sized.)
  void mod_card_iterate(MemRegion mr, MemRegionClosure* cl,
                        bool clear = false) {
    non_clean_card_iterate_work(mr, cl, clear);
  }

  static uintx ct_max_alignment_constraint();

  // Apply the closure "cl" to the dirty cards lying completely
  // within MemRegion "mr", setting the cards to precleaned.
  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);

  // Return the MemRegion corresponding to the first maximal run
  // of dirty cards lying completely within MemRegion "mr", after
  // marking those cards precleaned.
  MemRegion dirty_card_range_after_preclean(MemRegion mr);

  // Set all the dirty cards in the given region to the precleaned state.
  void preclean_dirty_cards(MemRegion mr);

  // Mapping from address to card marking array index.
  int index_for(void* p) {
    assert(_whole_heap.contains(p),
           "out of bounds access to card marking array");
    return byte_for(p) - _byte_map;
  }

  void verify();
  void verify_guard();

  void verify_clean_region(MemRegion mr) PRODUCT_RETURN;

  static size_t par_chunk_heapword_alignment() {
    return CardsPerStrideChunk * card_size_in_words;
  }
};

class CardTableRS;

// A specialization for the CardTableRS gen rem set.
class CardTableModRefBSForCTRS: public CardTableModRefBS {
  CardTableRS* _rs;
protected:
  bool card_will_be_scanned(jbyte cv);
  bool card_may_have_been_dirty(jbyte cv);
public:
  CardTableModRefBSForCTRS(MemRegion whole_heap,
                           int max_covered_regions) :
    CardTableModRefBS(whole_heap, max_covered_regions) {}

  void set_CTRS(CardTableRS* rs) { _rs = rs; }
};