/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/liveRange.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur), _hr)) {
    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop, _hr)) {
        // Bottom lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur), _hr)) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

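// Sets up the global region size constants. Unless G1HeapRegionSize is set on
// the command line, the size is derived from the average of the initial and
// maximum heap sizes divided by HeapRegionBounds::target_number(), rounded
// down to a power of two and clamped to the HeapRegionBounds min/max. As a
// purely illustrative example: if that average were 8 GB and the target count
// 2048, the computed region size would be 4 MB.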
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((size_t)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == orig_end(),
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         err_msg("Should not clear heap region %u in the collection set", hrm_index()));

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

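// Turns an empty, normal-sized region into the first region of a humongous
// object: the region is tagged "starts humongous", its end is extended to
// new_end, and the block offset table is primed for the humongous allocation
// via set_for_starts_humongous(new_top).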
void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(end() == orig_end(),
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(end() == orig_end(),
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  if (is_starts_humongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(orig_end());
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == orig_end(), "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
  G1OffsetTableContigSpace(sharedOffsetArray, mr),
  _hrm_index(hrm_index),
  _allocation_context(AllocationContext::system()),
  _humongous_start_region(NULL),
  _next_in_special_set(NULL),
  _evacuation_failed(false),
  _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
  _next_young_region(NULL),
  _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
  _containing_set(NULL),
#endif // ASSERT
  _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
  _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
  _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();

  assert(mr.end() == orig_end(),
         err_msg("Given region end address " PTR_FORMAT " should match exactly "
                 "bottom plus one region size, i.e. " PTR_FORMAT,
                 p2i(mr.end()), p2i(orig_end())));
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, marked_bytes, used()));
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    cur += block_size(cur);
  }
  return NULL;
}

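// Applies the given closure to the objects on the card-sized MemRegion mr,
// skipping dead objects. Returns NULL if the whole intersection with the
// parseable part of the region was processed, or the address of an object
// whose klass word is not yet visible (an "unparseable point") if scanning
// had to stop early.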
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely contained
      // in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

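// Verification closure for strong code roots: visits the oops embedded in an
// nmethod, checks that any oop pointing into the associated region lies below
// the region's top, and records whether the nmethod references the region at
// all.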
class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it is below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
                                 "top "PTR_FORMAT,
                                 p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots in this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "SIZE_FORMAT" code root entries",
                             p2i(bottom()), p2i(end()), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
                             "region but has "SIZE_FORMAT" code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("AC%4u", allocation_context());

  st->print(" %2s", get_short_type_str());
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
  G1OffsetTableContigSpace::print_on(st);
}

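// Verification closure used by HeapRegion::verify(): for every reference
// field it checks that the referent is inside the heap and live under the
// given VerifyOption, and that a cross-region reference into a non-humongous
// region is covered by the destination region's remembered set, comes from a
// young region, or is still covered by a dirty card (the remembered set part
// is skipped during full collections unless G1VerifyRSetsDuringFullGC is set).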
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo)
  { }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p2i(p), p2i(_containing_obj),
                                 p2i(from->bottom()), p2i(from->end()));
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 p2i(obj));
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p2i(p), p2i(_containing_obj),
                                 p2i(from->bottom()), p2i(from->end()));
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p2i(obj), p2i(to->bottom()), p2i(to->end()));
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->is_humongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                             (_containing_obj->is_objArray() ?
                                cv_field == dirty
                              : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->cr();
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p2i(p), p2i(_containing_obj),
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   p2i(obj),
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_region_humongous != g1->is_humongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p2i(p), g1->is_humongous(obj_size) ? "" : "non-",
                             obj_size, is_region_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                    ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _offsets.verify();
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               p2i(addr_2), p2i(b_start_2), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               p2i(addr_3), p2i(b_start_3), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             p2i(addr_4), p2i(b_start_4), p2i(p));
      *failures = true;
      return;
    }
  }

  if (is_region_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
896 "but has "SIZE_FORMAT", objects", |
|
                           p2i(bottom()), p2i(end()), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

#ifndef PRODUCT
void G1OffsetTableContigSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1OffsetTableContigSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

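// Returns the maximum address up to which it is currently safe to scan this
// region. If the region's time stamp is older than the heap's current GC time
// stamp, top() is a safe limit; otherwise the explicitly recorded _scan_top is
// used (see record_timestamp() and record_retained_region()).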
HeapWord* G1OffsetTableContigSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    return local_top;
  } else {
    return _scan_top;
  }
}

void G1OffsetTableContigSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}

void G1OffsetTableContigSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}

void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
}

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  _scan_top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}