author:      tonyp
date:        Tue, 24 Aug 2010 17:24:33 -0400
changeset:   7398:e4aa6d9bda09
parent:      7397:5b173b4ca846
child:       7904:e90e097fced4
permissions: -rw-r--r--
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentZFThread.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
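
// Region size globals. These are computed exactly once, by
// HeapRegion::setup_heap_region_size() below (note the guarantees there).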
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
int HeapRegion::GrainBytes        = 0;
int HeapRegion::GrainWords        = 0;
int HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{}

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}
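
// VerifyLiveClosure visits the reference fields of a live object during heap
// verification: each referent must lie in the heap and be live, and (outside
// a full collection) a cross-region reference must either originate in a
// young region, be recorded in the destination region's remembered set, or
// sit on a dirty card.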
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  bool _use_prev_marking;
public:
  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T> void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) ||
          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT
                                   " of obj "PTR_FORMAT
                                   ", in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT"),",
                                   p, (void*) _containing_obj,
                                   from->hrs_index(),
                                   from->bottom(),
                                   from->end());
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
                                   " in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT").",
                                   (void*) obj, to->hrs_index(),
                                   to->bottom(), to->end());
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;

  int oop_size;

  OopClosure* cl2 = cl;
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  switch (_fk) {
  case IntoCSFilterKind:      cl2 = &intoCSFilt;      break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;
    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE     (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE     ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER          2048
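
// Illustrative sizing (not from the source): with the default flags and
// -Xms2g, region_size = MAX2(2g / 2048, 1M) = 1M; with -Xms8g it becomes 4M.
// The result is then rounded down to a power of two and clamped to
// [MIN_REGION_SIZE, MAX_REGION_SIZE] by the code below.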
void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (int) region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
  _in_collection_set = false;
  _is_gc_alloc_region = false;

  // Age stuff (if parallel, this will be done separately, since it needs
  // to be sequential).
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);

  // In case it had been the start of a humongous sequence, reset its end.
  set_end(_orig_end);

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

// <PREDICTION>
void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _gc_efficiency = (double) garbage_bytes() /
                            g1h->predict_region_elapsed_time_ms(this, false);
}
// </PREDICTION>

void HeapRegion::set_startsHumongous(HeapWord* new_end) {
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_end);
}

void HeapRegion::set_continuesHumongous(HeapRegion* start) {
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(start->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = start;
}
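
// Regions are claimed for parallel work with a single compare-and-swap on
// _claimed: the claim succeeds only if the value we observed is still in
// place and is not already the requested claim value.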
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}
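
// next_block_start_careful() below is a binary search for the first block
// start at or above addr; the midpoint is biased upward (see the comment in
// the loop) so the search converges on "high".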
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount.  Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high".  This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
  _next_in_special_set = r;
}

void HeapRegion::set_on_unclean_list(bool b) {
  _is_on_unclean_list = b;
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER


HeapRegion::
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
           MemRegion mr, bool is_zeroed)
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _next_fk(HeapRegionDCTOC::NoFilterKind),
    _hrs_index(-1),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false), _is_gc_alloc_region(false),
    _is_on_free_list(false), _is_on_unclean_list(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _zfs(NotZeroFilled),
    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}
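
// NextCompactionHeapRegionClosure scans the regions that follow a given
// target region and picks the first non-humongous one as the next
// compaction space.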
class NextCompactionHeapRegionClosure: public HeapRegionClosure {
  const HeapRegion* _target;
  bool _target_seen;
  HeapRegion* _last;
  CompactibleSpace* _res;
public:
  NextCompactionHeapRegionClosure(const HeapRegion* target) :
    _target(target), _target_seen(false), _res(NULL) {}
  bool doHeapRegion(HeapRegion* cur) {
    if (_target_seen) {
      if (!cur->isHumongous()) {
        _res = cur;
        return true;
      }
    } else if (cur == _target) {
      _target_seen = true;
    }
    return false;
  }
  CompactibleSpace* result() { return _res; }
};

CompactibleSpace* HeapRegion::next_compaction_space() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // cast away const-ness
  HeapRegion* r = (HeapRegion*) this;
  NextCompactionHeapRegionClosure blk(r);
  g1h->heap_region_iterate_from(r, &blk);
  return blk.result();
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)


void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

#ifdef DEBUG
HeapWord* HeapRegion::allocate(size_t size) {
  jint state = zero_fill_state();
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif

void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  assert(ZF_mon->owned_by_self() ||
         Universe::heap()->is_gc_active(),
         "Must hold the lock or be a full GC to modify.");
#ifdef ASSERT
  if (top() != bottom() && zfs != Allocated) {
    ResourceMark rm;
    stringStream region_str;
    print_on(&region_str);
    assert(top() == bottom() || zfs == Allocated,
           err_msg("Region must be empty, or we must be setting it to allocated. "
                   "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
  }
#endif
  _zfs = zfs;
}

void HeapRegion::set_zero_fill_complete() {
  set_zero_fill_state_work(ZeroFilled);
  if (ZF_mon->owned_by_self()) {
    ZF_mon->notify_all();
  }
}
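
// Zero-fill states progress NotZeroFilled -> ZeroFilling -> ZeroFilled ->
// Allocated.  ensure_zero_filled_locked() either zero-fills the region itself
// or waits for the concurrent zero-fill thread, depending on the current
// state and on whether we are at a safepoint.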
void HeapRegion::ensure_zero_filled() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  ensure_zero_filled_locked();
}

void HeapRegion::ensure_zero_filled_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  assert(should_ignore_zf || Heap_lock->is_locked(),
         "Either we're in a GC or we're allocating a region.");
  switch (zero_fill_state()) {
  case HeapRegion::NotZeroFilled:
    set_zero_fill_in_progress(Thread::current());
    {
      ZF_mon->unlock();
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      ZF_mon->lock_without_safepoint_check();
    }
    // A trap.
    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
              && zero_filler() == Thread::current(),
              "AHA!  Tell Dave D if you see this...");
    set_zero_fill_complete();
    // gclog_or_tty->print_cr("Did sync ZF.");
    ConcurrentZFThread::note_sync_zfs();
    break;
  case HeapRegion::ZeroFilling:
    if (should_ignore_zf) {
      // We can "break" the lock and take over the work.
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      set_zero_fill_complete();
      ConcurrentZFThread::note_sync_zfs();
      break;
    } else {
      ConcurrentZFThread::wait_for_ZF_completed(this);
    }
  case HeapRegion::ZeroFilled:
    // Nothing to do.
    break;
  case HeapRegion::Allocated:
    guarantee(false, "Should not call on allocated regions.");
  }
  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}
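
// object_iterate_mem_careful() must tolerate a region that is still being
// allocated into: if it reaches an object whose header has not been installed
// yet (klass_or_null() == NULL) it stops and returns that address instead of
// walking any further.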
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                        ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  while (cur <= mr.start()) {
    if (oop(cur)->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass_or_null() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary.  If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}
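
// print_on() emits a one-line summary per region: humongous kind (HS/HC),
// collection-set or GC-alloc status (CS / A), young or survivor (Y / SU),
// free (F), the GC time stamp, and the prev/next top-at-mark-start pointers.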
750 |
void HeapRegion::print() const { print_on(gclog_or_tty); } |
|
751 |
void HeapRegion::print_on(outputStream* st) const { |
|
752 |
if (isHumongous()) { |
|
753 |
if (startsHumongous()) |
|
754 |
st->print(" HS"); |
|
755 |
else |
|
756 |
st->print(" HC"); |
|
757 |
} else { |
|
758 |
st->print(" "); |
|
759 |
} |
|
760 |
if (in_collection_set()) |
|
761 |
st->print(" CS"); |
|
762 |
else if (is_gc_alloc_region()) |
|
763 |
st->print(" A "); |
|
764 |
else |
|
765 |
st->print(" "); |
|
766 |
if (is_young()) |
|
5350
cccf0925702e
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
5344
diff
changeset
|
767 |
st->print(is_survivor() ? " SU" : " Y "); |
1374 | 768 |
else |
769 |
st->print(" "); |
|
770 |
if (is_empty()) |
|
771 |
st->print(" F"); |
|
772 |
else |
|
773 |
st->print(" "); |
|
4023
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
774 |
st->print(" %5d", _gc_time_stamp); |
5344 | 775 |
st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT, |
776 |
prev_top_at_mark_start(), next_top_at_mark_start()); |
|
1374 | 777 |
G1OffsetTableContigSpace::print_on(st); |
778 |
} |
|
779 |
||
3000 | 780 |
void HeapRegion::verify(bool allow_dirty) const { |
4023
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
781 |
bool dummy = false; |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
782 |
verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy); |
3000 | 783 |
} |
784 |
||
1374 | 785 |
#define OBJ_SAMPLE_INTERVAL 0 |
786 |
#define BLOCK_SAMPLE_INTERVAL 100 |
|
787 |
||
788 |
// This really ought to be commoned up into OffsetTableContigSpace somehow. |
|
789 |
// We would need a mechanism to make that code skip dead objects. |
|
790 |
||
4023
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
791 |
void HeapRegion::verify(bool allow_dirty, |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
792 |
bool use_prev_marking, |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
793 |
bool* failures) const { |
1374 | 794 |
G1CollectedHeap* g1 = G1CollectedHeap::heap(); |
4023
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
795 |
*failures = false; |
1374 | 796 |
HeapWord* p = bottom(); |
797 |
HeapWord* prev_p = NULL; |
|
798 |
int objs = 0; |
|
799 |
int blocks = 0; |
|
3000 | 800 |
VerifyLiveClosure vl_cl(g1, use_prev_marking); |
6260
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
801 |
bool is_humongous = isHumongous(); |
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
802 |
size_t object_num = 0; |
1374 | 803 |
while (p < top()) { |
804 |
size_t size = oop(p)->size(); |
|
6260
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
805 |
if (is_humongous != g1->isHumongous(size)) { |
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
806 |
gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size (" |
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
807 |
SIZE_FORMAT" words) in a %shumongous region", |
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
808 |
p, g1->isHumongous(size) ? "" : "non-", |
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
809 |
size, is_humongous ? "" : "non-"); |
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
810 |
*failures = true; |
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
811 |
} |
fba83fd3adc7
6974928: G1: sometimes humongous objects are allocated in young regions
tonyp
parents:
6247
diff
changeset
|
812 |
object_num += 1; |
1374 | 813 |
if (blocks == BLOCK_SAMPLE_INTERVAL) { |
4023
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
814 |
HeapWord* res = block_start_const(p + (size/2)); |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
815 |
if (p != res) { |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
816 |
gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and " |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
817 |
SIZE_FORMAT" returned "PTR_FORMAT, |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
818 |
p, size, res); |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
819 |
*failures = true; |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
820 |
return; |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
821 |
} |
1374 | 822 |
blocks = 0; |
823 |
} else { |
|
824 |
blocks++; |
|
825 |
} |
|
826 |
if (objs == OBJ_SAMPLE_INTERVAL) { |
|
827 |
oop obj = oop(p); |
|
3000 | 828 |
if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) { |
4023
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
829 |
if (obj->is_oop()) { |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
830 |
klassOop klass = obj->klass(); |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
831 |
if (!klass->is_perm()) { |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
832 |
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
833 |
"not in perm", klass, obj); |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
834 |
*failures = true; |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
835 |
return; |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
3697
diff
changeset
|
836 |
} else if (!klass->is_klass()) { |
6c3401503290
6882730: G1: parallel heap verification messes up region dump
tonyp
            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                   "not a klass", klass, obj);
            *failures = true;
            return;
          } else {
            vl_cl.set_containing_obj(obj);
            obj->oop_iterate(&vl_cl);
            if (vl_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vl_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          }
        } else {
          gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
          *failures = true;
          return;
        }
      }
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  HeapWord* rend = end();
  HeapWord* rtop = top();
  if (rtop < rend) {
    HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
    if (res != rtop) {
      gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
                             PTR_FORMAT" returned "PTR_FORMAT,
                             rtop, rend, res);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }
}
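
// Illustrative sketch (not part of the original sources): the midpoint probe
// above relies on the block offset table mapping any address in the region
// back to the start of the block that covers it.  For addresses in
// [top(), end()) nothing is allocated, so the lookup is expected to resolve
// to top() itself.  A simplified model of that expectation, assuming only
// the block_start_const(), top() and end() accessors used above:
//
//   bool unallocated_tail_is_consistent(const HeapRegion* hr) {
//     HeapWord* t = hr->top();
//     HeapWord* e = hr->end();
//     if (t >= e) return true;                      // nothing above top
//     HeapWord* mid = t + (e - t) / 2;              // probe the middle
//     return hr->block_start_const(mid) == t;       // must map back to top
//   }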

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                         INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                         bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
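
// Illustrative sketch (not part of the original sources): initialize_threshold()
// and cross_threshold() are the hooks an allocation path uses to keep the
// block offset table in step with the space.  Whenever a newly allocated
// block [start, end) reaches past the current threshold, the table is told
// about the block and hands back the next threshold.  Roughly, with
// hypothetical locals 'space', 'threshold', 'start' and 'word_size':
//
//   HeapWord* threshold = space->initialize_threshold();
//   ...
//   HeapWord* end = start + word_size;              // block just allocated
//   if (end > threshold) {
//     // Record the block and pick up the next threshold.
//     threshold = space->cross_threshold(start, end);
//   }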

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region.  If it does so after
    // set_saved_mark() and before _gc_time_stamp = ..., it will not yet
    // see the new time stamp and will pick up top() as the high water
    // mark of the region.  If it does so after _gc_time_stamp = ...,
    // it will pick up the right saved_mark_word() as the high water
    // mark of the region.  Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // The following fence is to force a flush of the writes above, but
    // is strictly not needed because when an allocating worker thread
    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
    // when the lock is released, the write will be flushed.
    // OrderAccess::fence();
  }
}
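
// Illustrative sketch (not part of the original sources): the pairing above
// is the usual publish pattern -- write the data (the saved mark), issue a
// StoreStore barrier, then write the value (_gc_time_stamp) that makes the
// data meaningful to readers.  A reader that sees the new time stamp is then
// guaranteed to also see the saved mark written before it.  In miniature,
// with hypothetical fields 'payload' and 'stamp':
//
//   // Publisher (in the real code, under ParGCRareEvent_lock):
//   payload = top();                    // data first
//   OrderAccess::storestore();          // order data before the stamp
//   stamp   = curr_gc_time_stamp;       // then publish the stamp
//
//   // Reader (mirrors saved_mark_word() above):
//   if (stamp < curr_gc_time_stamp) {   // stale stamp => fall back to top()
//     use(top());
//   } else {                            // fresh stamp => payload is valid
//     use(payload);
//   }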

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}

size_t RegionList::length() {
  size_t len = 0;
  HeapRegion* cur = hd();
  DEBUG_ONLY(HeapRegion* last = NULL);
  while (cur != NULL) {
    len++;
    DEBUG_ONLY(last = cur);
    cur = get_next(cur);
  }
  assert(last == tl(), "Invariant");
  return len;
}

void RegionList::insert_before_head(HeapRegion* r) {
  assert(well_formed(), "Inv");
  set_next(r, hd());
  _hd = r;
  _sz++;
  if (tl() == NULL) _tl = r;
  assert(well_formed(), "Inv");
}

void RegionList::prepend_list(RegionList* new_list) {
  assert(well_formed(), "Precondition");
  assert(new_list->well_formed(), "Precondition");
  HeapRegion* new_tl = new_list->tl();
  if (new_tl != NULL) {
    set_next(new_tl, hd());
    _hd = new_list->hd();
    _sz += new_list->sz();
    if (tl() == NULL) _tl = new_list->tl();
  } else {
    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
  }
  assert(well_formed(), "Inv");
}

void RegionList::delete_after(HeapRegion* r) {
  assert(well_formed(), "Precondition");
  assert(r != NULL, "Precondition");
  HeapRegion* next = get_next(r);
  HeapRegion* next_tl = get_next(next);
  set_next(r, next_tl);
  dec_sz();
  if (next == tl()) {
    assert(next_tl == NULL, "Inv");
    _tl = r;
  }
  assert(well_formed(), "Inv");
}

HeapRegion* RegionList::pop() {
  assert(well_formed(), "Inv");
  HeapRegion* res = hd();
  if (res != NULL) {
    _hd = get_next(res);
    _sz--;
    set_next(res, NULL);
    if (sz() == 0) _tl = NULL;
  }
  assert(well_formed(), "Inv");
  return res;
}
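
// Illustrative sketch (not part of the original sources): RegionList is a
// singly-linked, intrusive list threaded through the regions themselves via
// get_next()/set_next(), with a head, a tail and a cached size.  Typical use
// is LIFO: push with insert_before_head(), drain with pop(), and splice whole
// lists with prepend_list().  Roughly, assuming a concrete subclass such as
// UncleanRegionList, regions r1/r2 and a hypothetical process() callback:
//
//   UncleanRegionList scratch, overflow;
//   scratch.insert_before_head(r1);      // scratch: r1
//   scratch.insert_before_head(r2);      // scratch: r2 -> r1
//   scratch.prepend_list(&overflow);     // overflow's regions go in front
//   while (HeapRegion* r = scratch.pop()) {
//     process(r);                        // regions come back newest-first
//   }
//   assert(scratch.length() == 0, "drained");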