author | johnc |
Thu, 20 Jan 2011 13:57:12 -0800 | |
changeset 7924 | 6cbca8cebf93 |
parent 7923 | fc200fcd4e05 |
child 8071 | 195789ab14f9 |
permissions | -rw-r--r-- |
1374 | 1 |
/* |
7904
e90e097fced4
7007068: G1: refine the BOT during evac failure handling
tonyp
parents:
7397
diff
changeset
|
2 |
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
1374 | 3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
7 |
* published by the Free Software Foundation. |
|
8 |
* |
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
13 |
* accompanied this code). |
|
14 |
* |
|
15 |
* You should have received a copy of the GNU General Public License version |
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
* |
|
5547
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
3807
diff
changeset
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
3807
diff
changeset
|
20 |
* or visit www.oracle.com if you need additional information or have any |
f4b087cbb361
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
3807
diff
changeset
|
21 |
* questions. |
1374 | 22 |
* |
23 |
*/ |
|
24 |
||
7397 | 25 |
#include "precompiled.hpp" |
26 |
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
|
27 |
#include "gc_implementation/g1/heapRegionSeq.hpp" |
|
28 |
#include "memory/allocation.hpp" |
|
1374 | 29 |
|
30 |
// Local to this file. |
|
31 |
||
32 |
// Address-order comparator for sorting the region array: a region that
// lies entirely below another sorts first.  Two distinct regions must
// never partially overlap; equal pointers compare equal.
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  HeapRegion* a = *hr1p;
  HeapRegion* b = *hr2p;
  if (a->end() <= b->bottom()) {
    return -1;
  }
  if (b->end() <= a->bottom()) {
    return 1;
  }
  if (a != b) {
    assert(false, "We should never compare distinct overlapping regions.");
  }
  return 0;
}
|
41 |
||
1425 | 42 |
// Constructs the sequence, pre-sizing the backing growable array to
// hold at most max_size regions (allocated on the C heap).
HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
  // allow to pass the assert in GenericGrowableArray() which checks
  // that a growable array object must be on C heap if elements are.
  //
  // Note: containing object is allocated on C heap since it is CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address)&_regions,
                                             ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}
|
65 |
||
66 |
// Private methods. |
|
67 |
||
2344 | 68 |
void HeapRegionSeq::print_empty_runs() { |
1374 | 69 |
int empty_run = 0; |
70 |
int n_empty = 0; |
|
71 |
int empty_run_start; |
|
72 |
for (int i = 0; i < _regions.length(); i++) { |
|
73 |
HeapRegion* r = _regions.at(i); |
|
74 |
if (r->continuesHumongous()) continue; |
|
2344 | 75 |
if (r->is_empty()) { |
1374 | 76 |
assert(!r->isHumongous(), "H regions should not be empty."); |
77 |
if (empty_run == 0) empty_run_start = i; |
|
78 |
empty_run++; |
|
79 |
n_empty++; |
|
80 |
} else { |
|
81 |
if (empty_run > 0) { |
|
82 |
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run); |
|
83 |
empty_run = 0; |
|
84 |
} |
|
85 |
} |
|
86 |
} |
|
87 |
if (empty_run > 0) { |
|
88 |
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run); |
|
89 |
} |
|
90 |
gclog_or_tty->print_cr(" [tot = %d]", n_empty); |
|
91 |
} |
|
92 |
||
93 |
// Returns the index of hr within the sequence, or -1 if hr has no
// index assigned.  Uses the region's cached hrs_index rather than a
// linear scan.
int HeapRegionSeq::find(HeapRegion* hr) {
  // FIXME: optimized for adjacent regions of fixed size.
  const int index = hr->hrs_index();
  if (index == -1) {
    return -1;
  }
  assert(_regions.at(index) == hr, "Mismatch");
  return index;
}
|
101 |
||
102 |
||
103 |
// Public methods. |
|
104 |
||
105 |
// Adds hr to the sequence.  The common case -- hr lies entirely above
// the current top region -- is a straight append; otherwise the array
// is re-sorted by address and every region's cached index refreshed.
// Also keeps _seq_bottom as the lowest bottom() seen so far.
void HeapRegionSeq::insert(HeapRegion* hr) {
  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
  const bool append_at_end =
    _regions.length() == 0 || _regions.top()->end() <= hr->bottom();
  if (append_at_end) {
    hr->set_hrs_index(_regions.length());
    _regions.append(hr);
  } else {
    _regions.append(hr);
    _regions.sort(orderRegions);
    // Re-sorting invalidates every cached index; recompute them all.
    for (int i = 0; i < _regions.length(); i++) {
      _regions.at(i)->set_hrs_index(i);
    }
  }
  char* bot = (char*)_regions.at(0)->bottom();
  if (_seq_bottom == NULL || bot < _seq_bottom) {
    _seq_bottom = bot;
  }
}
|
121 |
||
122 |
// Number of regions currently held by the sequence.
size_t HeapRegionSeq::length() {
  return (size_t) _regions.length();
}
|
125 |
||
126 |
// Length of the maximal suffix of the sequence consisting of empty
// regions that are also physically contiguous in the address space.
size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  const int last = _regions.length() - 1;
  for (int cur = last; cur >= 0; cur--) {
    HeapRegion* hr = _regions.at(cur);
    if (!hr->is_empty()) {
      break;
    }
    // Every region except the last must abut its successor, or the
    // suffix is no longer contiguous.
    if (cur != last && _regions.at(cur + 1)->bottom() != hr->end()) {
      break;
    }
    res++;
  }
  return res;
}
|
140 |
||
7923 | 141 |
// Searches, starting at index from, for num consecutive empty regions.
// Returns the index of the first region of such a run, or -1 if no run
// of the required length exists at or after from.
int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
  assert(num > 1, "pre-condition");
  assert(0 <= from && from <= _regions.length(),
         err_msg("from: %d should be valid and <= than %d",
                 from, _regions.length()));

  int first = -1;
  size_t num_so_far = 0;
  int curr = from;
  for (; curr < _regions.length() && num_so_far < num; curr += 1) {
    if (_regions.at(curr)->is_empty()) {
      if (first == -1) {
        // Start a new candidate run.
        first = curr;
        num_so_far = 1;
      } else {
        num_so_far += 1;
      }
    } else {
      // Run broken by a non-empty region: start over.
      first = -1;
      num_so_far = 0;
    }
  }

  assert(num_so_far <= num, "post-condition");
  if (num_so_far < num) {
    // we failed to find enough space for the humongous object
    return -1;
  }
  // we find enough space for the humongous object
  assert(from <= first && first < _regions.length(), "post-condition");
  assert(first < curr && (curr - first) == (int) num, "post-condition");
  for (int i = first; i < first + (int) num; ++i) {
    assert(_regions.at(i)->is_empty(), "post-condition");
  }
  return first;
}
|
180 |
||
181 |
// Finds num consecutive empty regions, searching first from the cached
// _alloc_search_start and then, on failure, from the beginning of the
// sequence.  On success returns the run's first index and advances the
// search cache past the run; returns -1 otherwise.
int HeapRegionSeq::find_contiguous(size_t num) {
  assert(num > 1, "otherwise we should not be calling this");
  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
         err_msg("_alloc_search_start: %d should be valid and <= than %d",
                 _alloc_search_start, _regions.length()));

  const int start = _alloc_search_start;
  int res = find_contiguous_from(start, num);
  if (res == -1 && start != 0) {
    // Try starting from the beginning. If _alloc_search_start was 0,
    // no point in doing this again.
    res = find_contiguous_from(0, num);
  }
  if (res != -1) {
    assert(0 <= res && res < _regions.length(),
           err_msg("res: %d should be valid", res));
    _alloc_search_start = res + (int) num;
  }
  assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
         err_msg("_alloc_search_start: %d should be valid",
                 _alloc_search_start));
  return res;
}
|
204 |
||
205 |
// Applies blk to every region, starting at the beginning of the
// sequence (delegates to iterate_from with a NULL starting region).
void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}
|
208 |
||
209 |
// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence.  In all
// other cases a linear search is required to find the index of r.
//
// Iterates over all regions starting at r's index, wrapping around to
// the beginning; stops early (marking the closure incomplete) when
// doHeapRegion() returns a true result.
void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
      j = find(r);
#endif
      // NOTE(review): with the #if 0 block above disabled, the find(r)
      // call is compiled out, so j remains 0 and iteration always
      // starts at index 0 regardless of r -- confirm this is intended.
      if (j < 0) {
        j = 0;
      }
#if 0
    }
#endif
  }
  int i;
  // First leg: from r's index to the end of the sequence.
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  // Second leg: wrap around from the beginning back up to r's index.
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}
|
267 |
||
268 |
void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) { |
|
269 |
int len = _regions.length(); |
|
270 |
int i; |
|
271 |
for (i = idx; i < len; i++) { |
|
272 |
if (blk->doHeapRegion(_regions.at(i))) { |
|
273 |
blk->incomplete(); |
|
274 |
return; |
|
275 |
} |
|
276 |
} |
|
277 |
for (i = 0; i < idx; i++) { |
|
278 |
if (blk->doHeapRegion(_regions.at(i))) { |
|
279 |
blk->incomplete(); |
|
280 |
return; |
|
281 |
} |
|
282 |
} |
|
283 |
} |
|
284 |
||
285 |
// Pops empty regions off the end of the sequence until at least
// shrink_bytes have been released or a humongous / non-empty region is
// reached.  num_regions_deleted is incremented once per region removed.
// Returns the address range that was uncovered.
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  // Reset this in case it's currently pointing into the regions that
  // we just removed.
  _alloc_search_start = 0;

  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }

  int idx = _regions.length() - 1;
  HeapWord* const seq_end = _regions.at(idx)->end();
  HeapWord* last_start = seq_end;
  size_t remaining = shrink_bytes;
  while (idx >= 0 && remaining > 0) {
    HeapRegion* hr = _regions.at(idx);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (hr->isHumongous()) {
      return MemRegion(last_start, seq_end);
    }
    assert(hr == _regions.top(), "Should be top");
    if (!hr->is_empty()) {
      break;
    }
    remaining -= hr->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = hr->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region. We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete hr;
    idx--;
  }
  return MemRegion(last_start, seq_end);
}
|
323 |
||
324 |
class PrintHeapRegionClosure : public HeapRegionClosure { |
|
325 |
public: |
|
326 |
bool doHeapRegion(HeapRegion* r) { |
|
327 |
gclog_or_tty->print(PTR_FORMAT ":", r); |
|
328 |
r->print(); |
|
329 |
return false; |
|
330 |
} |
|
331 |
}; |
|
332 |
||
333 |
void HeapRegionSeq::print() { |
|
334 |
PrintHeapRegionClosure cl; |
|
335 |
iterate(&cl); |
|
336 |
} |