/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
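
// This file implements the bodies of the Space class hierarchy declared in
// memory/space.hpp: the dirty-card scanning closures (DirtyCardToOopClosure
// and its filtering and contiguous-space variants), mark-compact support for
// CompactibleSpace, and the locked and lock-free bump-pointer allocation
// paths of ContiguousSpace, EdenSpace and ConcEdenSpace.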

void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right-end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or may be planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last   = mr.last();
  HeapWord* top    = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}

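// A concrete reading of the trimming logic above: with ObjHeadPreciseArray
// precision the dirty regions are handed to do_MemRegion() in decreasing
// address order (see the "Not decreasing" assert), so _min_done records the
// lowest address already processed. Any part of the current region at or
// above _min_done was covered by an earlier call and is trimmed off before
// walk_mem_region() runs; extended_mr is what remains after both the
// rightward extension in get_actual_top() and this trimming.
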
DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

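// The point of the replication above: with the closure's static type visible
// at the oop_iterate() call sites, the compiler can bind to the specialized,
// non-virtual iteration path for FilteringClosure instead of dispatching
// through the generic ExtendedOopClosure interface for every field.
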
DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region() which
  // is not specific to a space, when the ContiguousSpace version
  // is called, it is always with regard to a space and this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size));
  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}

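// Usage note: forward() is driven once per live object by the
// SCAN_AND_FORWARD macro (see memory/space.hpp) during the first phase of
// mark-compact; the returned value threads through that scan as the running
// allocation cursor ("compact_top") of the current destination space.
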
bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

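// insert_deadspace() implements the dead-space allowance that
// allowed_dead_ratio() (at the end of this file) feeds: rather than sliding
// every live object left, the compaction scan may leave a small run of dead
// words in place by overwriting it with a filler object and setting its mark,
// so the run is treated as live for the remainder of the compaction.
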
#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());

      debug_only(prev_q = q);
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));

      q += size;
    } else {
      // q is not a live object, but we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

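// The methods above and below mirror the passes of serial mark-compact:
// prepare_for_compaction() computes forwarding addresses (SCAN_AND_FORWARD),
// adjust_pointers() rewrites interior oops to those addresses, and compact()
// finally slides the objects to their new locations (SCAN_AND_COMPACT).
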
void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();  // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr);  // min address for next time
  }
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();  // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in the more general method) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr);  // min address for next time
  }
}

#ifndef SERIALGC
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
    }                                                                       \
  }

ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // SERIALGC

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate, but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) {
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), top());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(blk);
    return;
  }
  assert(mr.end() <= top(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Handle first object specially.
  oop obj = oop(obj_addr);
  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
  obj_addr += obj->oop_iterate(&smr_blk);
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than the region end, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a contiguous space object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur  = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(), "Should be an object start");
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  HeapWord* current_top = top();
  assert(p <= current_top, "p is not a block start");
  assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*)p);
  }
}

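// Note that block_start_const() above walks object-by-object from bottom()
// and is therefore linear in the number of objects below p; spaces backed
// by a block offset table (e.g. OffsetTableContigSpace, whose verify()
// below cross-checks it) can resolve the same query with a table lookup.
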
// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  // In G1 there are places where a GC worker can allocate into a
  // region using this serial allocation code without being prone to a
  // race with other GC workers (we ensure that no other GC worker can
  // access the same region at the same time). So the assert below is
  // too strong in the case of G1.
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (Thread::current()->is_VM_thread() || UseG1GC)),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of the top is returned.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

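// The loop above is a classic CAS bump-pointer allocator: each thread reads
// top(), computes its intended new top, and tries to publish it with
// Atomic::cmpxchg_ptr. Only the thread whose cmpxchg returns the value it
// read (obj) owns the words [obj, obj + size); any other racer observes a
// different return value and simply retries against the freshly observed
// top, so the path is lock-free and needs no mutex.
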
// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // allocate temporary type array decreasing free size with factor 'factor'
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

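// On the length arithmetic above: "size" is measured in HeapWords, so the
// array body of (size - array_header_size) words is converted to jint
// elements by multiplying by HeapWordSize / sizeof(jint) (e.g. 2 elements
// per word on a 64-bit VM), yielding an int[] that pads the chosen span of
// free space exactly.
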
void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() should be read before end(), because
    // top() can never be greater than end(): if an update of _soft_end
    // occurred between 'end_val = end();' and 'top_val = top();', top()
    // could grow up to the new end() and the condition 'top_val > end_val'
    // would become true. To enforce this load order,
    // OrderAccess::loadload() is required after the read of top().
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of the top is returned.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

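// The concrete reordering the loadload() above rules out: pointer_delta()
// is unsigned, so if the load of *soft_end_addr() were hoisted ahead of the
// load of _top, a thread could pair a stale soft_end with a newer, larger
// top; the subtraction would then wrap to a huge value and the bounds check
// would spuriously pass, handing out memory beyond the soft end.
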
HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

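// On the sampling constants: with OBJ_SAMPLE_INTERVAL at 0 every object in
// the space is verified, while BLOCK_SAMPLE_INTERVAL at 100 makes the
// offset-table cross-check (block_start_const of the object's midpoint)
// fire only about once per hundred blocks, keeping verification affordable.
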
size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}