/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZPAGE_INLINE_HPP
#define SHARE_GC_Z_ZPAGE_INLINE_HPP

#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zForwardingTable.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLiveMap.inline.hpp"
#include "gc/z/zMark.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

inline const char* ZPage::type_to_string() const {
  switch (type()) {
  case ZPageTypeSmall:
    return "Small";

  case ZPageTypeMedium:
    return "Medium";

  default:
    assert(type() == ZPageTypeLarge, "Invalid page type");
    return "Large";
  }
}

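// Upper bound on the number of objects this page can contain: one for a
// large page, otherwise the page size divided by the object alignment.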
inline uint32_t ZPage::object_max_count() const {
  switch (type()) {
  case ZPageTypeLarge:
    // A large page can only contain a single
    // object aligned to the start of the page.
    return 1;

  default:
    return (uint32_t)(size() >> object_alignment_shift());
  }
}

inline size_t ZPage::object_alignment_shift() const {
  switch (type()) {
  case ZPageTypeSmall:
    return ZObjectAlignmentSmallShift;

  case ZPageTypeMedium:
    return ZObjectAlignmentMediumShift;

  default:
    assert(type() == ZPageTypeLarge, "Invalid page type");
    return ZObjectAlignmentLargeShift;
  }
}

inline size_t ZPage::object_alignment() const {
  switch (type()) {
  case ZPageTypeSmall:
    return ZObjectAlignmentSmall;

  case ZPageTypeMedium:
    return ZObjectAlignmentMedium;

  default:
    assert(type() == ZPageTypeLarge, "Invalid page type");
    return ZObjectAlignmentLarge;
  }
}

inline uint8_t ZPage::type() const {
  return _type;
}

inline uintptr_t ZPage::start() const {
  return _virtual.start();
}

inline uintptr_t ZPage::end() const {
  return _virtual.end();
}

inline size_t ZPage::size() const {
  return _virtual.size();
}

inline uintptr_t ZPage::top() const {
  return _top;
}

inline size_t ZPage::remaining() const {
  return end() - top();
}

inline ZPhysicalMemory& ZPage::physical_memory() {
  return _physical;
}

inline const ZVirtualMemory& ZPage::virtual_memory() const {
  return _virtual;
}

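// The NUMA id is resolved lazily on first use; (uint8_t)-1 marks it as
// not yet determined.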
inline uint8_t ZPage::numa_id() {
  if (_numa_id == (uint8_t)-1) {
    _numa_id = (uint8_t)ZNUMA::memory_id(ZAddress::good(start()));
  }

  return _numa_id;
}

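// Increment the reference count unless it has already dropped to zero,
// in which case the page is no longer active and the increment fails.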
inline bool ZPage::inc_refcount() {
  for (uint32_t prev_refcount = _refcount; prev_refcount > 0; prev_refcount = _refcount) {
    if (Atomic::cmpxchg(prev_refcount + 1, &_refcount, prev_refcount) == prev_refcount) {
      return true;
    }
  }
  return false;
}

inline bool ZPage::dec_refcount() {
  assert(is_active(), "Should be active");
  return Atomic::sub(1u, &_refcount) == 0;
}

inline bool ZPage::is_in(uintptr_t addr) const {
  const uintptr_t offset = ZAddress::offset(addr);
  return offset >= start() && offset < top();
}

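// Block interface used when walking the heap: any address below top() is
// part of an object, while [top(), end()) is treated as a single
// unallocated block.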
inline uintptr_t ZPage::block_start(uintptr_t addr) const {
  if (block_is_obj(addr)) {
    return addr;
  } else {
    return ZAddress::good(top());
  }
}

inline size_t ZPage::block_size(uintptr_t addr) const {
  if (block_is_obj(addr)) {
    return ZUtils::object_size(addr);
  } else {
    return end() - top();
  }
}

inline bool ZPage::block_is_obj(uintptr_t addr) const {
  return ZAddress::offset(addr) < top();
}

inline bool ZPage::is_active() const {
  return _refcount > 0;
}

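// A page allocated during the current GC cycle carries the current global
// sequence number and is still allocating; a page from an earlier cycle is
// a candidate for relocation.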
inline bool ZPage::is_allocating() const {
  return is_active() && _seqnum == ZGlobalSeqNum;
}

inline bool ZPage::is_relocatable() const {
  return is_active() && _seqnum < ZGlobalSeqNum;
}

inline bool ZPage::is_detached() const {
  return _physical.is_null();
}

inline bool ZPage::is_mapped() const {
  return _seqnum > 0;
}

inline void ZPage::set_pre_mapped() {
  // The _seqnum variable is also used to signal that the virtual and physical
  // memory has been mapped. So, we need to set it to non-zero when the memory
  // has been pre-mapped.
  _seqnum = 1;
}

inline bool ZPage::is_pinned() const {
  return _pinned;
}

inline void ZPage::set_pinned() {
  _pinned = 1;
}

inline bool ZPage::is_forwarding() const {
  return !_forwarding.is_null();
}

inline void ZPage::set_forwarding() {
  assert(is_marked(), "Should be marked");
  _forwarding.setup(_livemap.live_objects());
}

inline void ZPage::reset_forwarding() {
  _forwarding.reset();
  _pinned = 0;
}

inline void ZPage::verify_forwarding() const {
  _forwarding.verify(object_max_count(), _livemap.live_objects());
}

inline bool ZPage::is_marked() const {
  assert(is_relocatable(), "Invalid page state");
  return _livemap.is_marked();
}

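// The live map keeps two bits per object slot: the first records that the
// object is marked, the second that it is strongly marked (reachable
// without passing through a finalizable reference). Hence the index * 2.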
inline bool ZPage::is_object_marked(uintptr_t addr) const {
  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
  return _livemap.get(index);
}

inline bool ZPage::is_object_strongly_marked(uintptr_t addr) const {
  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
  return _livemap.get(index + 1);
}

inline bool ZPage::is_object_live(uintptr_t addr) const {
  return is_allocating() || is_object_marked(addr);
}

inline bool ZPage::is_object_strongly_live(uintptr_t addr) const {
  return is_allocating() || is_object_strongly_marked(addr);
}

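// Atomically set the mark bit (strong or finalizable) for the object at addr.
// The inc_live out parameter signals whether the caller needs to update the
// page's live counters (see ZLiveMap::set_atomic).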
inline bool ZPage::mark_object(uintptr_t addr, bool finalizable, bool& inc_live) {
  assert(ZAddress::is_marked(addr), "Invalid address");
  assert(is_relocatable(), "Invalid page state");
  assert(is_in(addr), "Invalid address");

  // Set mark bit
  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
  return _livemap.set_atomic(index, finalizable, inc_live);
}

inline void ZPage::inc_live_atomic(uint32_t objects, size_t bytes) {
  _livemap.inc_live_atomic(objects, bytes);
}

inline size_t ZPage::live_bytes() const {
  assert(is_marked(), "Should be marked");
  return _livemap.live_bytes();
}

inline void ZPage::object_iterate(ObjectClosure* cl) {
  _livemap.iterate(cl, ZAddress::good(start()), object_alignment_shift());
}

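// Bump-pointer allocation for use by a single thread. Returns a good
// (colored) address, or 0 if the page does not have enough space left.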
inline uintptr_t ZPage::alloc_object(size_t size) {
  assert(is_allocating(), "Invalid state");

  const size_t aligned_size = align_up(size, object_alignment());
  const uintptr_t addr = top();
  const uintptr_t new_top = addr + aligned_size;

  if (new_top > end()) {
    // Not enough space left
    return 0;
  }

  _top = new_top;

  // Fill alignment padding if needed
  if (aligned_size != size) {
    ZUtils::insert_filler_object(addr + size, aligned_size - size);
  }

  return ZAddress::good(addr);
}

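// Lock-free variant of alloc_object(): bumps _top with a CAS and retries
// on contention, so multiple threads can allocate from the same page.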
inline uintptr_t ZPage::alloc_object_atomic(size_t size) {
  assert(is_allocating(), "Invalid state");

  const size_t aligned_size = align_up(size, object_alignment());
  uintptr_t addr = top();

  for (;;) {
    const uintptr_t new_top = addr + aligned_size;
    if (new_top > end()) {
      // Not enough space left
      return 0;
    }

    const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, addr);
    if (prev_top == addr) {
      // Fill alignment padding if needed
      if (aligned_size != size) {
        ZUtils::insert_filler_object(addr + size, aligned_size - size);
      }

      // Success
      return ZAddress::good(addr);
    }

    // Retry
    addr = prev_top;
  }
}

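// Undo an allocation by moving top() back, which only succeeds if the
// object at addr is the most recently allocated one (it ends exactly at
// the current top). The atomic variant below does the same using a CAS.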
inline bool ZPage::undo_alloc_object(uintptr_t addr, size_t size) {
  assert(is_allocating(), "Invalid state");

  const uintptr_t offset = ZAddress::offset(addr);
  const size_t aligned_size = align_up(size, object_alignment());
  const uintptr_t old_top = top();
  const uintptr_t new_top = old_top - aligned_size;

  if (new_top != offset) {
    // Failed to undo allocation, not the last allocated object
    return false;
  }

  _top = new_top;

  // Success
  return true;
}

inline bool ZPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) {
  assert(is_allocating(), "Invalid state");

  const uintptr_t offset = ZAddress::offset(addr);
  const size_t aligned_size = align_up(size, object_alignment());
  uintptr_t old_top = top();

  for (;;) {
    const uintptr_t new_top = old_top - aligned_size;
    if (new_top != offset) {
      // Failed to undo allocation, not the last allocated object
      return false;
    }

    const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, old_top);
    if (prev_top == old_top) {
      // Success
      return true;
    }

    // Retry
    old_top = prev_top;
  }
}

#endif // SHARE_GC_Z_ZPAGE_INLINE_HPP