/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/threadLocalAllocBuffer.hpp"
#include "utilities/globalDefinitions.hpp"

// Forward decl.

class PLABStats;

// A per-thread allocation buffer used during GC.
class ParGCAllocBuffer: public CHeapObj<mtGC> {
protected:
  char      head[32];
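  // Note: "head" above and "tail" below are presumably padding fields,
  // intended to keep this buffer's hot per-thread fields on their own
  // cache lines and avoid false sharing between GC worker threads.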
  size_t    _word_sz;          // in HeapWord units
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _end;              // last allocatable address + 1
  HeapWord* _hard_end;         // _end + AlignmentReserve
  bool      _retained;         // whether we hold a _retained_filler
  MemRegion _retained_filler;
  // In support of ergonomic sizing of PLAB's
  size_t    _allocated;        // in HeapWord units
  size_t    _wasted;           // in HeapWord units
  char      tail[32];
  static size_t FillerHeaderSize;
  static size_t AlignmentReserve;

public:
  // Initializes the buffer to be empty, but with the given "word_sz".
  // Must get initialized with "set_buf" for an allocation to succeed.
  ParGCAllocBuffer(size_t word_sz);

  static const size_t min_size() {
    return ThreadLocalAllocBuffer::min_size();
  }

  static const size_t max_size() {
    return ThreadLocalAllocBuffer::max_size();
  }

  // If an allocation of the given "word_sz" can be satisfied within the
  // buffer, do the allocation, returning a pointer to the start of the
  // allocated block. If the allocation request cannot be satisfied,
  // return NULL.
  HeapWord* allocate(size_t word_sz) {
    HeapWord* res = _top;
    if (pointer_delta(_end, _top) >= word_sz) {
      _top = _top + word_sz;
      return res;
    } else {
      return NULL;
    }
  }

  // Undo the last allocation in the buffer, which is required to be of the
  // "obj" of the given "word_sz".
  void undo_allocation(HeapWord* obj, size_t word_sz) {
    assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
    assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
    _top = obj;
  }

  // The total (word) size of the buffer, including both allocated and
  // unallocated space.
  size_t word_sz() { return _word_sz; }

  // Should only be done if we are about to reset with a new buffer of the
  // given size.
  void set_word_size(size_t new_word_sz) {
    assert(new_word_sz > AlignmentReserve, "Too small");
    _word_sz = new_word_sz;
  }

  // The number of words of unallocated space remaining in the buffer.
  size_t words_remaining() {
    assert(_end >= _top, "Negative buffer");
    return pointer_delta(_end, _top, HeapWordSize);
  }

  bool contains(void* addr) {
    return (void*)_bottom <= addr && addr < (void*)_hard_end;
  }

  // Sets the space of the buffer to be [buf, buf+word_sz()).
  void set_buf(HeapWord* buf) {
    _bottom   = buf;
    _top      = _bottom;
    _hard_end = _bottom + word_sz();
    _end      = _hard_end - AlignmentReserve;
    assert(_end >= _top, "Negative buffer");
    // In support of ergonomic sizing
    _allocated += word_sz();
  }

  // Flush the stats supporting ergonomic sizing of PLAB's
  void flush_stats(PLABStats* stats);
  void flush_stats_and_retire(PLABStats* stats, bool retain) {
    // We flush the stats first in order to get a reading of
    // unused space in the last buffer.
    if (ResizePLAB) {
      flush_stats(stats);
    }
    // Retire the last allocation buffer.
    retire(true, retain);
  }

  // Force future allocations to fail and queries for contains()
  // to return false.
  void invalidate() {
    assert(!_retained, "Shouldn't retain an invalidated buffer.");
    _end    = _hard_end;
    _wasted += pointer_delta(_end, _top);  // unused space
    _top    = _end;      // force future allocations to fail
    _bottom = _end;      // force future contains() queries to return false
  }

  // Fills in the unallocated portion of the buffer with a garbage object.
  // If "end_of_gc" is true, this is after the last use of the buffer in the
  // GC. If "retain" is true, attempt to re-use the unused portion in the
  // next GC.
  void retire(bool end_of_gc, bool retain);

  void print() PRODUCT_RETURN;
};
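
// Illustrative usage sketch (not part of the original interface): a GC worker
// thread typically sizes its buffer from PLABStats, points it at freshly
// carved-out space, bump-allocates from it, and retires it at the end of the
// GC. The names "stats", "space" and "obj_word_sz" below are hypothetical:
//
//   ParGCAllocBuffer plab(stats->desired_plab_sz());
//   plab.set_buf(space);                         // [space, space + word_sz())
//   HeapWord* obj = plab.allocate(obj_word_sz);  // NULL => buffer exhausted;
//                                                // refill or allocate directly
//   ...
//   plab.flush_stats_and_retire(stats, /* retain */ false);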

// PLAB stats book-keeping
class PLABStats VALUE_OBJ_CLASS_SPEC {
  size_t _allocated;       // total allocated
  size_t _wasted;          // of which wasted (internal fragmentation)
  size_t _unused;          // unused in last buffer
  size_t _used;            // derived = allocated - wasted - unused
  size_t _desired_plab_sz; // output of filter (below), suitably trimmed and quantized
  AdaptiveWeightedAverage
         _filter;          // integrator with decay

public:
  PLABStats(size_t desired_plab_sz_, unsigned wt) :
    _allocated(0),
    _wasted(0),
    _unused(0),
    _used(0),
    _desired_plab_sz(desired_plab_sz_),
    _filter(wt)
  {
    size_t min_sz = min_size();
    size_t max_sz = max_size();
    size_t aligned_min_sz = align_object_size(min_sz);
    size_t aligned_max_sz = align_object_size(max_sz);
    assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz &&
           min_sz <= max_sz,
           "PLAB clipping computation in adjust_desired_plab_sz()"
           " may be incorrect");
  }

  static const size_t min_size() {
    return ParGCAllocBuffer::min_size();
  }

  static const size_t max_size() {
    return ParGCAllocBuffer::max_size();
  }

  size_t desired_plab_sz() {
    return _desired_plab_sz;
  }

  void adjust_desired_plab_sz(); // filter computation, latches output to
                                 // _desired_plab_sz, clears sensor accumulators

  void add_allocated(size_t v) {
    Atomic::add_ptr(v, &_allocated);
  }

  void add_unused(size_t v) {
    Atomic::add_ptr(v, &_unused);
  }

  void add_wasted(size_t v) {
    Atomic::add_ptr(v, &_wasted);
  }
};
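
// Illustrative sketch (not part of the original interface): each worker folds
// its buffer's allocated/wasted/unused counts into a shared PLABStats via
// flush_stats() (the add_* methods are atomic, so workers may flush
// concurrently); afterwards, presumably once per GC on a single thread,
// adjust_desired_plab_sz() runs the weighted-average filter and latches a new
// desired_plab_sz() for the next GC's buffers. The name "plab_stats" below is
// hypothetical:
//
//   plab.flush_stats_and_retire(&plab_stats, /* retain */ false); // per worker
//   plab_stats.adjust_desired_plab_sz();                          // once, post-GC
//   size_t next_sz = plab_stats.desired_plab_sz();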

class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
  BlockOffsetArrayContigSpace _bt;
  BlockOffsetSharedArray*     _bsa;
  HeapWord*                   _true_end;  // end of the whole ParGCAllocBuffer

  static const size_t ChunkSizeInWords;
  static const size_t ChunkSizeInBytes;
  HeapWord* allocate_slow(size_t word_sz);

  void fill_region_with_block(MemRegion mr, bool contig);

public:
  ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);

  HeapWord* allocate(size_t word_sz) {
    HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    } else {
      res = allocate_slow(word_sz);
    }
    return res;
  }

  void undo_allocation(HeapWord* obj, size_t word_sz);

  void set_buf(HeapWord* buf_start) {
    ParGCAllocBuffer::set_buf(buf_start);
    _true_end = _hard_end;
    _bt.set_region(MemRegion(buf_start, word_sz()));
    _bt.initialize_threshold();
  }

  void retire(bool end_of_gc, bool retain);

  MemRegion range() {
    return MemRegion(_top, _true_end);
  }
};
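
// Illustrative note (not part of the original interface): the BOT-backed
// variant is used the same way as ParGCAllocBuffer, but every successful
// fast-path allocate() additionally records the new block in the block
// offset table (_bt.alloc_block), and falls back to allocate_slow() when the
// bump-pointer path fails, so the space covered by the buffer stays
// block-walkable via the shared BlockOffsetSharedArray.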

#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP