14123
|
1 |
/*
|
30578
|
2 |
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
|
14123
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 |
*
|
|
5 |
* This code is free software; you can redistribute it and/or modify it
|
|
6 |
* under the terms of the GNU General Public License version 2 only, as
|
|
7 |
* published by the Free Software Foundation.
|
|
8 |
*
|
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
13 |
* accompanied this code).
|
|
14 |
*
|
|
15 |
* You should have received a copy of the GNU General Public License version
|
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 |
*
|
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
20 |
* or visit www.oracle.com if you need additional information or have any
|
|
21 |
* questions.
|
|
22 |
*
|
|
23 |
*/
|
|
24 |
|
30578
|
25 |
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_ADAPTIVEFREELIST_HPP
|
|
26 |
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_ADAPTIVEFREELIST_HPP
|
14123
|
27 |
|
|
28 |
#include "memory/freeList.hpp"
|
|
29 |
#include "gc_implementation/shared/allocationStats.hpp"
|
|
30 |
|
|
31 |
class CompactibleFreeListSpace;
|
|
32 |
|
|
33 |
// A class for maintaining a free list of Chunk's. The FreeList
|
|
34 |
// maintains the structure of the list (head, tail, etc.) plus
|
|
35 |
// statistics for allocations from the list. The links between items
|
|
36 |
// are not part of FreeList. The statistics are
|
|
37 |
// used to make decisions about coalescing Chunk's when they
|
|
38 |
// are swept during collection.
|
|
39 |
//
|
|
40 |
// See the corresponding .cpp file for a description of the specifics
|
|
41 |
// for that implementation.
|
|
42 |
|
|
43 |
class Mutex;
|
|
44 |
|
|
45 |
template <class Chunk>
class AdaptiveFreeList : public FreeList<Chunk> {
  friend class CompactibleFreeListSpace;
  friend class VMStructs;
  // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;

  size_t        _hint;          // next larger size list with a positive surplus

  AllocationStats _allocation_stats; // allocation-related statistics

 public:

  AdaptiveFreeList();

  // Re-export selected base-class members so they are usable unqualified
  // here and by the friend classes above.
  using FreeList<Chunk>::assert_proper_lock_protection;
#ifdef ASSERT
  using FreeList<Chunk>::protecting_lock;
#endif
  using FreeList<Chunk>::count;
  using FreeList<Chunk>::size;
  using FreeList<Chunk>::verify_chunk_in_free_list;
  using FreeList<Chunk>::getFirstNChunksFromList;
  using FreeList<Chunk>::print_on;
  // Return a chunk to the list; the "record_return" overloads let the caller
  // say whether the return should be folded into the allocation statistics.
  void return_chunk_at_head(Chunk* fc, bool record_return);
  void return_chunk_at_head(Chunk* fc);
  void return_chunk_at_tail(Chunk* fc, bool record_return);
  void return_chunk_at_tail(Chunk* fc);
  using FreeList<Chunk>::return_chunk_at_tail;
  using FreeList<Chunk>::remove_chunk;
  using FreeList<Chunk>::prepend;
  using FreeList<Chunk>::print_labels_on;
  using FreeList<Chunk>::get_chunk_at_head;

  // Initialize.
  void initialize();

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  void print_on(outputStream* st, const char* c = NULL) const;

  // Index of the next larger size list with a positive surplus; 0 means none.
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    // A hint must either be cleared (0) or refer to a strictly larger size.
    assert(v == 0 || size() < v, "Bad hint");
    _hint = v;
  }

  size_t get_better_size();

  // Accessors for statistics
  void init_statistics(bool split_birth = false);

  // Direct (mutable) access to the per-list allocation statistics;
  // requires the list's lock to be held (checked in debug builds).
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  // The accessors below forward to the corresponding AllocationStats
  // fields; all mutators assert proper lock protection first.
  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  // Recompute the desired count from the current list length and the
  // given inter-/intra-sweep timing estimates.
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(count(),
                                      inter_sweep_current,
                                      inter_sweep_estimate,
                                      intra_sweep_estimate);
  }
  ssize_t coal_desired() const {
    return _allocation_stats.coal_desired();
  }
  void set_coal_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coal_desired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfr_surp() const {
    return _allocation_stats.bfr_surp();
  }
  void set_bfr_surp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfr_surp(v);
  }
  ssize_t prev_sweep() const {
    return _allocation_stats.prev_sweep();
  }
  void set_prev_sweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prev_sweep(v);
  }
  ssize_t before_sweep() const {
    return _allocation_stats.before_sweep();
  }
  void set_before_sweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_before_sweep(v);
  }

  // Births/deaths track chunks created and consumed by coalescing.
  ssize_t coal_births() const {
    return _allocation_stats.coal_births();
  }
  void set_coal_births(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coal_births(v);
  }
  void increment_coal_births() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coal_births();
  }

  ssize_t coal_deaths() const {
    return _allocation_stats.coal_deaths();
  }
  void set_coal_deaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coal_deaths(v);
  }
  void increment_coal_deaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coal_deaths();
  }

  // Births/deaths track chunks created and consumed by splitting.
  ssize_t split_births() const {
    return _allocation_stats.split_births();
  }
  void set_split_births(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_split_births(v);
  }
  void increment_split_births() {
    assert_proper_lock_protection();
    _allocation_stats.increment_split_births();
  }

  ssize_t split_deaths() const {
    return _allocation_stats.split_deaths();
  }
  void set_split_deaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_split_deaths(v);
  }
  void increment_split_deaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_split_deaths();
  }

#ifndef PRODUCT
  // For debugging. The "_returned_bytes" in all the lists are summed
  // and compared with the total number of bytes swept during a
  // collection.
  size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
  void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
  void increment_returned_bytes_by(size_t v) {
    _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
  }
  // Stats verification
  void verify_stats() const;
#endif  // NOT PRODUCT
};
|
|
228 |
|
30578
|
229 |
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_ADAPTIVEFREELIST_HPP
|