author:      iveresov
date:        Mon, 06 Oct 2008 20:59:16 -0700
changeset:   1423:1233b1e85dfd
parent:      1405:ce6e6fe90107
child:       1615:b46d9f19bde2
permissions: -rw-r--r--
/*
 * Copyright 2006-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_mutableNUMASpace.cpp.incl"

MutableNUMASpace::MutableNUMASpace() {
  _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_unused_area_complete() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_region(MemRegion mr) {
  // This method should do nothing because numa spaces are not mangled.
}
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
  assert(false, "Do not mangle MutableNUMASpace's");
}
void MutableNUMASpace::set_top_for_allocations() {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area_complete() {
  // This method should do nothing.
}
#endif  // NOT_PRODUCT

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
        size_t area_touched_words = pointer_delta(s->end(), s->top());
#ifndef ASSERT
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        if (!os::numa_has_static_binding()) {
          MemRegion invalid;
          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                       os::vm_page_size());
          if (crossing_start != crossing_end) {
            // If the object header crossed a small page boundary we mark the area
            // as invalid, rounding it to a page_size().
            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                                 s->end());
            invalid = MemRegion(start, end);
          }

          ls->add_invalid_region(invalid);
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}
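
// Explanatory note (not part of the original source): "parseability" means a
// heap walker can start at bottom() and advance object by object via
// oop::size(). Any chunk lying entirely below top() therefore gets its free
// tail [s->top(), s->end()) overwritten with a dead filler object, so a
// linear walk never lands on raw memory. The invalid-region bookkeeping only
// matters on systems without static memory binding, where writing the filler
// words may first-touch pages on a node other than the chunk's lgrp.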

size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}


size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location; the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}

size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology has changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i]));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}
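
// Explanatory note (not part of the original source): after a topology
// change every JavaThread's lgrp_id is reset to -1 above, which forces each
// thread to re-query its home locality group on its next allocation (see
// allocate() and cas_allocate() below) instead of continuing to allocate
// from a chunk that may no longer be local to it.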

// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
  }
}

// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    // And clear the alloc-rate statistics.
    // In future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      MutableSpace *s = ls->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
      ls->clear_alloc_rate();
    }
    // A NUMA space is never mangled
    initialize(region(),
               SpaceDecorator::Clear,
               SpaceDecorator::DontMangle);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      // A NUMA space is never mangled
      initialize(region(),
                 SpaceDecorator::Clear,
                 SpaceDecorator::DontMangle);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}
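
// Explanatory note (not part of the original source): the guard
// adaptation_cycles() < samples_count() holds exactly when
// accumulate_statistics() has gathered fresh allocation-rate samples since
// the last re-layout; initialize() records their consumption by calling
// set_adaptation_cycles(samples_count()), so the space is re-laid-out at
// most once per batch of new samples.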

// Scan pages. Free pages that have smaller size or wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count)
{
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}
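
// Illustration with hypothetical numbers (not part of the original source):
// for base_space_size() == 1024 pages, 4 locality groups and a 4K page,
// default_chunk_size() yields 1024 / 4 * 4096 == 1M per chunk. The integer
// division is done first, which keeps the result page_size()-aligned.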

// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on a sequence of i's from 0 to
// lgrp_spaces()->length().
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      chunk_size = MIN2((off_t)chunk_size, (off_t)current_chunk_size(i) + (off_t)limit);
    } else {
      chunk_size = MAX2((off_t)chunk_size, (off_t)current_chunk_size(i) - (off_t)limit);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}
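
// Illustration with hypothetical numbers (not part of the original source):
// with three groups whose average allocation rates are 2, 1 and 1, the call
// for i == 0 sizes chunk 0 to 2 / (2 + 1 + 1) == 50% of the pages still
// available; the call for i == 1 then claims 1 / (1 + 1) == 50% of what
// remains, and the top chunk simply receives the rest (see initialize()).
// A non-zero limit caps how far a chunk may grow or shrink away from
// current_chunk_size(i) in a single resizing step.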


// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there bottom?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), os::large_page_size());
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= os::large_page_size()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there top?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), os::large_page_size());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= os::large_page_size()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}

// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. Return the invalid_region aligned to the page_size()
// boundary if it's inside the intersection. Return non-empty invalid_region
// if it lies inside the intersection (also page-aligned).
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else
  if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
    *intersection = MemRegion(intersection->start(), invalid_region->start());
    *invalid_region = MemRegion();
  } else
  if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
    *intersection = MemRegion(new_region.start(), new_region.start());
    *invalid_region = MemRegion();
  } else
  if (intersection->contains(invalid_region)) {
    // That's the only case we have to make an additional bias_region() call.
    HeapWord* start = invalid_region->start();
    HeapWord* end = invalid_region->end();
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord *p = (HeapWord*)round_down((intptr_t) start, os::large_page_size());
      if (new_region.contains(p)) {
        start = p;
      }
      p = (HeapWord*)round_to((intptr_t) end, os::large_page_size());
      if (new_region.contains(end)) {
        end = p;
      }
    }
    if (intersection->start() > start) {
      *intersection = MemRegion(start, intersection->end());
    }
    if (intersection->end() < end) {
      *intersection = MemRegion(intersection->start(), end);
    }
    *invalid_region = MemRegion(start, end);
  }
}

void MutableNUMASpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  // Must always clear the space
  clear(SpaceDecorator::DontMangle);

  // Compute chunk sizes
  size_t prev_page_size = page_size();
  set_page_size(UseLargePages ? os::large_page_size() : os::vm_page_size());
  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();

  // Try small pages if the chunk size is too small
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    set_page_size(os::vm_page_size());
    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL   ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
  }

  // Check if the space layout has changed significantly.
  // This happens when the space has been resized so that either head or tail
  // chunk became less than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing          &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();


  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing                                ||
          (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
          samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else
      if (!layout_valid || NUMASpaceResizeRate == 0) {
        // Fast adaptation. If no space resize rate is set, resize
        // the chunks instantly.
        chunk_byte_size = adaptive_chunk_size(i, 0);
      } else {
        // Slow adaptation. Resize the chunks moving no more than
        // NUMASpaceResizeRate bytes per collection.
        size_t limit = NUMASpaceResizeRate /
                       (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
        chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
      }

      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
    }

    if (i == 0) { // Bottom chunk
      if (i != lgrp_spaces()->length() - 1) {
        new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
      } else {
        new_region = MemRegion(bottom(), end());
      }
    } else
    if (i < lgrp_spaces()->length() - 1) { // Middle chunks
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(),
                             ps->end() + (chunk_byte_size >> LogHeapWordSize));
    } else { // Top chunk
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(), end());
    }
    guarantee(region().contains(new_region), "Region invariant");


    // The general case:
    // |---------------------|--invalid---|--------------------------|
    // |------------------new_region---------------------------------|
    // |----bottom_region--|---intersection---|------top_region------|
    //                     |----old_region----|
    // The intersection part has all pages in place; we don't need to migrate them.
    // Pages for the top and bottom part should be freed and then reallocated.

    MemRegion intersection = old_region.intersection(new_region);

    if (intersection.start() == NULL || intersection.end() == NULL) {
      intersection = MemRegion(new_region.start(), new_region.start());
    }

    if (!os::numa_has_static_binding()) {
      MemRegion invalid_region = ls->invalid_region().intersection(new_region);
      // Invalid region is a range of memory that could've possibly
      // been allocated on the other node. That's relevant only on Solaris where
      // there is no static memory binding.
      if (!invalid_region.is_empty()) {
        merge_regions(new_region, &intersection, &invalid_region);
        free_region(invalid_region);
        ls->set_invalid_region(MemRegion());
      }
    }

    select_tails(new_region, intersection, &bottom_region, &top_region);

    if (!os::numa_has_static_binding()) {
      // If that's a system with the first-touch policy then it's enough
      // to free the pages.
      free_region(bottom_region);
      free_region(top_region);
    } else {
      // In a system with static binding we have to change the bias whenever
      // we reshape the heap.
      bias_region(bottom_region, ls->lgrp_id());
      bias_region(top_region, ls->lgrp_id());
    }

    // Clear space (set top = bottom) but never mangle.
    s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle);

    set_adaptation_cycles(samples_count());
  }
}
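
// Illustration of the slow-adaptation budget with hypothetical numbers (not
// part of the original source): with NUMASpaceResizeRate == 1G and 4 chunks,
// limit == 1G / (4 * 5 / 2), roughly 100M, so chunk i may move at most about
// 100M * (i + 1) bytes per collection; chunks with larger indices get a
// proportionally larger budget because resizing them shifts every chunk
// that follows.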

// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length();) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      // Check if setting the chunk's top to a given value would create a hole less than
      // a minimal object; assuming that's not the last chunk in which case we don't care.
      if (i < lgrp_spaces()->length() - 1) {
        size_t remainder = pointer_delta(s->end(), value);
        const size_t minimal_object_size = oopDesc::header_size();
        if (remainder < minimal_object_size && remainder > 0) {
          // Add a filler object of a minimal size, it will cross the chunk boundary.
          SharedHeap::fill_region_with_object(MemRegion(value, minimal_object_size));
          value += minimal_object_size;
          assert(!s->contains(value), "Should be in the next chunk");
          // Restart the loop from the same chunk, since the value has moved
          // to the next one.
          continue;
        }
      }

      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (!os::numa_has_static_binding() && top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
    i++;
  }
  MutableSpace::set_top(value);
}

void MutableNUMASpace::clear(bool mangle_space) {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    // Never mangle NUMA spaces because the mangling will
    // bind the memory to a possibly unwanted lgroup.
    lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
  }
}

/*
   Linux supports static memory binding, therefore most of the logic
   dealing with a possible invalid page allocation is effectively
   disabled. Besides, there is no notion of the home node in Linux. A
   thread is allowed to migrate freely, although the scheduler is rather
   reluctant to move threads between nodes. We check for the current
   node on every allocation, and with high probability a thread stays on
   the same node for some time, allowing local access to recently allocated
   objects.
 */

HeapWord* MutableNUMASpace::allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);

  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }

  LGRPSpace* ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->allocate(size);

  if (p != NULL) {
    size_t remainder = s->free_in_words();
    if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
      s->set_top(s->top() - size);
      p = NULL;
    }
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}
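
// Explanatory note (not part of the original source): the zeroing loop in
// allocate() touches one word per small page of the new block. On a
// first-touch system (e.g. Solaris, which has no static memory binding),
// this is what actually places the backing pages on the allocating
// thread's home node before the object is used.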

// This version is lock-free.
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  LGRPSpace *ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL) {
    size_t remainder = pointer_delta(s->end(), p + size);
    if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
      if (s->cas_deallocate(p, size)) {
        // We were the last to allocate and created a fragment less than
        // a minimal object.
        p = NULL;
      } else {
        guarantee(false, "Deallocation should always succeed");
      }
    }
  }
  if (p != NULL) {
    HeapWord* cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}
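
// Explanatory note (not part of the original source): the cmpxchg_ptr loop
// in cas_allocate() only ever grows the space-wide _top. A racing thread
// either installs its own chunk top or observes that a concurrent allocation
// has already advanced _top past it, at which point the loop condition
// fails and it exits without retrying.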

void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print("  lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        lgrp_spaces()->at(i)->accumulate_statistics(page_size());
      }
      st->print("  local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}

void MutableNUMASpace::verify(bool allow_dirty) {
  // This can be called after setting an arbitrary value to the space's top,
  // so an object can cross the chunk boundary. We ensure the parsability
  // of the space and just walk the objects in linear fashion.
  ensure_parsability();
  MutableSpace::verify(allow_dirty);
}

// Scan pages and gather stats about page placement and size.
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));
}

// Scan page_count pages and verify if they have the right size and right placement.
// If invalid pages are found they are freed in the hope that subsequent reallocation
// will be more successful.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)));
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}