author       sangheki
date         Tue, 07 Mar 2017 10:25:58 -0800
changeset    46312    385a8b027e7d
parent       40655    9f644073d3a0
child        46620    750c6edff33b
permissions  -rw-r--r--

/*
 * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/mutableNUMASpace.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/thread.inline.hpp"

MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) {
  _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;

#ifdef LINUX
  // Changing the page size can lead to freeing of memory. When using large pages
  // and the memory has been both reserved and committed, Linux does not support
  // freeing parts of it.
  if (UseLargePages && !os::can_commit_large_page_memory()) {
    _must_use_large_pages = true;
  }
#endif // LINUX

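  // Query the NUMA topology and create the initial per-lgrp spaces.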
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_unused_area_complete() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_region(MemRegion mr) {
  // This method should do nothing because numa spaces are not mangled.
}
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
  assert(false, "Do not mangle MutableNUMASpace's");
}
void MutableNUMASpace::set_top_for_allocations() {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area_complete() {
  // This method should do nothing.
}
#endif  // NOT_PRODUCT

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
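        // Fill the unused tail [top(), end()) of this chunk with dead objects,
        // one filler object of at most filler_array_max_size() words at a time.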
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                   touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If the object header crossed a small page boundary we mark the area
              // as invalid, rounding it to page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}

size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}


size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location; the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::tlab_used(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return (used_in_bytes()) / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->used_in_bytes();
}


size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}

size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology has changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

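    // The topology changed: force each thread to rediscover its locality group
    // on its next allocation.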
    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}

// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start() % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start() % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
  }
}

// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    // And clear the alloc-rate statistics.
    // In future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      MutableSpace *s = ls->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
      ls->clear_alloc_rate();
    }
    // A NUMA space is never mangled
    initialize(region(),
               SpaceDecorator::Clear,
               SpaceDecorator::DontMangle);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      // A NUMA space is never mangled
      initialize(region(),
                 SpaceDecorator::Clear,
                 SpaceDecorator::DontMangle);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}

// Scan pages. Free pages that have smaller size or wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count)
{
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}

// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on a sequence of i's from 0 to
// lgrp_spaces()->length().
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
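  // Reserve at least one page for each of the chunks that follow chunk i.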
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      size_t upper_bound = pages_available * page_size();
      if (upper_bound > limit &&
          current_chunk_size(i) < upper_bound - limit) {
        // The resulting upper bound should not exceed the available
        // amount of memory (pages_available * page_size()).
        upper_bound = current_chunk_size(i) + limit;
      }
      chunk_size = MIN2(chunk_size, upper_bound);
    } else {
      size_t lower_bound = page_size();
      if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
        lower_bound = current_chunk_size(i) - limit;
      }
      chunk_size = MAX2(chunk_size, lower_bound);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}


// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there bottom?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there top?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}

// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. Return the invalid_region aligned to the page_size()
// boundary if it's inside the intersection. Return non-empty invalid_region
// if it lies inside the intersection (also page-aligned).
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else
  if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
    *intersection = MemRegion(intersection->start(), invalid_region->start());
    *invalid_region = MemRegion();
  } else
  if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
    *intersection = MemRegion(new_region.start(), new_region.start());
    *invalid_region = MemRegion();
  } else
  if (intersection->contains(invalid_region)) {
    // That's the only case we have to make an additional bias_region() call.
    HeapWord* start = invalid_region->start();
    HeapWord* end = invalid_region->end();
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
      if (new_region.contains(p)) {
        start = p;
      }
      p = (HeapWord*)round_to((intptr_t) end, alignment());
      if (new_region.contains(end)) {
        end = p;
      }
    }
    if (intersection->start() > start) {
      *intersection = MemRegion(start, intersection->end());
    }
    if (intersection->end() < end) {
      *intersection = MemRegion(intersection->start(), end);
    }
    *invalid_region = MemRegion(start, end);
  }
}

void MutableNUMASpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space,
                                  bool setup_pages) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  // Must always clear the space
  clear(SpaceDecorator::DontMangle);

  // Compute chunk sizes
  size_t prev_page_size = page_size();
  set_page_size(UseLargePages ? alignment() : os::vm_page_size());
  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();

  // Try small pages if the chunk size is too small
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    // Changing the page size below can lead to freeing of memory. So we fail initialization.
    if (_must_use_large_pages) {
      vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");
    }
    set_page_size(os::vm_page_size());
    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
  }

  // Check if the space layout has changed significantly.
  // This happens when the space has been resized so that either head or tail
  // chunk became less than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();


  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing ||
          (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
          samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else
      if (!layout_valid || NUMASpaceResizeRate == 0) {
        // Fast adaptation. If no space resize rate is set, resize
        // the chunks instantly.
        chunk_byte_size = adaptive_chunk_size(i, 0);
      } else {
        // Slow adaptation. Resize the chunks moving no more than
        // NUMASpaceResizeRate bytes per collection.
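        // The per-chunk limit grows with the chunk index; summed over all chunks
        // the limits add up to roughly NUMASpaceResizeRate (the divisor below is
        // the triangular number 1 + 2 + ... + lgrp_spaces()->length()).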
        size_t limit = NUMASpaceResizeRate /
                       (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
        chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
      }

      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
    }

    if (i == 0) { // Bottom chunk
      if (i != lgrp_spaces()->length() - 1) {
        new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
      } else {
        new_region = MemRegion(bottom(), end());
      }
    } else
    if (i < lgrp_spaces()->length() - 1) { // Middle chunks
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(),
                             ps->end() + (chunk_byte_size >> LogHeapWordSize));
    } else { // Top chunk
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(), end());
    }
    guarantee(region().contains(new_region), "Region invariant");


    // The general case:
    // |---------------------|--invalid---|--------------------------|
    // |------------------new_region---------------------------------|
    // |----bottom_region--|---intersection---|------top_region------|
    // |----old_region----|
    // The intersection part has all pages in place; we don't need to migrate them.
    // Pages for the top and bottom part should be freed and then reallocated.

    MemRegion intersection = old_region.intersection(new_region);

    if (intersection.start() == NULL || intersection.end() == NULL) {
      intersection = MemRegion(new_region.start(), new_region.start());
    }

    if (!os::numa_has_static_binding()) {
      MemRegion invalid_region = ls->invalid_region().intersection(new_region);
      // Invalid region is a range of memory that could've possibly
      // been allocated on the other node. That's relevant only on Solaris where
      // there is no static memory binding.
      if (!invalid_region.is_empty()) {
        merge_regions(new_region, &intersection, &invalid_region);
        free_region(invalid_region);
        ls->set_invalid_region(MemRegion());
      }
    }

    select_tails(new_region, intersection, &bottom_region, &top_region);

    if (!os::numa_has_static_binding()) {
      // If that's a system with the first-touch policy then it's enough
      // to free the pages.
      free_region(bottom_region);
      free_region(top_region);
    } else {
      // In a system with static binding we have to change the bias whenever
      // we reshape the heap.
      bias_region(bottom_region, ls->lgrp_id());
      bias_region(top_region, ls->lgrp_id());
    }

    // Clear space (set top = bottom) but never mangle.
    s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);

    set_adaptation_cycles(samples_count());
  }
}

// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length();) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      // Check if setting the chunk's top to the given value would create a hole less than
      // a minimal object; unless that's the last chunk, in which case we don't care.
      if (i < lgrp_spaces()->length() - 1) {
        size_t remainder = pointer_delta(s->end(), value);
        const size_t min_fill_size = CollectedHeap::min_fill_size();
        if (remainder < min_fill_size && remainder > 0) {
          // Add a minimum size filler object; it will cross the chunk boundary.
          CollectedHeap::fill_with_object(value, min_fill_size);
          value += min_fill_size;
          assert(!s->contains(value), "Should be in the next chunk");
          // Restart the loop from the same chunk, since the value has moved
          // to the next one.
          continue;
        }
      }

      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (!os::numa_has_static_binding() && top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
    i++;
  }
  MutableSpace::set_top(value);
}

void MutableNUMASpace::clear(bool mangle_space) {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    // Never mangle NUMA spaces because the mangling will
    // bind the memory to a possibly unwanted lgroup.
    lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
  }
}

/*
   Linux supports static memory binding, so most of the logic dealing with
   possibly invalid page allocation is effectively disabled. There is also no
   notion of a home node on Linux: a thread may migrate freely, although the
   scheduler is rather reluctant to move threads between nodes. We therefore
   check for the current node on every allocation, and with high probability a
   thread stays on the same node for some time, allowing local access to
   recently allocated objects.
 */

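// allocate() picks the chunk that matches the allocating thread's lgroup
// (falling back to a random chunk if that lgroup is not represented yet, e.g.
// after CPU hotplug), bumps that chunk's top, and rejects an allocation that
// would leave a free tail smaller than the minimum object size. Without
// static binding it then touches one word per page of the new block so the
// OS commits those pages while the thread is known to be on the desired node.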
HeapWord* MutableNUMASpace::allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);

  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }

  LGRPSpace* ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->allocate(size);

  if (p != NULL) {
    size_t remainder = s->free_in_words();
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      s->set_top(s->top() - size);
      p = NULL;
    }
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

// This version is lock-free.
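// Each CAS allocation is confined to one lgroup chunk: the chunk's top is
// advanced with a CAS, and if that would strand a tail smaller than the
// minimum object size the bump is undone with cas_deallocate() (which must
// succeed, since no other thread can have allocated inside such a tail).
// The space-wide _top is then raised with a cmpxchg retry loop, and pages
// are touched for placement exactly as in allocate().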
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  LGRPSpace *ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL) {
    size_t remainder = pointer_delta(s->end(), p + size);
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      if (s->cas_deallocate(p, size)) {
        // We were the last to allocate and created a fragment less than
        // a minimal object.
        p = NULL;
      } else {
        guarantee(false, "Deallocation should always succeed");
      }
    }
  }
  if (p != NULL) {
    HeapWord* cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print(" lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        lgrp_spaces()->at(i)->accumulate_statistics(page_size());
      }
      st->print(" local/remote/unbiased/uncommitted: " SIZE_FORMAT "K/"
                SIZE_FORMAT "K/" SIZE_FORMAT "K/" SIZE_FORMAT
                "K, large/small pages: " SIZE_FORMAT "/" SIZE_FORMAT "\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}

void MutableNUMASpace::verify() {
  // This can be called after setting an arbitrary value to the space's top,
  // so an object can cross the chunk boundary. We ensure the parsability
  // of the space and just walk the objects in linear fashion.
  ensure_parsability();
  MutableSpace::verify();
}

// Scan pages and gather stats about page placement and size.
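// The chunk is walked page by page with os::get_page_info(): committed pages
// are classified as large or small and as local or remote relative to this
// chunk's lgroup, uncommitted pages are counted separately, and the unaligned
// head and tail of the chunk are reported as unbiased space.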
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));
}

// Scan page_count pages and verify that they have the right size and placement.
// If invalid pages are found they are freed in the hope that subsequent
// reallocation will be more successful.
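// Scanning is incremental: last_page_scanned() records where the previous call
// stopped, so each call inspects at most page_count pages before saving its
// position. Committed runs whose page size or home lgroup does not match the
// chunk's expectation are released with os::free_memory() so that a later
// touch can re-establish the desired placement.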
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      assert(e < scan_end, "e: " PTR_FORMAT " scan_end: " PTR_FORMAT, p2i(e), p2i(scan_end));

      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}