|
1 /* |
|
2 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #include "precompiled.hpp" |
|
26 #include "gc/g1/g1NUMAStats.hpp" |
|
27 #include "logging/logStream.hpp" |
|
28 |
|
29 double G1NUMAStats::Stat::rate() const { |
|
30 return _requested == 0 ? 0 : (double)_hit / _requested * 100; |
|
31 } |
|
32 |
|
33 G1NUMAStats::NodeDataArray::NodeDataArray(uint num_nodes) { |
|
34 guarantee(num_nodes > 1, "Number of nodes (%u) should be set", num_nodes); |
|
35 |
|
36 // The row represents the number of nodes. |
|
37 _num_column = num_nodes; |
|
38 // +1 for G1MemoryNodeManager::AnyNodeIndex. |
|
39 _num_row = num_nodes + 1; |
|
40 |
|
41 _data = NEW_C_HEAP_ARRAY(size_t*, _num_row, mtGC); |
|
42 for (uint row = 0; row < _num_row; row++) { |
|
43 _data[row] = NEW_C_HEAP_ARRAY(size_t, _num_column, mtGC); |
|
44 } |
|
45 |
|
46 clear(); |
|
47 } |
|
48 |
|
49 G1NUMAStats::NodeDataArray::~NodeDataArray() { |
|
50 for (uint row = 0; row < _num_row; row++) { |
|
51 FREE_C_HEAP_ARRAY(size_t, _data[row]); |
|
52 } |
|
53 FREE_C_HEAP_ARRAY(size_t*, _data); |
|
54 } |
|
55 |
|
56 void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result) const { |
|
57 size_t requested = 0; |
|
58 size_t hit = 0; |
|
59 |
|
60 for (size_t row = 0; row < _num_row; row++) { |
|
61 for (size_t column = 0; column < _num_column; column++) { |
|
62 requested += _data[row][column]; |
|
63 if (row == column) { |
|
64 hit += _data[row][column]; |
|
65 } |
|
66 } |
|
67 } |
|
68 |
|
69 assert(result != NULL, "Invariant"); |
|
70 result->_hit = hit; |
|
71 result->_requested = requested; |
|
72 } |
|
73 |
|
74 void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result, uint req_index) const { |
|
75 size_t requested = 0; |
|
76 size_t hit = _data[req_index][req_index]; |
|
77 |
|
78 for (size_t column = 0; column < _num_column; column++) { |
|
79 requested += _data[req_index][column]; |
|
80 } |
|
81 |
|
82 assert(result != NULL, "Invariant"); |
|
83 result->_hit = hit; |
|
84 result->_requested = requested; |
|
85 } |
|
86 |
|
87 size_t G1NUMAStats::NodeDataArray::sum(uint req_index) const { |
|
88 size_t sum = 0; |
|
89 for (size_t column = 0; column < _num_column; column++) { |
|
90 sum += _data[req_index][column]; |
|
91 } |
|
92 |
|
93 return sum; |
|
94 } |
|
95 |
|
// Records one event: a request made for node index req_index that was
// satisfied on node index alloc_index. req_index may be the extra
// AnyNodeIndex row; alloc_index must be a real node index.
void G1NUMAStats::NodeDataArray::increase(uint req_index, uint alloc_index) {
  assert(req_index < _num_row,
         "Requested index %u should be less than the row size %u",
         req_index, _num_row);
  assert(alloc_index < _num_column,
         "Allocated index %u should be less than the column size %u",
         alloc_index, _num_column);
  _data[req_index][alloc_index] += 1;
}
|
105 |
|
106 void G1NUMAStats::NodeDataArray::clear() { |
|
107 for (uint row = 0; row < _num_row; row++) { |
|
108 memset((void*)_data[row], 0, sizeof(size_t) * _num_column); |
|
109 } |
|
110 } |
|
111 |
|
112 size_t G1NUMAStats::NodeDataArray::get(uint req_index, uint alloc_index) { |
|
113 return _data[req_index][alloc_index]; |
|
114 } |
|
115 |
|
116 void G1NUMAStats::NodeDataArray::copy(uint req_index, size_t* stat) { |
|
117 assert(stat != NULL, "Invariant"); |
|
118 |
|
119 for (uint column = 0; column < _num_column; column++) { |
|
120 _data[req_index][column] += stat[column]; |
|
121 } |
|
122 } |
|
123 |
|
// Creates one NodeDataArray per statistics phase (NodeDataItems).
// node_ids is stored, not copied; the caller retains ownership and the
// array must outlive this object (the destructor does not free it).
G1NUMAStats::G1NUMAStats(const int* node_ids, uint num_node_ids) :
  _node_ids(node_ids), _num_node_ids(num_node_ids), _node_data() {

  assert(_num_node_ids > 1, "Should have more than one active memory nodes %u", _num_node_ids);

  for (int i = 0; i < NodeDataItemsSentinel; i++) {
    _node_data[i] = new NodeDataArray(_num_node_ids);
  }
}
|
133 |
|
// Deletes the per-phase NodeDataArray instances. _node_ids is not owned
// by this class and is left untouched.
G1NUMAStats::~G1NUMAStats() {
  for (int i = 0; i < NodeDataItemsSentinel; i++) {
    delete _node_data[i];
  }
}
|
139 |
|
// Resets all counters for the given phase to zero.
void G1NUMAStats::clear(G1NUMAStats::NodeDataItems phase) {
  _node_data[phase]->clear();
}
|
143 |
|
// Records one requested/allocated node-index pair for the given phase.
void G1NUMAStats::update(G1NUMAStats::NodeDataItems phase,
                         uint requested_node_index,
                         uint allocated_node_index) {
  _node_data[phase]->increase(requested_node_index, allocated_node_index);
}
|
149 |
|
// Accumulates a precomputed per-node count array into the row for
// requested_node_index of the given phase (delegates to
// NodeDataArray::copy, which adds rather than overwrites).
void G1NUMAStats::copy(G1NUMAStats::NodeDataItems phase,
                       uint requested_node_index,
                       size_t* allocated_stat) {
  _node_data[phase]->copy(requested_node_index, allocated_stat);
}
|
155 |
|
156 static const char* phase_to_explanatory_string(G1NUMAStats::NodeDataItems phase) { |
|
157 switch(phase) { |
|
158 case G1NUMAStats::NewRegionAlloc: |
|
159 return "Placement match ratio"; |
|
160 case G1NUMAStats::LocalObjProcessAtCopyToSurv: |
|
161 return "Worker task locality match ratio"; |
|
162 default: |
|
163 return ""; |
|
164 } |
|
165 } |
|
166 |
|
167 #define RATE_TOTAL_FORMAT "%0.0f%% " SIZE_FORMAT "/" SIZE_FORMAT |
|
168 |
|
// Logs, at info level on gc+heap+numa, the overall and per-node hit rate
// for the given phase, in the form:
//   <label>: <rate>% <hit>/<requested> (<id>: <rate>% <hit>/<requested>, ...)
void G1NUMAStats::print_info(G1NUMAStats::NodeDataItems phase) {
  LogTarget(Info, gc, heap, numa) lt;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    Stat result;
    size_t array_width = _num_node_ids;

    // Overall rate across all rows, including AnyNodeIndex requests.
    _node_data[phase]->create_hit_rate(&result);

    ls.print("%s: " RATE_TOTAL_FORMAT " (",
      phase_to_explanatory_string(phase), result.rate(), result._hit, result._requested);

    // One comma-separated entry per active NUMA node id.
    for (uint i = 0; i < array_width; i++) {
      if (i != 0) {
        ls.print(", ");
      }
      _node_data[phase]->create_hit_rate(&result, i);
      ls.print("%d: " RATE_TOTAL_FORMAT,
        _node_ids[i], result.rate(), result._hit, result._requested);
    }
    ls.print_cr(")");
  }
}
|
193 |
|
// Logs, at debug level on gc+heap+numa, the full NewRegionAlloc matrix as
// a table: one column per allocated node id plus a Total column, one row
// per requested node id plus a final "Any" row for requests that did not
// specify a node (the AnyNodeIndex row of the matrix). Field widths
// (%8d / SIZE_FORMAT_W(8) / the padding strings) keep the columns aligned.
void G1NUMAStats::print_mutator_alloc_stat_debug() {
  LogTarget(Debug, gc, heap, numa) lt;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    uint array_width = _num_node_ids;

    // Header row: the allocated node ids, then the Total column.
    ls.print("Allocated NUMA ids ");
    for (uint i = 0; i < array_width; i++) {
      ls.print("%8d", _node_ids[i]);
    }
    ls.print_cr(" Total");

    // One row per requested node id: per-node counts then the row sum.
    ls.print("Requested NUMA id ");
    for (uint req = 0; req < array_width; req++) {
      ls.print("%3d ", _node_ids[req]);
      for (uint alloc = 0; alloc < array_width; alloc++) {
        ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->get(req, alloc));
      }
      ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->sum(req));
      ls.print_cr("");
      // Add padding to align with the string 'Requested NUMA id'.
      ls.print("                  ");
    }
    // The AnyNodeIndex row (index array_width) holds requests made
    // without a specific node.
    ls.print("Any ");
    for (uint alloc = 0; alloc < array_width; alloc++) {
      ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->get(array_width, alloc));
    }
    ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->sum(array_width));
    ls.print_cr("");
  }
}
|
226 |
|
// Prints all NUMA statistics: per-phase summaries at info level, and the
// mutator new-region allocation matrix at debug level.
void G1NUMAStats::print_statistics() {
  print_info(NewRegionAlloc);
  print_mutator_alloc_stat_debug();

  print_info(LocalObjProcessAtCopyToSurv);
}