25946
|
1 |
/*
|
|
2 |
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 |
*
|
|
5 |
* This code is free software; you can redistribute it and/or modify it
|
|
6 |
* under the terms of the GNU General Public License version 2 only, as
|
|
7 |
* published by the Free Software Foundation.
|
|
8 |
*
|
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
13 |
* accompanied this code).
|
|
14 |
*
|
|
15 |
* You should have received a copy of the GNU General Public License version
|
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 |
*
|
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
20 |
* or visit www.oracle.com if you need additional information or have any
|
|
21 |
* questions.
|
|
22 |
*
|
|
23 |
*/
|
|
24 |
#include "precompiled.hpp"
|
|
25 |
|
|
26 |
#include "runtime/atomic.hpp"
|
|
27 |
#include "runtime/atomic.inline.hpp"
|
|
28 |
#include "services/mallocSiteTable.hpp"
|
|
29 |
#include "services/mallocTracker.hpp"
|
|
30 |
#include "services/mallocTracker.inline.hpp"
|
|
31 |
#include "services/memTracker.hpp"
|
|
32 |
|
|
33 |
// Raw static storage backing the MallocMemorySnapshot singleton; the object
// is constructed into this area via placement new in
// MallocMemorySummary::initialize(), avoiding any heap allocation.
size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
|
|
34 |
|
|
35 |
// Total malloc'd memory amount
|
|
36 |
size_t MallocMemorySnapshot::total() const {
|
|
37 |
size_t amount = 0;
|
|
38 |
for (int index = 0; index < mt_number_of_types; index ++) {
|
|
39 |
amount += _malloc[index].malloc_size();
|
|
40 |
}
|
|
41 |
amount += _tracking_header.size() + total_arena();
|
|
42 |
return amount;
|
|
43 |
}
|
|
44 |
|
|
45 |
// Total malloc'd memory used by arenas
|
|
46 |
size_t MallocMemorySnapshot::total_arena() const {
|
|
47 |
size_t amount = 0;
|
|
48 |
for (int index = 0; index < mt_number_of_types; index ++) {
|
|
49 |
amount += _malloc[index].arena_size();
|
|
50 |
}
|
|
51 |
return amount;
|
|
52 |
}
|
|
53 |
|
|
54 |
|
|
55 |
void MallocMemorySnapshot::reset() {
|
|
56 |
_tracking_header.reset();
|
|
57 |
for (int index = 0; index < mt_number_of_types; index ++) {
|
|
58 |
_malloc[index].reset();
|
|
59 |
}
|
|
60 |
}
|
|
61 |
|
|
62 |
// Make adjustment by subtracting chunks used by arenas
|
|
63 |
// from total chunks to get total free chunck size
|
|
64 |
void MallocMemorySnapshot::make_adjustment() {
|
|
65 |
size_t arena_size = total_arena();
|
|
66 |
int chunk_idx = NMTUtil::flag_to_index(mtChunk);
|
|
67 |
_malloc[chunk_idx].record_free(arena_size);
|
|
68 |
}
|
|
69 |
|
|
70 |
|
|
71 |
// Construct the summary snapshot inside the pre-reserved static area
// using placement new, so initialization performs no heap allocation.
void MallocMemorySummary::initialize() {
  // The static storage must be large enough for the snapshot object.
  assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
  ::new ((void*)_snapshot)MallocMemorySnapshot();
}
|
|
76 |
|
|
77 |
void MallocHeader::release() const {
|
|
78 |
// Tracking already shutdown, no housekeeping is needed anymore
|
|
79 |
if (MemTracker::tracking_level() <= NMT_minimal) return;
|
|
80 |
|
|
81 |
MallocMemorySummary::record_free(size(), flags());
|
|
82 |
MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
|
|
83 |
if (tracking_level() == NMT_detail) {
|
|
84 |
MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
|
|
85 |
}
|
|
86 |
}
|
|
87 |
|
|
88 |
bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
|
|
89 |
size_t* bucket_idx, size_t* pos_idx) const {
|
|
90 |
bool ret = MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx);
|
|
91 |
|
|
92 |
// Something went wrong, could be OOM or overflow malloc site table.
|
|
93 |
// We want to keep tracking data under OOM circumstance, so transition to
|
|
94 |
// summary tracking.
|
|
95 |
if (!ret) {
|
|
96 |
MemTracker::transition_to(NMT_summary);
|
|
97 |
}
|
|
98 |
return ret;
|
|
99 |
}
|
|
100 |
|
|
101 |
// Retrieve the allocation call stack recorded for this block, looking it
// up in the malloc site table by the (bucket, position) indices stored in
// this header. Returns false if the stack cannot be accessed.
bool MallocHeader::get_stack(NativeCallStack& stack) const {
  return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
}
|
|
104 |
|
|
105 |
// Set up malloc tracking for the requested level. Summary counters are
// needed at summary level and above; the call-site table is only needed
// at detail level. Returns false if the site table fails to initialize.
bool MallocTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    MallocMemorySummary::initialize();
  }
  return (level == NMT_detail) ? MallocSiteTable::initialize() : true;
}
|
|
115 |
|
|
116 |
// Transition malloc tracking between two (non-off) levels. Moving up
// from minimal restarts summary collection from a clean slate; entering
// detail level sets up the call-site table, and leaving detail level
// shuts it down. Returns false if the site table fails to initialize.
bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert(from != NMT_off, "Can not transition from off state");
  assert(to != NMT_off, "Can not transition to off state");

  if (from == NMT_minimal) {
    // Counters were not maintained at minimal level; start fresh.
    MallocMemorySummary::reset();
  }

  if (to == NMT_detail) {
    assert(from == NMT_minimal || from == NMT_summary, "Just check");
    return MallocSiteTable::initialize();
  }
  if (from == NMT_detail) {
    assert(to == NMT_minimal || to == NMT_summary, "Just check");
    MallocSiteTable::shutdown();
  }
  return true;
}
|
|
132 |
|
|
133 |
// Record a malloc memory allocation
|
|
134 |
void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
|
|
135 |
const NativeCallStack& stack, NMT_TrackingLevel level) {
|
|
136 |
void* memblock; // the address for user data
|
|
137 |
MallocHeader* header = NULL;
|
|
138 |
|
|
139 |
if (malloc_base == NULL) {
|
|
140 |
return NULL;
|
|
141 |
}
|
|
142 |
|
|
143 |
// Check malloc size, size has to <= MAX_MALLOC_SIZE. This is only possible on 32-bit
|
|
144 |
// systems, when malloc size >= 1GB, but is is safe to assume it won't happen.
|
|
145 |
if (size > MAX_MALLOC_SIZE) {
|
|
146 |
fatal("Should not use malloc for big memory block, use virtual memory instead");
|
|
147 |
}
|
|
148 |
// Uses placement global new operator to initialize malloc header
|
|
149 |
switch(level) {
|
|
150 |
case NMT_off:
|
|
151 |
return malloc_base;
|
|
152 |
case NMT_minimal: {
|
|
153 |
MallocHeader* hdr = ::new (malloc_base) MallocHeader();
|
|
154 |
break;
|
|
155 |
}
|
|
156 |
case NMT_summary: {
|
|
157 |
header = ::new (malloc_base) MallocHeader(size, flags);
|
|
158 |
break;
|
|
159 |
}
|
|
160 |
case NMT_detail: {
|
|
161 |
header = ::new (malloc_base) MallocHeader(size, flags, stack);
|
|
162 |
break;
|
|
163 |
}
|
|
164 |
default:
|
|
165 |
ShouldNotReachHere();
|
|
166 |
}
|
|
167 |
memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
|
|
168 |
|
|
169 |
// The alignment check: 8 bytes alignment for 32 bit systems.
|
|
170 |
// 16 bytes alignment for 64-bit systems.
|
|
171 |
assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
|
|
172 |
|
|
173 |
// Sanity check
|
|
174 |
assert(get_memory_tracking_level(memblock) == level,
|
|
175 |
"Wrong tracking level");
|
|
176 |
|
|
177 |
#ifdef ASSERT
|
|
178 |
if (level > NMT_minimal) {
|
|
179 |
// Read back
|
|
180 |
assert(get_size(memblock) == size, "Wrong size");
|
|
181 |
assert(get_flags(memblock) == flags, "Wrong flags");
|
|
182 |
}
|
|
183 |
#endif
|
|
184 |
|
|
185 |
return memblock;
|
|
186 |
}
|
|
187 |
|
|
188 |
void* MallocTracker::record_free(void* memblock) {
|
|
189 |
// Never turned on
|
|
190 |
if (MemTracker::tracking_level() == NMT_off ||
|
|
191 |
memblock == NULL) {
|
|
192 |
return memblock;
|
|
193 |
}
|
|
194 |
MallocHeader* header = malloc_header(memblock);
|
|
195 |
header->release();
|
|
196 |
|
|
197 |
return (void*)header;
|
|
198 |
}
|
|
199 |
|
|
200 |
|