/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
35 #include "utilities/nativeCallStack.hpp" |
35 #include "utilities/nativeCallStack.hpp" |
36 |
36 |
37 // MallocSite represents a code path that eventually calls |
37 // MallocSite represents a code path that eventually calls |
38 // os::malloc() to allocate memory |
38 // os::malloc() to allocate memory |
39 class MallocSite : public AllocationSite<MemoryCounter> { |
39 class MallocSite : public AllocationSite<MemoryCounter> { |
|
40 private: |
|
41 MEMFLAGS _flags; |
|
42 |
40 public: |
43 public: |
41 MallocSite() : |
44 MallocSite() : |
42 AllocationSite<MemoryCounter>(NativeCallStack::EMPTY_STACK) { } |
45 AllocationSite<MemoryCounter>(NativeCallStack::EMPTY_STACK), _flags(mtNone) {} |
43 |
46 |
44 MallocSite(const NativeCallStack& stack) : |
47 MallocSite(const NativeCallStack& stack, MEMFLAGS flags) : |
45 AllocationSite<MemoryCounter>(stack) { } |
48 AllocationSite<MemoryCounter>(stack), _flags(flags) {} |
|
49 |
46 |
50 |
47 void allocate(size_t size) { data()->allocate(size); } |
51 void allocate(size_t size) { data()->allocate(size); } |
48 void deallocate(size_t size) { data()->deallocate(size); } |
52 void deallocate(size_t size) { data()->deallocate(size); } |
49 |
53 |
50 // Memory allocated from this code path |
54 // Memory allocated from this code path |
51 size_t size() const { return peek()->size(); } |
55 size_t size() const { return peek()->size(); } |
52 // The number of calls were made |
56 // The number of calls were made |
53 size_t count() const { return peek()->count(); } |
57 size_t count() const { return peek()->count(); } |
|
58 MEMFLAGS flags() const { return (MEMFLAGS)_flags; } |
54 }; |
59 }; |
55 |
60 |
56 // Malloc site hashtable entry |
61 // Malloc site hashtable entry |
57 class MallocSiteHashtableEntry : public CHeapObj<mtNMT> { |
62 class MallocSiteHashtableEntry : public CHeapObj<mtNMT> { |
58 private: |
63 private: |
60 MallocSiteHashtableEntry* _next; |
65 MallocSiteHashtableEntry* _next; |
61 |
66 |
62 public: |
67 public: |
63 MallocSiteHashtableEntry() : _next(NULL) { } |
68 MallocSiteHashtableEntry() : _next(NULL) { } |
64 |
69 |
65 MallocSiteHashtableEntry(const NativeCallStack& stack): |
70 MallocSiteHashtableEntry(NativeCallStack stack, MEMFLAGS flags): |
66 _malloc_site(stack), _next(NULL) { } |
71 _malloc_site(stack, flags), _next(NULL) { |
|
72 assert(flags != mtNone, "Expect a real memory type"); |
|
73 } |
67 |
74 |
68 inline const MallocSiteHashtableEntry* next() const { |
75 inline const MallocSiteHashtableEntry* next() const { |
69 return _next; |
76 return _next; |
70 } |
77 } |
71 |
78 |
196 // information was recorded. |
203 // information was recorded. |
197 // Return false only occurs under rare scenarios: |
204 // Return false only occurs under rare scenarios: |
198 // 1. out of memory |
205 // 1. out of memory |
199 // 2. overflow hash bucket |
206 // 2. overflow hash bucket |
200 static inline bool allocation_at(const NativeCallStack& stack, size_t size, |
207 static inline bool allocation_at(const NativeCallStack& stack, size_t size, |
201 size_t* bucket_idx, size_t* pos_idx) { |
208 size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) { |
202 AccessLock locker(&_access_count); |
209 AccessLock locker(&_access_count); |
203 if (locker.sharedLock()) { |
210 if (locker.sharedLock()) { |
204 NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);) |
211 NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);) |
205 MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx); |
212 MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags); |
206 if (site != NULL) site->allocate(size); |
213 if (site != NULL) site->allocate(size); |
207 return site != NULL; |
214 return site != NULL; |
208 } |
215 } |
209 return false; |
216 return false; |
210 } |
217 } |
226 |
233 |
227 // Walk this table. |
234 // Walk this table. |
228 static bool walk_malloc_site(MallocSiteWalker* walker); |
235 static bool walk_malloc_site(MallocSiteWalker* walker); |
229 |
236 |
230 private: |
237 private: |
231 static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key); |
238 static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags); |
232 static void reset(); |
239 static void reset(); |
233 |
240 |
234 // Delete a bucket linked list |
241 // Delete a bucket linked list |
235 static void delete_linked_list(MallocSiteHashtableEntry* head); |
242 static void delete_linked_list(MallocSiteHashtableEntry* head); |
236 |
243 |
237 static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx); |
244 static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags); |
238 static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx); |
245 static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx); |
239 static bool walk(MallocSiteWalker* walker); |
246 static bool walk(MallocSiteWalker* walker); |
240 |
247 |
241 static inline unsigned int hash_to_index(unsigned int hash) { |
248 static inline unsigned int hash_to_index(unsigned int hash) { |
242 return (hash % table_size); |
249 return (hash % table_size); |