/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
95 NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack) |
95 NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack) |
96 NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth))); |
96 NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth))); |
97 |
97 |
98 // Instantiate hash entry for hashtable entry allocation callsite |
98 // Instantiate hash entry for hashtable entry allocation callsite |
99 MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site) |
99 MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site) |
100 MallocSiteHashtableEntry(*stack); |
100 MallocSiteHashtableEntry(*stack, mtNMT); |
101 |
101 |
102 // Add the allocation site to hashtable. |
102 // Add the allocation site to hashtable. |
103 int index = hash_to_index(stack->hash()); |
103 int index = hash_to_index(stack->hash()); |
104 _table[index] = entry; |
104 _table[index] = entry; |
105 |
105 |
 * 1. Out of memory, it cannot allocate new hash entry.
 * 2. Overflow hash bucket.
 * Under any of above circumstances, caller should handle the situation.
 */
136 MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, |
136 MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, |
137 size_t* pos_idx) { |
137 size_t* pos_idx, MEMFLAGS flags) { |
|
138 assert(flags != mtNone, "Should have a real memory type"); |
138 unsigned int index = hash_to_index(key.hash()); |
139 unsigned int index = hash_to_index(key.hash()); |
139 *bucket_idx = (size_t)index; |
140 *bucket_idx = (size_t)index; |
140 *pos_idx = 0; |
141 *pos_idx = 0; |
141 |
142 |
142 // First entry for this hash bucket |
143 // First entry for this hash bucket |
143 if (_table[index] == NULL) { |
144 if (_table[index] == NULL) { |
144 MallocSiteHashtableEntry* entry = new_entry(key); |
145 MallocSiteHashtableEntry* entry = new_entry(key, flags); |
145 // OOM check |
146 // OOM check |
146 if (entry == NULL) return NULL; |
147 if (entry == NULL) return NULL; |
147 |
148 |
148 // swap in the head |
149 // swap in the head |
149 if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) { |
150 if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) { |
154 } |
155 } |
155 |
156 |
156 MallocSiteHashtableEntry* head = _table[index]; |
157 MallocSiteHashtableEntry* head = _table[index]; |
157 while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) { |
158 while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) { |
158 MallocSite* site = head->data(); |
159 MallocSite* site = head->data(); |
159 if (site->equals(key)) { |
160 if (site->flags() == flags && site->equals(key)) { |
160 // found matched entry |
|
161 return head->data(); |
161 return head->data(); |
162 } |
162 } |
163 |
163 |
164 if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) { |
164 if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) { |
165 MallocSiteHashtableEntry* entry = new_entry(key); |
165 MallocSiteHashtableEntry* entry = new_entry(key, flags); |
166 // OOM check |
166 // OOM check |
167 if (entry == NULL) return NULL; |
167 if (entry == NULL) return NULL; |
168 if (head->atomic_insert(entry)) { |
168 if (head->atomic_insert(entry)) { |
169 (*pos_idx) ++; |
169 (*pos_idx) ++; |
170 return entry->data(); |
170 return entry->data(); |
189 } |
189 } |
190 |
190 |
191 // Allocates MallocSiteHashtableEntry object. Special call stack |
191 // Allocates MallocSiteHashtableEntry object. Special call stack |
192 // (pre-installed allocation site) has to be used to avoid infinite |
192 // (pre-installed allocation site) has to be used to avoid infinite |
193 // recursion. |
193 // recursion. |
194 MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key) { |
194 MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MEMFLAGS flags) { |
195 void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT, |
195 void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT, |
196 *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL); |
196 *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL); |
197 return ::new (p) MallocSiteHashtableEntry(key); |
197 return ::new (p) MallocSiteHashtableEntry(key, flags); |
198 } |
198 } |
199 |
199 |
200 void MallocSiteTable::reset() { |
200 void MallocSiteTable::reset() { |
201 for (int index = 0; index < table_size; index ++) { |
201 for (int index = 0; index < table_size; index ++) { |
202 MallocSiteHashtableEntry* head = _table[index]; |
202 MallocSiteHashtableEntry* head = _table[index]; |