author      coleenp
date        Tue, 10 Jul 2018 11:13:33 -0400
changeset   50958:e0028bb6dd3d
parent      50637:359607017fb7
child       51334:cc2c79d22508
permissions -rw-r--r--
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_INLINE_HPP

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/concurrentHashTable.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/numberSeq.hpp"
#include "utilities/spinYield.hpp"

// 2^30 = 1G buckets
#define SIZE_BIG_LOG2 30
// 2^5 = 32 buckets
#define SIZE_SMALL_LOG2 5

// Number from spinYield.hpp. In some loops SpinYield would be unfair.
#define SPINPAUSES_PER_YIELD 8192

#ifdef ASSERT
#ifdef _LP64
// Two low bits are not usable.
static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
#else
// Two low bits are not usable.
static const void* POISON_PTR = (void*)0xffbadbac;
#endif
#endif
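
// The two low bits of a Bucket's _first pointer hold per-bucket state:
// STATE_LOCK_BIT serializes writers and STATE_REDIRECT_BIT sends readers to
// the new table during a resize (see Bucket below). This is why the poison
// values above keep their two low bits clear, why Bucket::first() strips the
// state bits, and why first_raw() returns the pointer with the bits intact.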

// Node
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Node::next() const
{
  return OrderAccess::load_acquire(&_next);
}

// Bucket
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first_raw() const
{
  return OrderAccess::load_acquire(&_first);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_node_ptr(
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
{
  // Due to this assert, this method is not static.
  assert(is_locked(), "Must be locked.");
  Node** tmp = (Node**)dst;
  OrderAccess::release_store(tmp, clear_set_state(node, *dst));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::first() const
{
  // We strip the state bits before returning the ptr.
  return clear_state(OrderAccess::load_acquire(&_first));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::have_redirect() const
{
  return is_state(first_raw(), STATE_REDIRECT_BIT);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::is_locked() const
{
  return is_state(first_raw(), STATE_LOCK_BIT);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::lock()
{
  int i = 0;
  // SpinYield would be unfair here
  while (!this->trylock()) {
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::release_assign_last_node_next(
    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
{
  assert(is_locked(), "Must be locked.");
  Node* const volatile * ret = first_ptr();
  while (clear_state(*ret) != NULL) {
    ret = clear_state(*ret)->next_ptr();
  }
  release_assign_node_ptr(ret, node);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
                    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect)
{
  if (is_locked()) {
    return false;
  }
  if (Atomic::cmpxchg(node, &_first, expect) == expect) {
    return true;
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::trylock()
{
  if (is_locked()) {
    return false;
  }
  // We will expect a clean first pointer.
  Node* tmp = first();
  if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
    return true;
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::unlock()
{
  assert(is_locked(), "Must be locked.");
  assert(!have_redirect(),
         "Unlocking a bucket after it has reached terminal state.");
  OrderAccess::release_store(&_first, clear_state(first()));
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  Bucket::redirect()
{
  assert(is_locked(), "Must be locked.");
  OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}
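
// A bucket's life cycle with respect to its state bits: trylock() sets
// STATE_LOCK_BIT with a CAS, after which the chain may be mutated through
// release_assign_node_ptr(); the bucket is then either unlocked or, during a
// resize, redirect()ed. Setting STATE_REDIRECT_BIT is terminal -- unlock()
// asserts !have_redirect() -- so a redirected bucket is never written again
// and readers are forwarded to the new table (see get_bucket()).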

// InternalTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::InternalTable(size_t log2_size)
    : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
      _hash_mask(~(~((size_t)0) << _log2_size))
{
  assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
         "Bad size");
  void* memory = NEW_C_HEAP_ARRAY(Bucket, _size, F);
  _buckets = new (memory) Bucket[_size];
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  InternalTable::~InternalTable()
{
  FREE_C_HEAP_ARRAY(Bucket, _buckets);
}

// ScopedCS
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
    : _thread(thread), _cht(cht)
{
  GlobalCounter::critical_section_begin(_thread);
  // This version is published now.
  if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
    OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ScopedCS::~ScopedCS()
{
  GlobalCounter::critical_section_end(_thread);
}
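
// ScopedCS is the read-side critical section: while it is live, the current
// table and any chain read from it cannot be freed, because bulk operations
// wait in GlobalCounter::write_synchronize() for all active critical
// sections. Clearing _invisible_epoch in the constructor also tells a
// concurrent bulk operation that this epoch was observed by a reader, which
// disables the fast path in write_synchonize_on_visible_epoch().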

// BaseConfig
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::allocate_node(size_t size, const VALUE& value)
{
  return AllocateHeap(size, F);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  BaseConfig::free_node(void* memory, const VALUE& value)
{
  FreeHeap(memory);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
}

// HaveDeletables
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                      EVALUATE_FUNC& eval_f,
                                                      Bucket* prefetch_bucket)
{
  // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all Nodes, doing this prefetch gives around a 30% gain.
  Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (pref != NULL) {
      Prefetch::read(*pref->value(), 0);
      pref = pref->next();
    }
    // Read next() Node* once. May be racing with a thread moving the next
    // pointers.
    Node* next_pref = next->next();
    if (next_pref != NULL) {
      Prefetch::read(*next_pref->value(), 0);
    }
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <bool b, typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                   EVALUATE_FUNC& eval_f,
                                                   Bucket* preb)
{
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

// ConcurrentHashTable
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  write_synchonize_on_visible_epoch(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  OrderAccess::fence(); // Prevent below load from floating up.
  // If no reader saw this version we can skip write_synchronize.
  if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
    return;
  }
  assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
  // We set this/next version that we are synchronizing on to not published.
  // A reader will zero this flag if it reads this/next version.
  OrderAccess::release_store(&_invisible_epoch, thread);
  GlobalCounter::write_synchronize();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_resize_lock(Thread* locker)
{
  if (_resize_lock->try_lock()) {
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      return false;
    }
  } else {
    return false;
  }
  _invisible_epoch = 0;
  _resize_lock_owner = locker;
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  lock_resize_lock(Thread* locker)
{
  size_t i = 0;
  // If the lock is held by some other thread, the chance that it is released
  // quickly is low, so we prefer yielding.
  SpinYield yield(1, 512);
  do {
    _resize_lock->lock_without_safepoint_check();
    // If the holder of the lock dropped the mutex for a safepoint, the mutex
    // might be unlocked, and _resize_lock_owner will contain the owner.
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      yield.wait();
    } else {
      break;
    }
  } while(true);
  _resize_lock_owner = locker;
  _invisible_epoch = 0;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  unlock_resize_lock(Thread* locker)
{
  _invisible_epoch = 0;
  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
  _resize_lock_owner = NULL;
  _resize_lock->unlock();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  free_nodes()
{
  // We assume we are not MT during freeing.
  for (size_t node_it = 0; node_it < _table->_size; node_it++) {
    Bucket* bucket = _table->get_buckets() + node_it;
    Node* node = bucket->first();
    while (node != NULL) {
      Node* free_node = node;
      node = node->next();
      Node::destroy_node(free_node);
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_table() const
{
  return OrderAccess::load_acquire(&_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_new_table() const
{
  return OrderAccess::load_acquire(&_new_table);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
ConcurrentHashTable<VALUE, CONFIG, F>::
  set_table_from_new()
{
  InternalTable* old_table = _table;
  // Publish the new table.
  OrderAccess::release_store(&_table, _new_table);
  // All must see this.
  GlobalCounter::write_synchronize();
  // _new_table not read any more.
  _new_table = NULL;
  DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
  return old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_range(Thread* thread, size_t start, size_t stop)
{
  assert(stop <= _table->_size, "Outside backing array");
  assert(_new_table != NULL, "Grow not properly set up before start");
  // The state is also copied here. Hence all buckets in the new table will be
  // locked. I call the siblings odd/even, where even has high bit 0 and odd
  // has high bit 1.
  for (size_t even_index = start; even_index < stop; even_index++) {
    Bucket* bucket = _table->get_bucket(even_index);

    bucket->lock();

    size_t odd_index = even_index + _table->_size;
    _new_table->get_buckets()[even_index] = *bucket;
    _new_table->get_buckets()[odd_index] = *bucket;

    // Threads taking the bucket lock are redirected to the new table, where
    // they will wait until unlock() below.
    bucket->redirect(); /* Must release stores above */

    // When this is done we have separated the nodes into corresponding buckets
    // in the new table.
    if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
      // If the bucket is empty, unzip does nothing.
      // We must make sure readers go to the new table before we poison the
      // bucket.
      DEBUG_ONLY(GlobalCounter::write_synchronize();)
    }

    // Unlock for writes into the new table buckets.
    _new_table->get_bucket(even_index)->unlock();
    _new_table->get_bucket(odd_index)->unlock();

    DEBUG_ONLY(
       bucket->release_assign_node_ptr(
          _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
    )
  }
}
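
// Growing doubles the table: the chain of old bucket i is split between new
// sibling buckets i (even) and i + old_size (odd), since the larger hash
// mask exposes one more bit. E.g. with old size 4 and new size 8, old
// bucket 1 splits into new buckets 1 and 5. unzip_bucket() separates the
// shared chain onto the two new chains, one pointer move per epoch.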

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
{
  Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
  assert(bucket->is_locked(), "Must be locked.");
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  bool have_dead = false;
  while (rem_n != NULL) {
    if (lookup_f.equals(rem_n->value(), &have_dead)) {
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      break;
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }

  bucket->unlock();

  if (rem_n == NULL) {
    return false;
  }
  // Publish the deletion.
  GlobalCounter::write_synchronize();
  delete_f(rem_n->value());
  Node::destroy_node(rem_n);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                            EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
{
  // Here we have the resize lock so the table is SMR safe, and there is no
  // new table. Can do this in parallel if we want.
  assert((is_mt && _resize_lock_owner != NULL) ||
         (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
  Node* ndel[BULK_DELETE_LIMIT];
  InternalTable* table = get_table();
  assert(start_idx < stop_idx, "Must be");
  assert(stop_idx <= _table->_size, "Must be");
  // Here we do the critical section manually, since we don't want to take the
  // cost of locking the bucket if there is nothing to delete. But we can have
  // concurrent single deletes. The _invisible_epoch can only be used by the
  // owner of _resize_lock, i.e. us here. Therefore we should not change it in
  // our own read-side.
  GlobalCounter::critical_section_begin(thread);
  for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
                              table->get_bucket(bucket_it+1) : NULL;

    if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
        have_deletable(bucket, eval_f, prefetch_bucket)) {
      // Nothing to remove in this bucket.
      continue;
    }

    GlobalCounter::critical_section_end(thread);
    // We left the critical section, but the bucket cannot be removed while we
    // hold the _resize_lock.
    bucket->lock();
    size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
    bucket->unlock();
    if (is_mt) {
      GlobalCounter::write_synchronize();
    } else {
      write_synchonize_on_visible_epoch(thread);
    }
    for (size_t node_it = 0; node_it < nd; node_it++) {
      del_f(ndel[node_it]->value());
      Node::destroy_node(ndel[node_it]);
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
    GlobalCounter::critical_section_begin(thread);
  }
  GlobalCounter::critical_section_end(thread);
}
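
// The loop above alternates between a read-side critical section, used to
// scan buckets cheaply, and a write synchronization after nodes have been
// unlinked, so that the unlinked nodes are only destroyed once no reader can
// still hold a pointer to them. In the multi-threaded (is_mt) case a plain
// GlobalCounter::write_synchronize() is used, since _invisible_epoch may
// only be manipulated by the single owner of the resize lock.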

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
  size_t dels = 0;
  Node* ndel[BULK_DELETE_LIMIT];
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    bool is_dead = false;
    lookup_f.equals(rem_n->value(), &is_dead);
    if (is_dead) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == BULK_DELETE_LIMIT) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  if (dels > 0) {
    GlobalCounter::write_synchronize();
    for (size_t node_it = 0; node_it < dels; node_it++) {
      Node::destroy_node(ndel[node_it]);
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket(uintx hash) const
{
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  if (bucket->have_redirect()) {
    table = get_new_table();
    bucket = get_bucket_in(table, hash);
  }
  return bucket;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_bucket_locked(Thread* thread, const uintx hash)
{
  Bucket* bucket;
  int i = 0;
  // SpinYield would be unfair here
  while(true) {
    {
      // We need a critical section to protect the table itself. But if we
      // fail we must leave the critical section, otherwise we would deadlock.
      ScopedCS cs(thread, this);
      bucket = get_bucket(hash);
      if (bucket->trylock()) {
        break; /* ends critical section */
      }
    } /* ends critical section */
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
  return bucket;
}

// Always called within critical section
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
ConcurrentHashTable<VALUE, CONFIG, F>::
  get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
           bool* have_dead, size_t* loops) const
{
  size_t loop_count = 0;
  Node* node = bucket->first();
  while (node != NULL) {
    bool is_dead = false;
    ++loop_count;
    if (lookup_f.equals(node->value(), &is_dead)) {
      break;
    }
    if (is_dead && !(*have_dead)) {
      *have_dead = true;
    }
    node = node->next();
  }
  if (loops != NULL) {
    *loops = loop_count;
  }
  return node;
}
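
// The LOOKUP_FUNC contract as used by get_node() above: get_hash() returns
// the hash of the item being searched for, and equals(VALUE*, bool*) returns
// true on a match while also flagging values whose hash has become dead (see
// CONFIG::get_hash()). The have_dead output lets callers such as
// internal_insert() trigger a cleaning pass over the bucket.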

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unzip_bucket(Thread* thread, InternalTable* old_table,
               InternalTable* new_table, size_t even_index, size_t odd_index)
{
  Node* aux = old_table->get_bucket(even_index)->first();
  if (aux == NULL) {
    // This is an empty bucket and in debug we poison the first ptr in the
    // bucket. Therefore we must make sure no readers are looking at this
    // bucket. If we don't do a write_synch here, the caller must do it.
    return false;
  }
  Node* delete_me = NULL;
  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
  while (aux != NULL) {
    bool dead_hash = false;
    size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
    Node* aux_next = aux->next();
    if (dead_hash) {
      delete_me = aux;
      // This item is dead, move both lists to next
      new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                aux_next);
      new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                 aux_next);
    } else {
      size_t aux_index = bucket_idx_hash(new_table, aux_hash);
      if (aux_index == even_index) {
        // This node is even, so move the odd insertion point past it.
        new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                  aux_next);
        // Keep in even list
        even = aux->next_ptr();
      } else if (aux_index == odd_index) {
        // This node is odd, so move the even insertion point past it.
        new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                   aux_next);
        // Keep in odd list
        odd = aux->next_ptr();
      } else {
        fatal("aux_index does not match even or odd indices");
      }
    }
    aux = aux_next;

    // We can only move one pointer per epoch, otherwise a reader might be
    // moved to the wrong chain, e.g. looking for an even hash value but
    // getting moved to the odd bucket chain.
    write_synchonize_on_visible_epoch(thread);
    if (delete_me != NULL) {
      Node::destroy_node(delete_me);
      delete_me = NULL;
    }
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_prolog(Thread* thread, size_t log2_size)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  if (_table->_log2_size == _log2_start_size ||
      _table->_log2_size <= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }
  _new_table = new InternalTable(_table->_log2_size - 1);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");

  InternalTable* old_table = set_table_from_new();
  _size_limit_reached = false;
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink_range(Thread* thread, size_t start, size_t stop)
{
  // The state is also copied here.
  // Hence all buckets in the new table will be locked.
  for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
    size_t even_hash_index = bucket_it; // High bit 0
    size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1

    Bucket* b_old_even = _table->get_bucket(even_hash_index);
    Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);

    b_old_even->lock();
    b_old_odd->lock();

    _new_table->get_buckets()[bucket_it] = *b_old_even;

    // Put chains together.
    _new_table->get_bucket(bucket_it)->
      release_assign_last_node_next(*(b_old_odd->first_ptr()));

    b_old_even->redirect();
    b_old_odd->redirect();

    write_synchonize_on_visible_epoch(thread);

    // Unlock for writes into the new smaller table.
    _new_table->get_bucket(bucket_it)->unlock();

    DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
                                                   (Node*)POISON_PTR);)
    DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
                                                  (Node*)POISON_PTR);)
  }
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_shrink(Thread* thread, size_t log2_size)
{
  if (!internal_shrink_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_shrink_range(thread, 0, _new_table->_size);
  internal_shrink_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_prolog(Thread* thread, size_t log2_size)
{
  // This double checking of _size_limit_reached/is_max_size_reached()
  // is only done on the grow path, since grow means high load on the table
  // while shrink means low load.
  if (is_max_size_reached()) {
    return false;
  }
  if (!try_resize_lock(thread)) {
    // Either we have an ongoing resize or an operation which doesn't want us
    // to resize now.
    return false;
  }
  if (is_max_size_reached() || _table->_log2_size >= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }

  _new_table = new InternalTable(_table->_log2_size + 1);

  if (_new_table->_log2_size == _log2_size_limit) {
    _size_limit_reached = true;
  }

  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Should be locked");

  InternalTable* old_table = set_table_from_new();
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_grow(Thread* thread, size_t log2_size)
{
  if (!internal_grow_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_grow_range(thread, 0, _table->_size);
  internal_grow_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

// Always called within critical section
template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  bool clean = false;
  size_t loops = 0;
  VALUE* ret = NULL;

  const Bucket* bucket = get_bucket(lookup_f.get_hash());
  Node* node = get_node(bucket, lookup_f, &clean, &loops);
  if (node != NULL) {
    ret = node->value();
  }
  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename VALUE_FUNC, typename CALLBACK_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE_FUNC& value_f,
                  CALLBACK_FUNC& callback, bool* grow_hint)
{
  bool ret = false;
  bool clean = false;
  bool locked;
  size_t loops = 0;
  size_t i = 0;
  Node* new_node = NULL;
  uintx hash = lookup_f.get_hash();
  while (true) {
    {
      ScopedCS cs(thread, this); /* protects the table/bucket */
      Bucket* bucket = get_bucket(hash);

      Node* first_at_start = bucket->first();
      Node* old = get_node(bucket, lookup_f, &clean, &loops);
      if (old == NULL) {
        // No duplicate found.
        if (new_node == NULL) {
          new_node = Node::create_node(value_f(), first_at_start);
        } else {
          new_node->set_next(first_at_start);
        }
        if (bucket->cas_first(new_node, first_at_start)) {
          callback(true, new_node->value());
          new_node = NULL;
          ret = true;
          break; /* leave critical section */
        }
        // CAS failed; we must leave the critical section and retry.
        locked = bucket->is_locked();
      } else {
        // There is a duplicate.
        callback(false, old->value());
        break; /* leave critical section */
      }
    } /* leave critical section */
    i++;
    if (locked) {
      os::naked_yield();
    } else {
      SpinPause();
    }
  }

  if (new_node != NULL) {
    // CAS failed and a duplicate was inserted; we must free this node.
    Node::destroy_node(new_node);
  } else if (i == 0 && clean) {
    // We only do cleaning on fast inserts.
    Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
    assert(bucket->is_locked(), "Must be locked.");
    delete_in_bucket(thread, bucket, lookup_f);
    bucket->unlock();
  }

  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}
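
// internal_insert() is optimistic: the node is created outside any lock and
// published with a single cas_first() on the bucket head. The CAS fails
// either because another thread changed the head first or because the bucket
// is write-locked (the state bits make _first compare unequal). In the
// locked case the loop yields instead of spinning, since a held bucket lock
// implies a longer wait.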

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  visit_nodes(Bucket* bucket, FUNC& visitor_f)
{
  Node* current_node = bucket->first();
  while (current_node != NULL) {
    if (!visitor_f(current_node->value())) {
      return false;
    }
    current_node = current_node->next();
  }
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan_locked(Thread* thread, FUNC& scan_f)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  // We can do a critical section over the entire loop but that would block
  // updates for a long time. Instead we choose to block resizes.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
      break; /* ends critical section */
    }
  } /* ends critical section */
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                     size_t num_del, Node** ndel)
{
  size_t dels = 0;
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    if (eval_f(rem_n->value())) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == num_del) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  return dels;
}

// Constructor
template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
    : _new_table(NULL), _log2_start_size(log2size),
      _log2_size_limit(log2size_limit), _grow_hint(grow_hint),
      _size_limit_reached(false), _resize_lock_owner(NULL),
      _invisible_epoch(0)
{
  _resize_lock =
    new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
              Monitor::_safepoint_check_never);
  _table = new InternalTable(log2size);
  assert(log2size_limit >= log2size, "bad ergo");
  _size_limit_reached = _table->_log2_size == _log2_size_limit;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<VALUE, CONFIG, F>::
  ~ConcurrentHashTable()
{
  delete _resize_lock;
  free_nodes();
  delete _table;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
  get_size_log2(Thread* thread)
{
  ScopedCS cs(thread, this);
  return _table->_log2_size;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  shrink(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
  bool ret = internal_shrink(thread, tmp);
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  grow(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
  return internal_grow(thread, tmp);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
{
  bool ret = false;
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  if (val != NULL) {
    found_f(val);
    ret = true;
  }
  return ret;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline VALUE ConcurrentHashTable<VALUE, CONFIG, F>::
  get_copy(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  return val != NULL ? *val : CONFIG::notfound();
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  unsafe_insert(const VALUE& value) {
  bool dead_hash = false;
  size_t hash = CONFIG::get_hash(value, &dead_hash);
  if (dead_hash) {
    return false;
  }
  // This is an unsafe operation.
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
  Node* new_node = Node::create_node(value, bucket->first());
  if (!bucket->cas_first(new_node, bucket->first())) {
    assert(false, "bad");
  }
  return true;
}
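
// A minimal usage sketch (hypothetical client code, not part of this file,
// and assuming grow_hint defaults to NULL in the declarations): a client
// provides a CONFIG with get_hash()/notfound() on top of the BaseConfig
// allocators, and a lookup functor with get_hash()/equals().
//
//   struct IntConfig
//     : public ConcurrentHashTable<int, IntConfig, mtInternal>::BaseConfig {
//     static uintx get_hash(const int& v, bool* is_dead) {
//       *is_dead = false;   // entries never die in this toy example
//       return (uintx)v;
//     }
//     static int notfound() { return -1; }
//   };
//   struct IntLookup {
//     int _v;
//     uintx get_hash() const { return (uintx)_v; }
//     bool equals(int* v, bool* is_dead) { return *v == _v; }
//   };
//
//   // ConcurrentHashTable<int, IntConfig, mtInternal> cht(16, 20, 4);
//   // IntLookup l = { 5 };
//   // int copy = cht.get_copy(thread, l); // notfound() (-1) when absent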

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  do_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  assert(_resize_lock_owner != thread, "Re-size lock held");
  lock_resize_lock(thread);
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  lock_resize_lock(thread);
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
                outputStream* st, const char* table_name)
{
  NumberSeq summary;
  size_t literal_bytes = 0;
  if (!try_resize_lock(thread)) {
    st->print_cr("statistics unavailable at this moment");
    return;
  }

  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    size_t count = 0;
    Bucket* bucket = table->get_bucket(bucket_it);
    if (bucket->have_redirect() || bucket->is_locked()) {
      continue;
    }
    Node* current_node = bucket->first();
    while (current_node != NULL) {
      ++count;
      literal_bytes += vs_f(current_node->value());
      current_node = current_node->next();
    }
    summary.add((double)count);
  }

  double num_buckets = summary.num();
  double num_entries = summary.sum();

  size_t bucket_bytes = num_buckets * sizeof(Bucket);
  size_t entry_bytes  = num_entries * sizeof(Node);
  size_t total_bytes = literal_bytes + bucket_bytes + entry_bytes;

  size_t bucket_size  = (num_buckets <= 0) ? 0 : (bucket_bytes  / num_buckets);
  size_t entry_size   = (num_entries <= 0) ? 0 : (entry_bytes   / num_entries);

  st->print_cr("%s statistics:", table_name);
  st->print_cr("Number of buckets       : %9" PRIuPTR " = %9" PRIuPTR
               " bytes, each " SIZE_FORMAT,
               (size_t)num_buckets, bucket_bytes,  bucket_size);
  st->print_cr("Number of entries       : %9" PRIuPTR " = %9" PRIuPTR
               " bytes, each " SIZE_FORMAT,
               (size_t)num_entries, entry_bytes,   entry_size);
  if (literal_bytes != 0) {
    double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
    st->print_cr("Number of literals      : %9" PRIuPTR " = %9" PRIuPTR
                 " bytes, avg %7.3f",
                 (size_t)num_entries, literal_bytes, literal_avg);
  }
  st->print_cr("Total footprint         : %9s = %9" PRIuPTR " bytes", ""
               , total_bytes);
  st->print_cr("Average bucket size     : %9.3f", summary.avg());
  st->print_cr("Variance of bucket size : %9.3f", summary.variance());
  st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
  st->print_cr("Maximum bucket size     : %9" PRIuPTR,
               (size_t)summary.maximum());
  unlock_resize_lock(thread);
}

template <typename VALUE, typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
  try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
  for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
    Bucket* bucket = _table->get_bucket(bucket_it);
    assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
    while (bucket->first() != NULL) {
      Node* move_node = bucket->first();
      bool ok = bucket->cas_first(move_node->next(), move_node);
      assert(ok, "Uncontended cas must work");
      bool dead_hash = false;
      size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
      if (!dead_hash) {
        Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
        assert(!bucket->have_redirect() && !bucket->is_locked(), "No bit should be present");
        move_node->set_next(insert_bucket->first());
        ok = insert_bucket->cas_first(move_node, insert_bucket->first());
        assert(ok, "Uncontended cas must work");
      }
    }
  }
  unlock_resize_lock(thread);
  return true;
}

#endif // include guard