8204857: ConcurrentHashTable: Fix parallel processing
Reviewed-by: coleenp, gziemski
--- a/src/hotspot/share/classfile/stringTable.cpp Mon Jun 18 14:36:50 2018 +0200
+++ b/src/hotspot/share/classfile/stringTable.cpp Mon Jun 18 16:13:21 2018 +0200
@@ -456,7 +456,7 @@
log_trace(stringtable)("Started to grow");
{
TraceTime timer("Grow", TRACETIME_LOG(Debug, stringtable, perf));
- while (gt.doTask(jt)) {
+ while (gt.do_task(jt)) {
gt.pause(jt);
{
ThreadBlockInVM tbivm(jt);
@@ -502,7 +502,7 @@
bool interrupted = false;
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
- while(bdt.doTask(jt, stdc, stdd)) {
+ while(bdt.do_task(jt, stdc, stdd)) {
bdt.pause(jt);
{
ThreadBlockInVM tbivm(jt);
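
Both StringTable call sites drive the renamed do_task with the same loop: process one claimed chunk, then pause the task and block in the VM so safepoints can proceed before continuing. A condensed sketch of that pattern (cont() is the resume call visible in the gtest hunks below; jt is the calling JavaThread):

  while (task.do_task(jt /*, functors... */)) {  // process one claimed bucket range
    task.pause(jt);                              // release the lock context
    {
      ThreadBlockInVM tbivm(jt);                 // safepoint/handshake window
    }
    task.cont(jt);                               // reclaim before the next chunk
  }
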
--- a/src/hotspot/share/utilities/concurrentHashTable.hpp Mon Jun 18 14:36:50 2018 +0200
+++ b/src/hotspot/share/utilities/concurrentHashTable.hpp Mon Jun 18 16:13:21 2018 +0200
@@ -361,7 +361,7 @@
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
void do_bulk_delete_locked_for(Thread* thread, size_t start_idx,
size_t stop_idx, EVALUATE_FUNC& eval_f,
- DELETE_FUNC& del_f);
+ DELETE_FUNC& del_f, bool is_mt = false);
// Method to delete one item.
template <typename LOOKUP_FUNC>
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Mon Jun 18 14:36:50 2018 +0200
+++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Mon Jun 18 16:13:21 2018 +0200
@@ -484,11 +484,12 @@
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<VALUE, CONFIG, F>::
do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
- EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
+ EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
{
// Here we have the resize lock, so the table is SMR safe, and there is no new
// table. This can be done in parallel if we want.
- assert(_resize_lock_owner == thread, "Re-size lock not held");
+ assert((is_mt && _resize_lock_owner != NULL) ||
+ (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
Node* ndel[BULK_DELETE_LIMIT];
InternalTable* table = get_table();
assert(start_idx < stop_idx, "Must be");
@@ -516,7 +517,11 @@
bucket->lock();
size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
bucket->unlock();
- write_synchonize_on_visible_epoch(thread);
+ if (is_mt) {
+ GlobalCounter::write_synchronize();
+ } else {
+ write_synchonize_on_visible_epoch(thread);
+ }
for (size_t node_it = 0; node_it < nd; node_it++) {
del_f(ndel[node_it]->value());
Node::destroy_node(ndel[node_it]);
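
The branch above exists because write_synchonize_on_visible_epoch(thread) requires the calling thread to own the resize lock, which holds for at most one of the cooperating threads in the MT case; the workers therefore issue a plain GlobalCounter::write_synchronize(). The underlying handshake is the usual RCU-style one; a simplified sketch of the GlobalCounter protocol (illustrative, not code from this change):

  // Readers bracket their traversal of the bucket chains:
  GlobalCounter::critical_section_begin(thread);
  //   ... walk nodes, take copies ...
  GlobalCounter::critical_section_end(thread);

  // A deleter first unlinks nodes under the bucket lock, then waits until
  // every reader that might still see them has left its critical section,
  // and only then runs del_f and frees the nodes:
  GlobalCounter::write_synchronize();
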
--- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp Mon Jun 18 14:36:50 2018 +0200
+++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp Mon Jun 18 16:13:21 2018 +0200
@@ -45,9 +45,10 @@
size_t _task_size_log2; // Log2 of the number of buckets per task.
size_t _stop_task; // Last task.
size_t _size_log2; // Table size, as log2.
+ bool _is_mt; // Is this task driven by multiple threads?
- BucketsOperation(ConcurrentHashTable<VALUE, CONFIG, F>* cht)
- : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
- _stop_task(0), _size_log2(0) {}
+ BucketsOperation(ConcurrentHashTable<VALUE, CONFIG, F>* cht, bool is_mt = false)
+ : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
+ _stop_task(0), _size_log2(0), _is_mt(is_mt) {}
// Returns true if the caller succeeded in claiming the range start -> (stop-1).
@@ -107,8 +108,8 @@
public BucketsOperation
{
public:
- BulkDeleteTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht)
- : BucketsOperation(cht) {
+ BulkDeleteTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht, bool is_mt = false)
+ : BucketsOperation(cht, is_mt) {
}
// Before starting, prepare() must be called.
bool prepare(Thread* thread) {
@@ -124,7 +125,7 @@
// Does one range, destroying all nodes matching EVALUATE_FUNC;
// DELETE_FUNC is called before destruction. Returns true if there is more work.
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
- bool doTask(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) {
+ bool do_task(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) {
size_t start, stop;
assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
"Should be locked");
@@ -132,7 +133,8 @@
return false;
}
BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop,
- eval_f, del_f);
+ eval_f, del_f,
+ BucketsOperation::_is_mt);
return true;
}
@@ -187,7 +189,7 @@
}
// Re-sizes a portion of the table. Returns true if there is more work.
- bool doTask(Thread* thread) {
+ bool do_task(Thread* thread) {
size_t start, stop;
assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
"Should be locked");
@@ -217,7 +219,7 @@
this->thread_owns_resize_lock(thread);
}
- // Must be called after doTask returns false.
+ // Must be called after do_task returns false.
void done(Thread* thread) {
this->thread_owns_resize_lock(thread);
BucketsOperation::_cht->internal_grow_epilog(thread);
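
What makes do_task safe to call from several threads at once is that each call claims a disjoint range of 2^_task_size_log2 buckets before touching the table. The claim logic is not part of this hunk; a plausible sketch of such a hand-out, using an atomic fetch-and-add on _next_to_claim (illustrative only, not the committed implementation):

  bool claim_task(size_t& start, size_t& stop) {
    size_t claimed = Atomic::add((size_t)1, &_next_to_claim) - 1;
    if (claimed >= _stop_task) {
      return false;                        // all ranges handed out
    }
    start = claimed << _task_size_log2;    // first bucket of this range
    stop  = start + ((size_t)1 << _task_size_log2);
    return true;
  }
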
--- a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp Mon Jun 18 14:36:50 2018 +0200
+++ b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp Mon Jun 18 16:13:21 2018 +0200
@@ -211,7 +211,7 @@
// Removes all odd values.
SimpleTestTable::BulkDeleteTask bdt(cht);
if (bdt.prepare(thr)) {
- while(bdt.doTask(thr, getinsert_bulkdelete_eval, getinsert_bulkdelete_del)) {
+ while(bdt.do_task(thr, getinsert_bulkdelete_eval, getinsert_bulkdelete_del)) {
bdt.pause(thr);
EXPECT_TRUE(bdt.cont(thr)) << "Uncontended continue should work.";
}
@@ -362,7 +362,7 @@
SimpleTestTable::GrowTask gt(cht);
EXPECT_TRUE(gt.prepare(thr)) << "Growing uncontended should not fail.";
- while(gt.doTask(thr)) { /* grow */ }
+ while(gt.do_task(thr)) { /* grow */ }
gt.done(thr);
EXPECT_TRUE(cht->get_copy(thr, stl) == val) << "Getting an item after grow failed.";
@@ -953,3 +953,73 @@
GI_BD_InserterThread::_shrink = false;
mt_test_doer<RunnerGI_BD_InserterThread>();
}
+
+//#############################################################################################
+
+class MT_BD_Thread : public JavaTestThread {
+ TestTable::BulkDeleteTask* _bd;
+ public:
+ MT_BD_Thread(Semaphore* post, TestTable::BulkDeleteTask* bd)
+ : JavaTestThread(post), _bd(bd) {}
+ virtual ~MT_BD_Thread() {}
+ void main_run() {
+ MyDel del;
+ // Each worker claims and processes bucket ranges until none remain.
+ while (_bd->do_task(this, *this, del)) { /* keep claiming */ }
+ }
+
+ // Eval functor: match every node, so the bulk delete removes all items.
+ bool operator()(uintptr_t* val) {
+ return true;
+ }
+
+ // Del functor: nothing to release for plain uintptr_t values.
+ struct MyDel {
+ void operator()(uintptr_t* val) {
+ }
+ };
+};
+
+class Driver_BD_Thread : public JavaTestThread {
+public:
+ Semaphore _done;
+ Driver_BD_Thread(Semaphore* post) : JavaTestThread(post) {}
+ virtual ~Driver_BD_Thread() {}
+
+ void main_run() {
+ Semaphore done(0);
+ TestTable* cht = new TestTable(16, 16, 2);
+ for (uintptr_t v = 1; v < 99999; v++) {
+ TestLookup tl(v);
+ EXPECT_TRUE(cht->insert(this, tl, v)) << "Inserting a unique value should work.";
+ }
+ TestTable::BulkDeleteTask bdt(cht, true /* mt */);
+ EXPECT_TRUE(bdt.prepare(this)) << "Uncontended prepare must work.";
+
+ // Start four workers that share the same BulkDeleteTask.
+ MT_BD_Thread* tt[4];
+ for (int i = 0; i < 4; i++) {
+ tt[i] = new MT_BD_Thread(&done, &bdt);
+ tt[i]->doit();
+ }
+
+ // Concurrently read the table while the workers bulk-delete.
+ for (uintptr_t v = 1; v < 99999; v++) {
+ TestLookup tl(v);
+ cht->get_copy(this, tl);
+ }
+
+ for (int i = 0; i < 4; i++) {
+ done.wait();
+ }
+
+ bdt.done(this);
+
+ // The table should now be empty; the scan below fails on any survivor.
+ cht->do_scan(this, *this);
+ }
+
+ bool operator()(uintptr_t* val) {
+ EXPECT_TRUE(false) << "No items should be left";
+ return true;
+ }
+};
+
+TEST_VM(ConcurrentHashTable, concurrent_mt_bulk_delete) {
+ mt_test_doer<Driver_BD_Thread>();
+}