   51    _started_workers = 0;
   52    _finished_workers = 0;
   53  }
   54
   55  WorkGang::WorkGang(const char* name,
-  56                     int workers,
+  56                     uint workers,
   57                     bool are_GC_task_threads,
   58                     bool are_ConcurrentGC_threads) :
   59    AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads) {
   60    _total_workers = workers;
   61  }
   62
-  63  GangWorker* WorkGang::allocate_worker(int which) {
+  63  GangWorker* WorkGang::allocate_worker(uint which) {
   64    GangWorker* new_worker = new GangWorker(this, which);
   65    return new_worker;
   66  }
   67
   68  // The current implementation will exit if the allocation
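Every change in this patch follows the same pattern: worker counts and worker indices become uint, since they are never negative and are compared against other unsigned values. A minimal illustration of the warning class this removes (hypothetical code, not from the patch):

    // g++ -Wsign-compare: the first loop warns because the signed index
    // is compared against an unsigned count; the second is clean.
    #include <cstdio>
    typedef unsigned int uint;

    static uint total_workers() { return 4; }   // hypothetical stand-in

    int main() {
      for (int i = 0; i < total_workers(); i++) {    // warning: signed/unsigned compare
        std::printf("worker %d\n", i);
      }
      for (uint i = 0; i < total_workers(); i++) {   // ok: both sides unsigned
        std::printf("worker %u\n", i);
      }
      return 0;
    }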
   86    if (are_ConcurrentGC_threads()) {
   87      worker_type = os::cgc_thread;
   88    } else {
   89      worker_type = os::pgc_thread;
   90    }
-  91    for (int worker = 0; worker < total_workers(); worker += 1) {
+  91    for (uint worker = 0; worker < total_workers(); worker += 1) {
   92      GangWorker* new_worker = allocate_worker(worker);
   93      assert(new_worker != NULL, "Failed to allocate GangWorker");
   94      _gang_workers[worker] = new_worker;
   95      if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
   96        vm_exit_out_of_memory(0, "Cannot create worker GC thread. Out of system resources.");
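The startup loop allocates each worker, records it in _gang_workers, and exits the VM if the OS thread cannot be created. A rough standard-C++ analogue of this create-or-die pattern, with std::thread standing in for os::create_thread (all names here are illustrative):

    #include <cstdio>
    #include <cstdlib>
    #include <system_error>
    #include <thread>
    #include <vector>

    static void worker_loop(unsigned id) { /* wait for work and run it */ }

    // Start 'count' workers eagerly; treat any creation failure as fatal,
    // mirroring vm_exit_out_of_memory above.
    std::vector<std::thread> start_workers(unsigned count) {
      std::vector<std::thread> workers;
      workers.reserve(count);
      for (unsigned i = 0; i < count; i++) {
        try {
          workers.emplace_back(worker_loop, i);
        } catch (const std::system_error&) {  // thrown if the thread cannot be created
          std::fprintf(stderr, "Cannot create worker thread %u. Out of system resources.\n", i);
          std::abort();
        }
      }
      return workers;
    }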
  106  AbstractWorkGang::~AbstractWorkGang() {
  107    if (TraceWorkGang) {
  108      tty->print_cr("Destructing work gang %s", name());
  109    }
  110    stop();   // stop all the workers
- 111    for (int worker = 0; worker < total_workers(); worker += 1) {
+ 111    for (uint worker = 0; worker < total_workers(); worker += 1) {
  112      delete gang_worker(worker);
  113    }
  114    delete gang_workers();
  115    delete monitor();
  116  }
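Note the teardown order in the destructor: stop() quiesces the workers first, and only then are the GangWorker objects and the backing array freed. A hedged sketch of the same join-before-destroy discipline (std::thread, invented types):

    #include <thread>
    #include <vector>

    struct Gang {
      std::vector<std::thread> workers;

      ~Gang() {
        // Assumes the workers have already been told to exit, as stop()
        // arranges in the patch. Destroying a joinable std::thread calls
        // std::terminate, so join each one before the vector is destroyed.
        for (std::thread& w : workers) {
          if (w.joinable()) w.join();
        }
      }
    };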
  117
- 118  GangWorker* AbstractWorkGang::gang_worker(int i) const {
+ 118  GangWorker* AbstractWorkGang::gang_worker(uint i) const {
  119    // Array index bounds checking.
  120    GangWorker* result = NULL;
  121    assert(gang_workers() != NULL, "No workers for indexing");
- 122    assert(((i >= 0) && (i < total_workers())), "Worker index out of bounds");
+ 122    assert(i < total_workers(), "Worker index out of bounds");
  123    result = _gang_workers[i];
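With i now unsigned, the lower-bound half of the old assert is vacuously true, so only the upper-bound check remains; compilers flag the old form once the type changes. For example:

    unsigned int i = 0;
    if (i >= 0) { }   // gcc -Wtype-limits: comparison is always true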
  146    _started_workers = 0;
  147    _finished_workers = 0;
  148    // Tell the workers to get to work.
  149    monitor()->notify_all();
  150    // Wait for them to be finished
- 151    while (finished_workers() < (int) no_of_parallel_workers) {
+ 151    while (finished_workers() < no_of_parallel_workers) {
  152      if (TraceWorkGang) {
  153        tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d",
  154                      name(), finished_workers(), no_of_parallel_workers,
  155                      _sequence_number);
  156      }
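In run_task the coordinator zeroes its counters, wakes all workers on the monitor, and then sleeps on the same monitor until finished_workers() reaches the number of workers dispatched. A compact condition-variable sketch of that notify-then-wait rendezvous (invented names, not the HotSpot Monitor API):

    #include <condition_variable>
    #include <mutex>

    struct Dispatch {
      std::mutex m;
      std::condition_variable cv;
      unsigned finished = 0;
      unsigned sequence = 0;

      // Coordinator: publish a round of work, then wait for it to drain.
      void run_round(unsigned n_workers) {
        std::unique_lock<std::mutex> lk(m);
        finished = 0;
        sequence++;        // workers watch this to detect a new round
        cv.notify_all();   // "Tell the workers to get to work."
        cv.wait(lk, [&] { return finished >= n_workers; });
      }

      // Worker: report completion of its share of the round.
      void note_finished() {
        std::lock_guard<std::mutex> lk(m);
        finished++;
        cv.notify_all();   // wake the coordinator to re-check its predicate
      }
    };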
  375  WorkGangBarrierSync::WorkGangBarrierSync()
  376    : _monitor(Mutex::safepoint, "work gang barrier sync", true),
  377      _n_workers(0), _n_completed(0), _should_reset(false) {
  378  }
  379
- 380  WorkGangBarrierSync::WorkGangBarrierSync(int n_workers, const char* name)
+ 380  WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)
  381    : _monitor(Mutex::safepoint, name, true),
  382      _n_workers(n_workers), _n_completed(0), _should_reset(false) {
  383  }
  384
- 385  void WorkGangBarrierSync::set_n_workers(int n_workers) {
+ 385  void WorkGangBarrierSync::set_n_workers(uint n_workers) {
  386    _n_workers = n_workers;
  387    _n_completed = 0;
  388    _should_reset = false;
  389  }
  390
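WorkGangBarrierSync is a reusable barrier: arrivals increment _n_completed, the last arrival releases the waiters, and _should_reset defers zeroing the count until the first arrival of the next cycle. A sketch of that reset-on-next-entry idea (field names follow the patch, the rest is invented):

    #include <condition_variable>
    #include <mutex>

    class BarrierSync {
      std::mutex m;
      std::condition_variable cv;
      unsigned n_workers, n_completed = 0;
      bool should_reset = false;
    public:
      explicit BarrierSync(unsigned n) : n_workers(n) {}

      void enter() {
        std::unique_lock<std::mutex> lk(m);
        if (should_reset) {        // first arrival of a new cycle resets the barrier
          n_completed = 0;
          should_reset = false;
        }
        n_completed++;
        if (n_completed == n_workers) {
          should_reset = true;     // arm the reset for the next cycle...
          cv.notify_all();         // ...and release everyone waiting
        } else {
          // Assumes, as in the gang's usage, that no worker re-enters the
          // barrier before all waiters from this cycle have left it.
          cv.wait(lk, [&] { return n_completed == n_workers; });
        }
      }
    };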
  417    }
  418  }
  419
  420  // SubTasksDone functions.
  421
- 422  SubTasksDone::SubTasksDone(int n) :
+ 422  SubTasksDone::SubTasksDone(uint n) :
  423    _n_tasks(n), _n_threads(1), _tasks(NULL) {
- 424    _tasks = NEW_C_HEAP_ARRAY(jint, n);
+ 424    _tasks = NEW_C_HEAP_ARRAY(uint, n);
  425    guarantee(_tasks != NULL, "alloc failure");
  426    clear();
  427  }
  428
  429  bool SubTasksDone::valid() {
  430    return _tasks != NULL;
  431  }
  432
- 433  void SubTasksDone::set_n_threads(int t) {
+ 433  void SubTasksDone::set_n_threads(uint t) {
  434    assert(_claimed == 0 || _threads_completed == _n_threads,
  435           "should not be called while tasks are being processed!");
  436    _n_threads = (t == 0 ? 1 : t);
  437  }
  438
  439  void SubTasksDone::clear() {
- 440    for (int i = 0; i < _n_tasks; i++) {
+ 440    for (uint i = 0; i < _n_tasks; i++) {
  441      _tasks[i] = 0;
  442    }
  443    _threads_completed = 0;
  444  #ifdef ASSERT
  445    _claimed = 0;
  446  #endif
  447  }
  448
- 449  bool SubTasksDone::is_task_claimed(int t) {
+ 449  bool SubTasksDone::is_task_claimed(uint t) {
- 450    assert(0 <= t && t < _n_tasks, "bad task id.");
+ 450    assert(t < _n_tasks, "bad task id.");
- 451    jint old = _tasks[t];
+ 451    uint old = _tasks[t];
  452    if (old == 0) {
  453      old = Atomic::cmpxchg(1, &_tasks[t], 0);
  454    }
  455    assert(_tasks[t] == 1, "What else?");
  456    bool res = old != 0;
  457  #ifdef ASSERT
  458    if (!res) {
  459      assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
- 460      Atomic::inc(&_claimed);
+ 460      Atomic::inc((volatile jint*) &_claimed);
  461    }
  462  #endif
  463    return res;
  464  }
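Each _tasks[t] is a one-shot claim flag: a worker reads it and, if it is still 0, tries to CAS it to 1; the thread that installs the 1 owns the task and everyone else learns it was already claimed. The same protocol with std::atomic (a sketch, not the HotSpot Atomic API):

    #include <atomic>

    std::atomic<unsigned> tasks[16];   // one flag per subtask, all start unclaimed (0)

    // Returns false if the caller just claimed task t, true if some
    // thread (possibly the caller, earlier) had already claimed it.
    bool is_task_claimed(unsigned t) {
      unsigned old = tasks[t].load();
      if (old == 0) {
        // Try to claim: 0 -> 1. On failure, 'old' is updated to the value seen.
        tasks[t].compare_exchange_strong(old, 1u);
      }
      return old != 0;
    }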
  465
  469    do {
  470      old = observed;
  471      observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
  472    } while (observed != old);
  473    // If this was the last thread checking in, clear the tasks.
- 474    if (observed+1 == _n_threads) clear();
+ 474    if (observed+1 == (jint)_n_threads) clear();
  475  }
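The do/while above retries a cmpxchg until this thread has atomically added one to _threads_completed; 'observed' ends up holding the value before the increment. With std::atomic the whole loop collapses into a single fetch_add (illustrative):

    #include <atomic>

    std::atomic<unsigned> threads_completed{0};
    const unsigned n_threads = 4;   // hypothetical

    void all_tasks_completed() {
      // fetch_add returns the previous value, like 'observed' in the loop.
      unsigned observed = threads_completed.fetch_add(1);
      if (observed + 1 == n_threads) {
        // last thread in: clear the task flags so the object can be reused
      }
    }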
  476
  477
  478  SubTasksDone::~SubTasksDone() {
- 479    if (_tasks != NULL) FREE_C_HEAP_ARRAY(jint, _tasks);
+ 479    if (_tasks != NULL) FREE_C_HEAP_ARRAY(uint, _tasks);
  488
  489  bool SequentialSubTasksDone::valid() {
  490    return _n_threads > 0;
  491  }
  492
- 493  bool SequentialSubTasksDone::is_task_claimed(int& t) {
+ 493  bool SequentialSubTasksDone::is_task_claimed(uint& t) {
- 494    jint* n_claimed_ptr = &_n_claimed;
+ 494    uint* n_claimed_ptr = &_n_claimed;
  495    t = *n_claimed_ptr;
  496    while (t < _n_tasks) {
  497      jint res = Atomic::cmpxchg(t+1, n_claimed_ptr, t);
- 498      if (res == t) {
+ 498      if (res == (jint)t) {
  499        return false;
  500      }
  501      t = *n_claimed_ptr;
  502    }
  503    return true;
  504  }
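Unlike the per-task flags above, SequentialSubTasksDone hands tasks out through a ticket counter: a claim attempt CASes _n_claimed from t to t+1, so successful claimers receive strictly increasing task ids. The same loop with std::atomic (a sketch; compare_exchange_weak reloads t on failure, which folds in the explicit re-read of the HotSpot version):

    #include <atomic>

    std::atomic<unsigned> n_claimed{0};
    const unsigned n_tasks = 8;   // hypothetical

    // On false, t holds the task id this thread just claimed;
    // on true, every task has already been handed out.
    bool is_task_claimed(unsigned& t) {
      t = n_claimed.load();
      while (t < n_tasks) {
        if (n_claimed.compare_exchange_weak(t, t + 1)) {
          return false;   // took ticket t
        }
        // t now holds the current counter value; retry
      }
      return true;
    }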
  505
  506  bool SequentialSubTasksDone::all_tasks_completed() {
- 507    jint* n_completed_ptr = &_n_completed;
+ 507    uint* n_completed_ptr = &_n_completed;
- 508    jint complete = *n_completed_ptr;
+ 508    uint complete = *n_completed_ptr;
  509    while (true) {
- 510      jint res = Atomic::cmpxchg(complete+1, n_completed_ptr, complete);
+ 510      uint res = Atomic::cmpxchg(complete+1, n_completed_ptr, complete);
  511      if (res == complete) {
  512        break;
  513      }
  514      complete = res;
  515    }