150 |
150 |
151 void RefProcTaskExecutor::execute(ProcessTask& task) |
151 void RefProcTaskExecutor::execute(ProcessTask& task) |
152 { |
152 { |
153 ParallelScavengeHeap* heap = PSParallelCompact::gc_heap(); |
153 ParallelScavengeHeap* heap = PSParallelCompact::gc_heap(); |
154 uint parallel_gc_threads = heap->gc_task_manager()->workers(); |
154 uint parallel_gc_threads = heap->gc_task_manager()->workers(); |
|
155 uint active_gc_threads = heap->gc_task_manager()->active_workers(); |
155 RegionTaskQueueSet* qset = ParCompactionManager::region_array(); |
156 RegionTaskQueueSet* qset = ParCompactionManager::region_array(); |
156 ParallelTaskTerminator terminator(parallel_gc_threads, qset); |
157 ParallelTaskTerminator terminator(active_gc_threads, qset); |
157 GCTaskQueue* q = GCTaskQueue::create(); |
158 GCTaskQueue* q = GCTaskQueue::create(); |
158 for(uint i=0; i<parallel_gc_threads; i++) { |
159 for(uint i=0; i<parallel_gc_threads; i++) { |
159 q->enqueue(new RefProcTaskProxy(task, i)); |
160 q->enqueue(new RefProcTaskProxy(task, i)); |
160 } |
161 } |
161 if (task.marks_oops_alive()) { |
162 if (task.marks_oops_alive()) { |
162 if (parallel_gc_threads>1) { |
163 if (parallel_gc_threads>1) { |
163 for (uint j=0; j<parallel_gc_threads; j++) { |
164 for (uint j=0; j<active_gc_threads; j++) { |
164 q->enqueue(new StealMarkingTask(&terminator)); |
165 q->enqueue(new StealMarkingTask(&terminator)); |
165 } |
166 } |
166 } |
167 } |
167 } |
168 } |
168 PSParallelCompact::gc_task_manager()->execute_and_wait(q); |
169 PSParallelCompact::gc_task_manager()->execute_and_wait(q); |

//
// StealRegionCompactionTask
//

|
220 StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t): |
220 StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t): |
221 _terminator(t) {} |
221 _terminator(t) {} |
222 |
222 |
223 void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) { |
223 void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) { |
224 assert(Universe::heap()->is_gc_active(), "called outside gc"); |
224 assert(Universe::heap()->is_gc_active(), "called outside gc"); |
226 NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask", |
226 NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask", |
227 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); |
227 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); |
228 |
228 |
229 ParCompactionManager* cm = |
229 ParCompactionManager* cm = |
230 ParCompactionManager::gc_thread_compaction_manager(which); |
230 ParCompactionManager::gc_thread_compaction_manager(which); |
|
231 |
|
232 |
|
233 // If not all threads are active, get a draining stack |
|
234 // from the list. Else, just use this threads draining stack. |
|
235 uint which_stack_index; |
|
236 bool use_all_workers = manager->all_workers_active(); |
|
237 if (use_all_workers) { |
|
238 which_stack_index = which; |
|
239 assert(manager->active_workers() == ParallelGCThreads, |
|
240 err_msg("all_workers_active has been incorrectly set: " |
|
241 " active %d ParallelGCThreads %d", manager->active_workers(), |
|
242 ParallelGCThreads)); |
|
243 } else { |
|
244 which_stack_index = ParCompactionManager::pop_recycled_stack_index(); |
|
245 } |
|
246 |
|
247 cm->set_region_stack_index(which_stack_index); |
|
248 cm->set_region_stack(ParCompactionManager::region_list(which_stack_index)); |
|
249 if (TraceDynamicGCThreads) { |
|
250 gclog_or_tty->print_cr("StealRegionCompactionTask::do_it " |
|
251 "region_stack_index %d region_stack = 0x%x " |
|
252 " empty (%d) use all workers %d", |
|
253 which_stack_index, ParCompactionManager::region_list(which_stack_index), |
|
254 cm->region_stack()->is_empty(), |
|
255 use_all_workers); |
|
256 } |
231 |
257 |
232 // Has to drain stacks first because there may be regions on |
258 // Has to drain stacks first because there may be regions on |
233 // preloaded onto the stack and this thread may never have |
259 // preloaded onto the stack and this thread may never have |
234 // done a draining task. Are the draining tasks needed? |
260 // done a draining task. Are the draining tasks needed? |
235 |
261 |
283 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); |
309 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); |
284 |
310 |
285 ParCompactionManager* cm = |
311 ParCompactionManager* cm = |
286 ParCompactionManager::gc_thread_compaction_manager(which); |
312 ParCompactionManager::gc_thread_compaction_manager(which); |
287 |
313 |
|
314 uint which_stack_index; |
|
315 bool use_all_workers = manager->all_workers_active(); |
|
316 if (use_all_workers) { |
|
317 which_stack_index = which; |
|
318 assert(manager->active_workers() == ParallelGCThreads, |
|
319 err_msg("all_workers_active has been incorrectly set: " |
|
320 " active %d ParallelGCThreads %d", manager->active_workers(), |
|
321 ParallelGCThreads)); |
|
322 } else { |
|
323 which_stack_index = stack_index(); |
|
324 } |
|
325 |
|
326 cm->set_region_stack(ParCompactionManager::region_list(which_stack_index)); |
|
327 if (TraceDynamicGCThreads) { |
|
328 gclog_or_tty->print_cr("DrainStacksCompactionTask::do_it which = %d " |
|
329 "which_stack_index = %d/empty(%d) " |
|
330 "use all workers %d", |
|
331 which, which_stack_index, |
|
332 cm->region_stack()->is_empty(), |
|
333 use_all_workers); |
|
334 } |
|
335 |
|
336 cm->set_region_stack_index(which_stack_index); |
|
337 |
288 // Process any regions already in the compaction managers stacks. |
338 // Process any regions already in the compaction managers stacks. |
289 cm->drain_region_stacks(); |
339 cm->drain_region_stacks(); |
290 } |
340 |
|
341 assert(cm->region_stack()->is_empty(), "Not empty"); |
|
342 |
|
343 if (!use_all_workers) { |
|
344 // Always give up the region stack. |
|
345 assert(cm->region_stack() == |
|
346 ParCompactionManager::region_list(cm->region_stack_index()), |
|
347 "region_stack and region_stack_index are inconsistent"); |
|
348 ParCompactionManager::push_recycled_stack_index(cm->region_stack_index()); |
|
349 |
|
350 if (TraceDynamicGCThreads) { |
|
351 void* old_region_stack = (void*) cm->region_stack(); |
|
352 int old_region_stack_index = cm->region_stack_index(); |
|
353 gclog_or_tty->print_cr("Pushing region stack 0x%x/%d", |
|
354 old_region_stack, old_region_stack_index); |
|
355 } |
|
356 |
|
357 cm->set_region_stack(NULL); |
|
358 cm->set_region_stack_index((uint)max_uintx); |
|
359 } |
|
360 } |