@@ -985,10 +985,20 @@
   // Initialize weak reference processing.
   virtual void ref_processing_init();
 
   void set_par_threads(int t) {
     SharedHeap::set_par_threads(t);
+    // Done in SharedHeap but oddly there are
+    // two _process_strong_tasks's in a G1CollectedHeap
+    // so do it here too.
+    _process_strong_tasks->set_n_threads(t);
+  }
+
+  // Set _n_par_threads according to a policy TBD.
+  void set_par_threads();
+
+  void set_n_termination(int t) {
     _process_strong_tasks->set_n_threads(t);
   }
 
   virtual CollectedHeap::Name kind() const {
     return CollectedHeap::G1CollectedHeap;
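
The hunk above propagates the parallel worker count to two places: SharedHeap::set_par_threads(t) updates the base class's _process_strong_tasks, and the G1-local copy is updated again right after, while the new set_n_termination(int) lets the termination count be set on its own. Below is a minimal standalone sketch of that pattern, assuming toy SubTasksDoneSketch / SharedHeapSketch / G1HeapSketch classes; the names are hypothetical and this is not HotSpot code.

#include <cstdio>

// Hypothetical stand-in for HotSpot's SubTasksDone.
struct SubTasksDoneSketch {
  int _n_threads = 1;
  void set_n_threads(int t) { _n_threads = t; }
};

struct SharedHeapSketch {
  SubTasksDoneSketch _process_strong_tasks;        // base-class task set
  virtual ~SharedHeapSketch() {}
  virtual void set_par_threads(int t) {
    _process_strong_tasks.set_n_threads(t);
  }
};

struct G1HeapSketch : SharedHeapSketch {
  // A second member with the same name, shadowing the base-class field;
  // this mirrors the "two _process_strong_tasks's" the comment above refers to.
  SubTasksDoneSketch _process_strong_tasks;

  void set_par_threads(int t) override {
    SharedHeapSketch::set_par_threads(t);          // done in the base class...
    _process_strong_tasks.set_n_threads(t);        // ...and again for the local set
  }

  // Counterpart of set_n_termination(int): adjust only the task set used for
  // termination, without re-running the base-class bookkeeping.
  void set_n_termination(int t) {
    _process_strong_tasks.set_n_threads(t);
  }
};

int main() {
  G1HeapSketch heap;
  heap.set_par_threads(4);
  heap.set_n_termination(2);
  std::printf("local task threads: %d\n", heap._process_strong_tasks._n_threads);
  return 0;
}

With a C++11 compiler this prints 2, showing that set_n_termination() changes the local count independently of set_par_threads().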
@@ -1274,10 +1284,11 @@
   // setting the claim value of the second and subsequent regions of the
   // chunk.) For now requires that "doHeapRegion" always returns "false",
   // i.e., that a closure never attempt to abort a traversal.
   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
                                        int worker,
+                                       int no_of_par_workers,
                                        jint claim_value);
 
   // It resets all the region claim values to the default.
   void reset_heap_region_claim_values();
 
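
heap_region_par_iterate_chunked() hands out chunks of the region sequence to workers and uses claim values so that each region ends up processed by exactly one worker; the added no_of_par_workers parameter tells the method how many workers actually participate, and reset_heap_region_claim_values() later returns every region to the default claim. The following is a standalone sketch of claim-value based parallel iteration, assuming a toy Region array and a simplified per-worker start offset in place of the real chunking policy; all names are hypothetical and this illustrates the technique, not the HotSpot implementation.

#include <atomic>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

// Toy heap region with a claim value; 0 plays the role of the default
// ("unclaimed") claim value.
struct Region {
  int index = 0;
  std::atomic<int> claim{0};
};

// Analogue of HeapRegionClosure::doHeapRegion(); returning false means the
// traversal is never aborted, matching the requirement in the comment above.
static bool do_region(Region& r, int worker) {
  std::printf("worker %d processed region %d\n", worker, r.index);
  return false;
}

// Every worker scans the whole array but only processes regions whose claim
// it flips from 0 to claim_value, so each region is visited exactly once.
// The worker-specific start offset stands in for the real chunking policy.
static void par_iterate(std::vector<Region>& regions,
                        int worker,
                        int no_of_par_workers,
                        int claim_value) {
  const size_t n = regions.size();
  const size_t start = n * static_cast<size_t>(worker) / no_of_par_workers;
  for (size_t i = 0; i < n; ++i) {
    Region& r = regions[(start + i) % n];
    int expected = 0;
    if (r.claim.compare_exchange_strong(expected, claim_value)) {
      if (do_region(r, worker)) return;            // abort is not expected
    }
  }
}

// Analogue of reset_heap_region_claim_values(): back to the default claim.
static void reset_claim_values(std::vector<Region>& regions) {
  for (Region& r : regions) r.claim.store(0);
}

int main() {
  std::vector<Region> regions(16);
  for (int i = 0; i < 16; ++i) regions[i].index = i;

  const int no_of_par_workers = 4;
  const int claim_value = 1;

  std::vector<std::thread> workers;
  for (int w = 0; w < no_of_par_workers; ++w) {
    workers.emplace_back(par_iterate, std::ref(regions), w,
                         no_of_par_workers, claim_value);
  }
  for (std::thread& t : workers) t.join();

  reset_claim_values(regions);
  return 0;
}

Built with a C++11 compiler (plus -pthread), each of the 16 regions is printed exactly once regardless of how the four workers interleave, because only one compare_exchange_strong() can succeed per region for a given claim value.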