Merge
author stefank
Fri, 22 May 2015 10:44:24 +0000
changeset 30877 dae0933ba77a
parent 30872 eccf4a34928e (current diff)
parent 30876 44a71334fd94 (diff)
child 30879 31b89265eed8
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Fri May 22 10:44:24 2015 +0000
@@ -5072,14 +5072,9 @@
   FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   // Choose to use the number of GC workers most recently set
-  // into "active_workers".  If active_workers is not set, set it
-  // to ParallelGCThreads.
+  // into "active_workers".
   uint n_workers = workers->active_workers();
-  if (n_workers == 0) {
-    assert(n_workers > 0, "Should have been set during scavenge");
-    n_workers = ParallelGCThreads;
-    workers->set_active_workers(n_workers);
-  }
+
   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
 
   StrongRootsScope srs(n_workers);
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp	Fri May 22 10:44:24 2015 +0000
@@ -1475,9 +1475,9 @@
     _ref_processor =
       new ReferenceProcessor(_reserved,                  // span
                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                             (int) ParallelGCThreads,    // mt processing degree
+                             (uint) ParallelGCThreads,   // mt processing degree
                              refs_discovery_is_mt(),     // mt discovery
-                             (int) ParallelGCThreads,    // mt discovery degree
+                             (uint) ParallelGCThreads,   // mt discovery degree
                              refs_discovery_is_atomic(), // atomic_discovery
                              NULL);                      // is_alive_non_header
   }
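
ParallelGCThreads is a uintx flag, and the degree arguments above change from (int) to (uint) casts, which suggests the updated ReferenceProcessor now takes unsigned degrees; the same int-to-uint substitution repeats below in g1CollectedHeap.cpp, psParallelCompact.cpp, and psScavenge.cpp. A minimal sketch of the signedness point, using stand-in declarations rather than the real HotSpot types:

#include <cstdint>

typedef std::uintptr_t uintx;       // stand-in for HotSpot's flag type
static uintx ParallelGCThreads = 4; // the flag is never 0 after this changeset

// Hypothetical receiver with an unsigned degree parameter, mirroring the
// annotated ReferenceProcessor constructor arguments in the hunk above.
static void set_mt_processing_degree(unsigned degree) {
  (void)degree;
}

int main() {
  set_mt_processing_degree((unsigned) ParallelGCThreads); // was (int) before
  return 0;
}
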
--- a/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp	Fri May 22 10:44:24 2015 +0000
@@ -158,20 +158,10 @@
   hr->calc_gc_efficiency();
 }
 
-void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions,
+void CollectionSetChooser::prepare_for_par_region_addition(uint n_threads,
+                                                           uint n_regions,
                                                            uint chunk_size) {
   _first_par_unreserved_idx = 0;
-  uint n_threads = (uint) ParallelGCThreads;
-  if (UseDynamicNumberOfGCThreads) {
-    assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
-      "Should have been set earlier");
-    // This is defensive code. As the assertion above says, the number
-    // of active threads should be > 0, but in case there is some path
-    // or some improperly initialized variable with leads to no
-    // active threads, protect against that in a product build.
-    n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
-                     1U);
-  }
   uint max_waste = n_threads * chunk_size;
   // it should be aligned with respect to chunk_size
   uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
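
prepare_for_par_region_addition() still sizes the parallel work in chunk_size units: aligned_n_regions rounds the region count up to a whole number of chunks, and max_waste caps the indexes left unused when each of the n_threads workers holds a partially filled chunk. A self-contained check of that arithmetic, with hypothetical inputs:

#include <cstdio>

// Rounds n_regions up to a whole number of chunk_size chunks, as in
// prepare_for_par_region_addition() above.
static unsigned aligned_regions(unsigned n_regions, unsigned chunk_size) {
  return (n_regions + chunk_size - 1) / chunk_size * chunk_size;
}

int main() {
  const unsigned n_threads = 4, n_regions = 1021, chunk_size = 16; // hypothetical
  printf("aligned_n_regions = %u\n", aligned_regions(n_regions, chunk_size)); // 1024
  printf("max_waste         = %u\n", n_threads * chunk_size);                 // 64
  return 0;
}
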
--- a/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp	Fri May 22 10:44:24 2015 +0000
@@ -121,7 +121,7 @@
 
   // Must be called before calls to claim_array_chunk().
   // n_regions is the number of regions, chunk_size the chunk size.
-  void prepare_for_par_region_addition(uint n_regions, uint chunk_size);
+  void prepare_for_par_region_addition(uint n_threads, uint n_regions, uint chunk_size);
   // Returns the first index in a contiguous chunk of chunk_size indexes
   // that the calling thread has reserved.  These must be set by the
   // calling thread using set_region() (to NULL if necessary).
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp	Fri May 22 10:44:24 2015 +0000
@@ -35,7 +35,7 @@
 {
   // Ergonomically select initial concurrent refinement parameters
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
+    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, (intx)ParallelGCThreads);
   }
   set_green_zone(G1ConcRefinementGreenZone);
 
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Fri May 22 10:44:24 2015 +0000
@@ -518,7 +518,7 @@
   _markStack(this),
   // _finger set in set_non_marking_state
 
-  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
+  _max_worker_id((uint)ParallelGCThreads),
   // _active_tasks set in set_non_marking_state
   // _tasks set inside the constructor
   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
@@ -1218,15 +1218,13 @@
     "Maximum number of marking threads exceeded");
 
   uint active_workers = MAX2(1U, parallel_marking_threads());
+  assert(active_workers > 0, "Should have been set");
 
   // Parallel task terminator is set in "set_concurrency_and_phase()"
   set_concurrency_and_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
   _parallel_workers->set_active_workers(active_workers);
-  // Don't set _n_par_threads because it affects MT in process_roots()
-  // and the decisions on that MT processing is made elsewhere.
-  assert(_parallel_workers->active_workers() > 0, "Should have been set");
   _parallel_workers->run_task(&markingTask);
   print_stats();
 }
@@ -2007,7 +2005,7 @@
 
   // this will also free any regions totally full of garbage objects,
   // and sort the regions.
-  g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
+  g1h->g1_policy()->record_concurrent_mark_cleanup_end();
 
   // Statistics.
   double end = os::elapsedTime();
@@ -2593,11 +2591,6 @@
 
   // this is remark, so we'll use up all active threads
   uint active_workers = g1h->workers()->active_workers();
-  if (active_workers == 0) {
-    assert(active_workers > 0, "Should have been set earlier");
-    active_workers = (uint) ParallelGCThreads;
-    g1h->workers()->set_active_workers(active_workers);
-  }
   set_concurrency_and_phase(active_workers, false /* concurrent */);
   // Leave _parallel_marking_threads at it's
   // value originally calculated in the ConcurrentMark
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri May 22 10:44:24 2015 +0000
@@ -1326,15 +1326,9 @@
         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                 workers()->active_workers(),
                                                 Threads::number_of_non_daemon_threads());
-      assert(UseDynamicNumberOfGCThreads ||
-             n_workers == workers()->total_workers(),
-             "If not dynamic should be using all the  workers");
       workers()->set_active_workers(n_workers);
 
       ParRebuildRSTask rebuild_rs_task(this);
-      assert(UseDynamicNumberOfGCThreads ||
-             workers()->active_workers() == workers()->total_workers(),
-             "Unless dynamic should use total workers");
       workers()->run_task(&rebuild_rs_task);
 
       // Rebuild the strong code root lists for each region
@@ -1758,7 +1752,7 @@
   _allocator = G1Allocator::create_allocator(this);
   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
 
-  int n_queues = MAX2((int)ParallelGCThreads, 1);
+  int n_queues = (int)ParallelGCThreads;
   _task_queues = new RefToScanQueueSet(n_queues);
 
   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
@@ -2070,11 +2064,11 @@
     new ReferenceProcessor(mr,    // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                 // mt processing
-                           (int) ParallelGCThreads,
+                           (uint) ParallelGCThreads,
                                 // degree of mt processing
                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
                                 // mt discovery
-                           (int) MAX2(ParallelGCThreads, ConcGCThreads),
+                           (uint) MAX2(ParallelGCThreads, ConcGCThreads),
                                 // degree of mt discovery
                            false,
                                 // Reference discovery is not atomic
@@ -2087,11 +2081,11 @@
     new ReferenceProcessor(mr,    // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                 // mt processing
-                           MAX2((int)ParallelGCThreads, 1),
+                           (uint) ParallelGCThreads,
                                 // degree of mt processing
                            (ParallelGCThreads > 1),
                                 // mt discovery
-                           MAX2((int)ParallelGCThreads, 1),
+                           (uint) ParallelGCThreads,
                                 // degree of mt discovery
                            true,
                                 // Reference discovery is atomic
@@ -2491,8 +2485,7 @@
   assert(_worker_cset_start_region != NULL, "sanity");
   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
 
-  int n_queues = MAX2((int)ParallelGCThreads, 1);
-  for (int i = 0; i < n_queues; i++) {
+  for (uint i = 0; i < ParallelGCThreads; i++) {
     _worker_cset_start_region[i] = NULL;
     _worker_cset_start_region_time_stamp[i] = 0;
   }
@@ -2530,9 +2523,6 @@
   result = g1_policy()->collection_set();
   uint cs_size = g1_policy()->cset_region_length();
   uint active_workers = workers()->active_workers();
-  assert(UseDynamicNumberOfGCThreads ||
-           active_workers == workers()->total_workers(),
-           "Unless dynamic should use total workers");
 
   uint end_ind   = (cs_size * worker_i) / active_workers;
   uint start_ind = 0;
@@ -3031,9 +3021,6 @@
     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
 
       G1ParVerifyTask task(this, vo);
-      assert(UseDynamicNumberOfGCThreads ||
-        workers()->active_workers() == workers()->total_workers(),
-        "If not dynamic should be using all the workers");
       workers()->run_task(&task);
       if (task.failures()) {
         failures = true;
@@ -3682,9 +3669,6 @@
     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                                   workers()->active_workers(),
                                                                   Threads::number_of_non_daemon_threads());
-    assert(UseDynamicNumberOfGCThreads ||
-           active_workers == workers()->total_workers(),
-           "If not dynamic should be using all the  workers");
     workers()->set_active_workers(active_workers);
 
     double pause_start_sec = os::elapsedTime();
@@ -3859,8 +3843,7 @@
 
         if (evacuation_failed()) {
           _allocator->set_used(recalculate_used());
-          uint n_queues = MAX2((int)ParallelGCThreads, 1);
-          for (uint i = 0; i < n_queues; i++) {
+          for (uint i = 0; i < ParallelGCThreads; i++) {
             if (_evacuation_failed_info_array[i].has_failed()) {
               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
             }
@@ -5189,7 +5172,7 @@
 };
 
 // Weak Reference processing during an evacuation pause (part 1).
-void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
+void G1CollectedHeap::process_discovered_references() {
   double ref_proc_start = os::elapsedTime();
 
   ReferenceProcessor* rp = _ref_processor_stw;
@@ -5216,7 +5199,7 @@
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.
 
-  assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers");
+  uint no_of_gc_workers = workers()->active_workers();
 
   G1ParPreserveCMReferentsTask keep_cm_referents(this,
                                                  no_of_gc_workers,
@@ -5297,7 +5280,7 @@
 }
 
 // Weak Reference processing during an evacuation pause (part 2).
-void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
+void G1CollectedHeap::enqueue_discovered_references() {
   double ref_enq_start = os::elapsedTime();
 
   ReferenceProcessor* rp = _ref_processor_stw;
@@ -5311,12 +5294,12 @@
   } else {
     // Parallel reference enqueueing
 
-    assert(no_of_gc_workers == workers()->active_workers(),
-           "Need to reset active workers");
-    assert(rp->num_q() == no_of_gc_workers, "sanity");
-    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
-
-    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
+    uint n_workers = workers()->active_workers();
+
+    assert(rp->num_q() == n_workers, "sanity");
+    assert(n_workers <= rp->max_num_q(), "sanity");
+
+    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, n_workers);
     rp->enqueue_discovered_references(&par_task_executor);
   }
 
@@ -5347,9 +5330,6 @@
   hot_card_cache->set_use_cache(false);
 
   const uint n_workers = workers()->active_workers();
-  assert(UseDynamicNumberOfGCThreads ||
-         n_workers == workers()->total_workers(),
-         "If not dynamic should be using all the  workers");
 
   init_for_evac_failure(NULL);
 
@@ -5365,12 +5345,9 @@
       ClassLoaderDataGraph::clear_claimed_marks();
     }
 
-     // The individual threads will set their evac-failure closures.
-     if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
-     // These tasks use ShareHeap::_process_strong_tasks
-     assert(UseDynamicNumberOfGCThreads ||
-            workers()->active_workers() == workers()->total_workers(),
-            "If not dynamic should be using all the  workers");
+    // The individual threads will set their evac-failure closures.
+    if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
+
     workers()->run_task(&g1_par_task);
     end_par_time_sec = os::elapsedTime();
 
@@ -5395,7 +5372,7 @@
   // as we may have to copy some 'reachable' referent
   // objects (and their reachable sub-graphs) that were
   // not copied during the pause.
-  process_discovered_references(n_workers);
+  process_discovered_references();
 
   if (G1StringDedup::is_enabled()) {
     double fixup_start = os::elapsedTime();
@@ -5437,7 +5414,7 @@
   // will log these updates (and dirty their associated
   // cards). We need these updates logged to update any
   // RSets.
-  enqueue_discovered_references(n_workers);
+  enqueue_discovered_references();
 
   redirty_logged_cards();
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
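
The recurring simplification in this file: process_discovered_references() and enqueue_discovered_references() used to take no_of_gc_workers and assert that it still matched workers()->active_workers(); now each reads the count from the work gang at the point of use, so both the parameter and the consistency asserts disappear. record_concurrent_mark_cleanup_end() in g1CollectorPolicy.cpp below follows the same pattern. A sketch of the before/after shape, with illustrative names only:

// Stand-in gang exposing only what the sketch needs.
struct GangSketch {
  unsigned n;
  unsigned active_workers() const { return n; }
};

// Before: the caller passed no_of_gc_workers and the callee re-asserted that
// it matched workers()->active_workers(). After: the callee reads the count
// from the gang itself, so the parameter and the asserts disappear.
static void process_discovered_references_sketch(const GangSketch& gang) {
  unsigned no_of_gc_workers = gang.active_workers(); // single source of truth
  (void)no_of_gc_workers; // ... size the parallel ref-processing tasks here ...
}

int main() {
  GangSketch gang = { 8 };
  process_discovered_references_sketch(gang);
  return 0;
}
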
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Fri May 22 10:44:24 2015 +0000
@@ -606,11 +606,11 @@
 
   // Process any reference objects discovered during
   // an incremental evacuation pause.
-  void process_discovered_references(uint no_of_gc_workers);
+  void process_discovered_references();
 
   // Enqueue any remaining discovered references
   // after processing.
-  void enqueue_discovered_references(uint no_of_gc_workers);
+  void enqueue_discovered_references();
 
 public:
   FlexibleWorkGang* workers() const { return _workers; }
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Fri May 22 10:44:24 2015 +0000
@@ -1587,14 +1587,17 @@
 }
 
 void
-G1CollectorPolicy::record_concurrent_mark_cleanup_end(uint n_workers) {
+G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
   _collectionSetChooser->clear();
 
+  FlexibleWorkGang* workers = _g1->workers();
+  uint n_workers = workers->active_workers();
+
   uint n_regions = _g1->num_regions();
   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
-  _collectionSetChooser->prepare_for_par_region_addition(n_regions, chunk_size);
+  _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
   ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
-  _g1->workers()->run_task(&par_known_garbage_task);
+  workers->run_task(&par_known_garbage_task);
 
   _collectionSetChooser->sort_regions();
 
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Fri May 22 10:44:24 2015 +0000
@@ -692,7 +692,7 @@
 
   // Record start, end, and completion of cleanup.
   void record_concurrent_mark_cleanup_start();
-  void record_concurrent_mark_cleanup_end(uint n_workers);
+  void record_concurrent_mark_cleanup_end();
   void record_concurrent_mark_cleanup_completed();
 
   // Records the information about the heap size for reporting in
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Fri May 22 10:44:24 2015 +0000
@@ -50,8 +50,8 @@
   _par_scan_state = par_scan_state;
   _worker_id = par_scan_state->queue_num();
 
-  assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
-         err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
+  assert(_worker_id < ParallelGCThreads,
+         err_msg("The given worker id %u must be less than the number of threads " UINTX_FORMAT, _worker_id, ParallelGCThreads));
 }
 
 // Generate G1 specialized oop_oop_iterate functions.
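
With the MAX2 wrapper gone, the assert above prints ParallelGCThreads directly, and since the flag is a pointer-width uintx rather than a uint, the format changes from %u to UINTX_FORMAT. A standalone sketch of the same formatting concern, with PRIuPTR standing in for HotSpot's UINTX_FORMAT macro:

#include <cinttypes>
#include <cstdio>

typedef std::uintptr_t uintx;       // stand-in for HotSpot's flag type
static uintx ParallelGCThreads = 4;

int main() {
  unsigned worker_id = 2;
  // A plain %u can be too narrow for a pointer-width uintx; PRIuPTR plays
  // the role UINTX_FORMAT plays in HotSpot's err_msg() above.
  std::printf("worker %u of %" PRIuPTR " threads\n",
              worker_id, ParallelGCThreads);
  return 0;
}
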
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Fri May 22 10:44:24 2015 +0000
@@ -42,7 +42,7 @@
   _cancel(false),
   _empty(true),
   _dropped(0) {
-  _nqueues = MAX2(ParallelGCThreads, (size_t)1);
+  _nqueues = ParallelGCThreads;
   _queues = NEW_C_HEAP_ARRAY(G1StringDedupWorkerQueue, _nqueues, mtGC);
   for (size_t i = 0; i < _nqueues; i++) {
     new (_queues + i) G1StringDedupWorkerQueue(G1StringDedupWorkerQueue::default_segment_size(), _max_cache_size, _max_size);
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp	Fri May 22 10:44:24 2015 +0000
@@ -112,7 +112,7 @@
 };
 
 G1StringDedupEntryCache::G1StringDedupEntryCache() {
-  _nlists = MAX2(ParallelGCThreads, (size_t)1);
+  _nlists = ParallelGCThreads;
   _lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
 }
 
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Fri May 22 10:44:24 2015 +0000
@@ -832,9 +832,9 @@
   _ref_processor =
     new ReferenceProcessor(mr,            // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                           (int) ParallelGCThreads, // mt processing degree
+                           (uint) ParallelGCThreads, // mt processing degree
                            true,          // mt discovery
-                           (int) ParallelGCThreads, // mt discovery degree
+                           (uint) ParallelGCThreads, // mt discovery degree
                            true,          // atomic_discovery
                            &_is_alive_closure); // non-header is alive closure
   _counters = new CollectorCounters("PSParallelCompact", 1);
--- a/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Fri May 22 10:44:24 2015 +0000
@@ -845,9 +845,9 @@
   _ref_processor =
     new ReferenceProcessor(mr,                         // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                           (int) ParallelGCThreads,    // mt processing degree
+                           (uint) ParallelGCThreads,   // mt processing degree
                            true,                       // mt discovery
-                           (int) ParallelGCThreads,    // mt discovery degree
+                           (uint) ParallelGCThreads,   // mt discovery degree
                            true,                       // atomic_discovery
                            NULL);                      // header provides liveness info
 
--- a/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Fri May 22 10:44:24 2015 +0000
@@ -161,7 +161,7 @@
       }
       _debug_perturbation = !_debug_perturbation;
     }
-    assert((new_active_workers <= (uintx) ParallelGCThreads) &&
+    assert((new_active_workers <= ParallelGCThreads) &&
            (new_active_workers >= min_workers),
       "Jiggled active workers too much");
   }
--- a/hotspot/src/share/vm/gc/shared/workgroup.hpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/workgroup.hpp	Fri May 22 10:44:24 2015 +0000
@@ -315,16 +315,20 @@
   uint _active_workers;
  public:
   // Constructor and destructor.
-  // Initialize active_workers to a minimum value.  Setting it to
-  // the parameter "workers" will initialize it to a maximum
-  // value which is not desirable.
   FlexibleWorkGang(const char* name, uint workers,
                    bool are_GC_task_threads,
                    bool  are_ConcurrentGC_threads) :
     WorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads),
-    _active_workers(UseDynamicNumberOfGCThreads ? 1U : ParallelGCThreads) {}
-  // Accessors for fields
-  virtual uint active_workers() const { return _active_workers; }
+    _active_workers(UseDynamicNumberOfGCThreads ? 1U : workers) {}
+
+  // Accessors for fields.
+  virtual uint active_workers() const {
+    assert(_active_workers <= _total_workers,
+           err_msg("_active_workers: %u > _total_workers: %u", _active_workers, _total_workers));
+    assert(UseDynamicNumberOfGCThreads || _active_workers == _total_workers,
+           "Unless dynamic should use total workers");
+    return _active_workers;
+  }
   void set_active_workers(uint v) {
     assert(v <= _total_workers,
            "Trying to set more workers active than there are");
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri May 22 10:56:37 2015 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri May 22 10:44:24 2015 +0000
@@ -1278,10 +1278,8 @@
 
   // Preferred young gen size for "short" pauses:
   // upper bound depends on # of threads and NewRatio.
-  const uintx parallel_gc_threads =
-    (ParallelGCThreads == 0 ? 1 : ParallelGCThreads);
   const size_t preferred_max_new_size_unaligned =
-    MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * parallel_gc_threads));
+    MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
   size_t preferred_max_new_size =
     align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
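
With ParallelGCThreads now guaranteed to be at least 1 by ergonomics, the local parallel_gc_threads fallback is redundant and the flag feeds the sizing formula directly. A quick worked check of the formula with hypothetical inputs (ScaleForWordSize treated as identity here):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  using std::size_t;
  // Hypothetical inputs, only to exercise the formula from the hunk above.
  const size_t max_heap             = 1024u * 1024u * 1024u; // 1 GB
  const size_t NewRatio             = 2;
  const size_t young_gen_per_worker = 64u * 1024u * 1024u;   // 64 MB
  const size_t ParallelGCThreads    = 4;                     // >= 1 by ergonomics

  const size_t preferred_unaligned =
      std::min(max_heap / (NewRatio + 1),                 // ~341 MB
               young_gen_per_worker * ParallelGCThreads); // 256 MB, the smaller
  std::printf("preferred_max_new_size (unaligned) = %zu bytes\n",
              preferred_unaligned);
  return 0;
}
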