Merge

author:      jwilhelm
date:        Tue, 28 Apr 2015 23:37:11 +0200
changeset:   30572 3d36f972d68b
parent:      30571 9223db5721fe (diff)
parent:      30570 bc171531c562 (current diff)
child:       30573 a0bd2a51cdb3
description:
Merge
files:
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Tue Apr 28 20:02:31 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Tue Apr 28 23:37:11 2015 +0200
@@ -83,7 +83,7 @@
                             &_retained_old_gc_alloc_region);
 }
 
-void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
+void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
   AllocationContext_t context = AllocationContext::current();
   evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                          old_gc_alloc_region(context)->count());
@@ -99,8 +99,8 @@
   }
 
   if (ResizePLAB) {
-    _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz(no_of_gc_workers);
-    _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz(no_of_gc_workers);
+    _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz();
+    _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz();
   }
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Tue Apr 28 20:02:31 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Tue Apr 28 23:37:11 2015 +0200
@@ -53,7 +53,7 @@
    virtual void release_mutator_alloc_region() = 0;
 
    virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
-   virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
+   virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
    virtual void abandon_gc_alloc_regions() = 0;
 
    virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
@@ -114,7 +114,7 @@
   virtual void release_mutator_alloc_region();
 
   virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
-  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
+  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
   virtual void abandon_gc_alloc_regions();
 
   virtual bool is_retained_old_region(HeapRegion* hr) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 28 20:02:31 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 28 23:37:11 2015 +0200
@@ -5439,7 +5439,7 @@
     phase_times->record_string_dedup_fixup_time(fixup_time_ms);
   }
 
-  _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
+  _allocator->release_gc_alloc_regions(evacuation_info);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   // Reset and re-enable the hot card cache.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Apr 28 20:02:31 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Apr 28 23:37:11 2015 +0200
@@ -276,7 +276,7 @@
   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
   // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
+  void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
   // It does any cleanup that needs to be done on the GC alloc regions
   // before a Full GC.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Apr 28 20:02:31 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Apr 28 23:37:11 2015 +0200
@@ -48,7 +48,7 @@
 }
 
 size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
-  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
+  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(G1CollectedHeap::heap()->workers()->active_workers());
   // Prevent humongous PLAB sizes for two reasons:
   // * PLABs are allocated using a similar paths as oops, but should
   //   never be in a humongous region
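For context (not part of the changeset): PLABStats no longer latches a per-worker size. G1 now passes the number of currently active GC workers when it asks for a desired PLAB size, and the latched aggregate ("net") value is divided at query time. A minimal standalone sketch of that division, with illustrative names standing in for PLABStats::desired_plab_sz() and align_object_size():

#include <cassert>
#include <cstddef>

// Sketch only: the real code also aligns the result to the object size,
// and G1 additionally clamps it below the humongous-object threshold.
size_t per_worker_plab_words(size_t net_plab_words, unsigned active_workers) {
  assert(active_workers > 0 && "number of GC workers must be positive");
  return net_plab_words / active_workers;
}
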
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Apr 28 20:02:31 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Apr 28 23:37:11 2015 +0200
@@ -1033,7 +1033,7 @@
   to()->set_concurrent_iteration_safe_limit(to()->top());
 
   if (ResizePLAB) {
-    plab_stats()->adjust_desired_plab_sz(n_workers);
+    plab_stats()->adjust_desired_plab_sz();
   }
 
   if (PrintGC && !PrintGCDetails) {
@@ -1071,6 +1071,10 @@
   _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 }
 
+size_t ParNewGeneration::desired_plab_sz() {
+  return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
+}
+
 static int sum;
 void ParNewGeneration::waste_some_time() {
   for (int i = 0; i < 100; i++) {
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Tue Apr 28 20:02:31 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Tue Apr 28 23:37:11 2015 +0200
@@ -411,9 +411,7 @@
     return &_plab_stats;
   }
 
-  size_t desired_plab_sz() {
-    return _plab_stats.desired_plab_sz();
-  }
+  size_t desired_plab_sz();
 
   const ParNewTracer* gc_tracer() const {
     return &_gc_tracer;
--- a/hotspot/src/share/vm/gc_implementation/shared/plab.cpp	Tue Apr 28 20:02:31 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.cpp	Tue Apr 28 23:37:11 2015 +0200
@@ -109,10 +109,17 @@
   }
 }
 
-// Compute desired plab size and latch result for later
+// Calculates the PLAB size for the current number of GC worker threads.
+size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
+  assert(no_of_gc_workers > 0, "Number of GC workers should be larger than zero");
+
+  return align_object_size(_desired_net_plab_sz / MAX2(no_of_gc_workers, 1U));
+}
+
+// Compute the desired PLAB size assuming a single GC worker thread and latch the result for later
 // use. This should be called once at the end of parallel
 // scavenge; it clears the sensor accumulators.
-void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
+void PLABStats::adjust_desired_plab_sz() {
   assert(ResizePLAB, "Not set");
 
   assert(is_object_aligned(max_size()) && min_size() <= max_size(),
@@ -135,7 +142,8 @@
     target_refills = 1;
   }
   size_t used = _allocated - _wasted - _unused;
-  size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
+  // Computed as if there were a single GC worker thread; desired_plab_sz() scales by the active worker count.
+  size_t recent_plab_sz = used / target_refills;
   // Take historical weighted average
   _filter.sample(recent_plab_sz);
   // Clip from above and below, and align to object boundary
@@ -146,7 +154,7 @@
   if (PrintPLAB) {
     gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
   }
-  _desired_plab_sz = new_plab_sz;
+  _desired_net_plab_sz = new_plab_sz;
 
   reset();
 }
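
To make the new split easier to follow: the PLAB statistics (_allocated, _wasted, _unused) are accumulated across all workers during a pause; adjust_desired_plab_sz() now latches one aggregate ("net") value from them, and desired_plab_sz(no_of_gc_workers) divides that value by however many workers the next pause actually uses. Below is a compact, self-contained sketch of that two-step flow; the struct and field names are illustrative, and it omits HotSpot's AdaptiveWeightedAverage filter, the min/max clamping, and the object-size alignment:

#include <cassert>
#include <cstddef>

// Illustrative stand-in for PLABStats, not HotSpot code.
struct SimplePLABStats {
  size_t allocated = 0;                  // total words allocated in PLABs
  size_t wasted    = 0;                  // internal fragmentation
  size_t unused    = 0;                  // left over in the last buffer
  size_t desired_net_plab_words = 4096;  // latched aggregate size (illustrative default)

  // Called once at the end of a pause (when ResizePLAB is on):
  // latches an aggregate size that is independent of the worker count.
  void adjust_desired_plab_sz(size_t target_refills) {
    assert(target_refills > 0);
    size_t used = allocated - wasted - unused;
    desired_net_plab_words = used / target_refills;
    allocated = wasted = unused = 0;     // clear the sensor accumulators
  }

  // Called when a worker needs a new PLAB: scales the aggregate value
  // by the number of workers active in the current pause.
  size_t desired_plab_sz(unsigned active_workers) const {
    assert(active_workers > 0);
    return desired_net_plab_words / active_workers;
  }
};
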
--- a/hotspot/src/share/vm/gc_implementation/shared/plab.hpp	Tue Apr 28 20:02:31 2015 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.hpp	Tue Apr 28 23:37:11 2015 +0200
@@ -150,13 +150,13 @@
 
 // PLAB book-keeping.
 class PLABStats VALUE_OBJ_CLASS_SPEC {
-  size_t _allocated;      // Total allocated
-  size_t _wasted;         // of which wasted (internal fragmentation)
-  size_t _undo_wasted;    // of which wasted on undo (is not used for calculation of PLAB size)
-  size_t _unused;         // Unused in last buffer
-  size_t _desired_plab_sz;// Output of filter (below), suitably trimmed and quantized
+  size_t _allocated;           // Total allocated
+  size_t _wasted;              // of which wasted (internal fragmentation)
+  size_t _undo_wasted;         // of which wasted on undo (is not used for calculation of PLAB size)
+  size_t _unused;              // Unused in last buffer
+  size_t _desired_net_plab_sz; // Output of filter (below), suitably trimmed and quantized
   AdaptiveWeightedAverage
-         _filter;         // Integrator with decay
+         _filter;              // Integrator with decay
 
   void reset() {
     _allocated   = 0;
@@ -165,12 +165,12 @@
     _unused      = 0;
   }
  public:
-  PLABStats(size_t desired_plab_sz_, unsigned wt) :
+  PLABStats(size_t desired_net_plab_sz_, unsigned wt) :
     _allocated(0),
     _wasted(0),
     _undo_wasted(0),
     _unused(0),
-    _desired_plab_sz(desired_plab_sz_),
+    _desired_net_plab_sz(desired_net_plab_sz_),
     _filter(wt)
   { }
 
@@ -182,13 +182,12 @@
     return PLAB::max_size();
   }
 
-  size_t desired_plab_sz() {
-    return _desired_plab_sz;
-  }
+  // Calculates the PLAB size for the current number of GC worker threads.
+  size_t desired_plab_sz(uint no_of_gc_workers);
 
-  // Updates the current desired PLAB size. Computes the new desired PLAB size,
+  // Updates the current desired PLAB size. Computes the new desired PLAB size assuming one GC worker thread,
   // updates _desired_plab_sz and clears sensor accumulators.
-  void adjust_desired_plab_sz(uint no_of_gc_workers);
+  void adjust_desired_plab_sz();
 
   void add_allocated(size_t v) {
     Atomic::add_ptr(v, &_allocated);
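
As a usage illustration, again outside the patch and reusing the SimplePLABStats sketch above, the call pattern after this change is roughly: query with the active worker count while evacuating, then latch a new aggregate value once per pause if resizing is enabled. The worker count and refill target below are placeholder values:

// During evacuation: each worker sizes its PLAB from the shared stats.
SimplePLABStats stats;
unsigned active_workers = 8;                         // e.g. the work gang's active_workers()
size_t plab_words = stats.desired_plab_sz(active_workers);

// ... allocation happens; allocated / wasted / unused are updated ...

// Once, at the end of the pause, if PLAB resizing is enabled:
stats.adjust_desired_plab_sz(/* target_refills = */ 100);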