Merge
authortschatzl
Wed, 25 Nov 2015 19:27:38 +0000
changeset 34302 c932a347d579
parent 34297 b7ee28694686 (current diff)
parent 34301 080f957bd40f (diff)
child 34304 3063f2e58c1b
Merge
hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Wed Nov 25 19:27:38 2015 +0000
@@ -414,6 +414,11 @@
   return new_obj;
 }
 
+size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
+  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
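+  // Round word_size up to a whole number of regions; e.g. an object spanning
+  // 1.5 regions worth of words needs 2 regions.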
+  return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
+}
+
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
@@ -423,7 +428,7 @@
   verify_region_sets_optional();
 
   uint first = G1_NO_HRM_INDEX;
-  uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
+  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
 
   if (obj_regions == 1) {
     // Only one region to allocate, try to use a fast path by directly allocating
@@ -1023,6 +1028,8 @@
       // collection hoping that there's enough space in the heap.
       result = humongous_obj_allocate(word_size, AllocationContext::current());
       if (result != NULL) {
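+        // Humongous objects are allocated outside the young gen; report their
+        // whole region-aligned footprint to the policy as old gen allocation.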
+        size_t size_in_regions = humongous_obj_size_in_regions(word_size);
+        g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
         return result;
       }
 
@@ -5210,6 +5217,8 @@
 }
 
 void G1CollectedHeap::record_obj_copy_mem_stats() {
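+  // Bytes copied into old gen regions during evacuation also count as old gen
+  // allocation for the IHOP prediction.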
+  g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
+
   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
                                                create_g1_evac_summary(&_old_evac_stats));
 }
@@ -5594,6 +5603,14 @@
         cur->set_young_index_in_cset(-1);
       }
       cur->set_evacuation_failed(false);
+      // When moving a young gen region to old gen, we "allocate" that whole region
+      // there. This is in addition to any already evacuated objects. Notify the
+      // policy about that.
+      // Old gen regions do not cause an additional allocation: both the objects
+      // still in the region and the ones already moved are accounted for elsewhere.
+      if (cur->is_young()) {
+        policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
+      }
       // The region is now considered to be old.
       cur->set_old();
       // Do some allocation statistics accounting. Regions that failed evacuation
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Nov 25 19:27:38 2015 +0000
@@ -1343,6 +1343,10 @@
     return (region_size / 2);
   }
 
+  // Returns the number of regions a humongous object of the given word size
+  // requires.
+  static size_t humongous_obj_size_in_regions(size_t word_size);
+
   // Print the maximum heap capacity.
   virtual size_t max_capacity() const;
 
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Nov 25 19:27:38 2015 +0000
@@ -28,6 +28,7 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1IHOPControl.hpp"
 #include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1Log.hpp"
@@ -38,6 +39,7 @@
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/pair.hpp"
 
 // Different defaults for different number of GC threads
 // They were chosen by running GCOld and SPECjbb on debris with different
@@ -148,7 +150,11 @@
   _recorded_survivor_tail(NULL),
   _survivors_age_table(true),
 
-  _gc_overhead_perc(0.0) {
+  _gc_overhead_perc(0.0),
+
+  _bytes_allocated_in_old_since_last_gc(0),
+  _ihop_control(NULL),
+  _initial_mark_to_mixed() {
 
   // SurvRateGroups below must be initialized after the predictor because they
   // indirectly use it through this object passed to their constructor.
@@ -288,6 +294,10 @@
   _collectionSetChooser = new CollectionSetChooser();
 }
 
+G1CollectorPolicy::~G1CollectorPolicy() {
+  delete _ihop_control;
+}
+
 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
   return _predictor.get_new_prediction(seq);
 }
@@ -317,6 +327,8 @@
   if (max_young_size != MaxNewSize) {
     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
   }
+
+  _ihop_control = create_ihop_control();
 }
 
 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
@@ -522,25 +534,26 @@
   return _young_gen_sizer->max_desired_young_length();
 }
 
-void G1CollectorPolicy::update_young_list_max_and_target_length() {
-  update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
+uint G1CollectorPolicy::update_young_list_max_and_target_length() {
+  return update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
 }
 
-void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
-  update_young_list_target_length(rs_lengths);
+uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
+  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
   update_max_gc_locker_expansion();
+  return unbounded_target_length;
 }
 
-void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
-  _young_list_target_length = bounded_young_list_target_length(rs_lengths);
+uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
+  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
+  _young_list_target_length = young_lengths.first;
+  return young_lengths.second;
 }
 
-void G1CollectorPolicy::update_young_list_target_length() {
-  update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
-}
+G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
+  YoungTargetLengths result;
 
-uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const {
-  // Calculate the absolute and desired min bounds.
+  // Calculate the absolute and desired min bounds first.
 
   // This is how many young regions we already have (currently: the survivors).
   uint base_min_length = recorded_survivor_regions();
@@ -552,15 +565,7 @@
   desired_min_length = MAX2(desired_min_length, absolute_min_length);
   // Calculate the absolute and desired max bounds.
 
-  // We will try our best not to "eat" into the reserve.
-  uint absolute_max_length = 0;
-  if (_free_regions_at_end_of_collection > _reserve_regions) {
-    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
-  }
   uint desired_max_length = calculate_young_list_desired_max_length();
-  if (desired_max_length > absolute_max_length) {
-    desired_max_length = absolute_max_length;
-  }
 
   uint young_list_target_length = 0;
   if (adaptive_young_list_length()) {
@@ -581,6 +586,17 @@
     young_list_target_length = _young_list_fixed_length;
   }
 
+  result.second = young_list_target_length;
+
+  // We will try our best not to "eat" into the reserve.
+  uint absolute_max_length = 0;
+  if (_free_regions_at_end_of_collection > _reserve_regions) {
+    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
+  }
+  if (desired_max_length > absolute_max_length) {
+    desired_max_length = absolute_max_length;
+  }
+
   // Make sure we don't go over the desired max length, nor under the
   // desired min length. In case they clash, desired_min_length wins
   // which is why that test is second.
@@ -595,7 +611,8 @@
          "we should be able to allocate at least one eden region");
   assert(young_list_target_length >= absolute_min_length, "post-condition");
 
-  return young_list_target_length;
+  result.first = young_list_target_length;
+  return result;
 }
 
 uint
@@ -838,6 +855,10 @@
   update_young_list_max_and_target_length();
   update_rs_lengths_prediction();
   _collectionSetChooser->clear();
+
+  _bytes_allocated_in_old_since_last_gc = 0;
+
+  record_pause(FullGC, _full_collection_start_sec, end_sec);
 }
 
 void G1CollectorPolicy::record_stop_world_start() {
@@ -895,7 +916,7 @@
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
 
-  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
+  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
@@ -906,6 +927,10 @@
   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                               "skip last young-only gc");
   collector_state()->set_last_young_gc(should_continue_with_reclaim);
+  // If we will not continue with reclamation, there will be no mixed gcs after
+  // this marking cycle, so abort the initial mark to mixed gc time tracking.
+  if (!should_continue_with_reclaim) {
+    abort_time_to_mixed_tracking();
+  }
   collector_state()->set_in_marking_window(false);
 }
 
@@ -952,12 +977,13 @@
     return false;
   }
 
-  size_t marking_initiating_used_threshold =
-    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
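+  // The threshold now comes from the IHOP control, which may adapt it based on
+  // recent allocation rates and marking times (see G1IHOPControl).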
+  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
+
   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
+  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
 
-  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
+  if (marking_request_bytes > marking_initiating_used_threshold) {
     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
       ergo_verbose5(ErgoConcCycles,
         "request concurrent cycle initiation",
@@ -969,7 +995,7 @@
         cur_used_bytes,
         alloc_byte_size,
         marking_initiating_used_threshold,
-        (double) InitiatingHeapOccupancyPercent,
+        (double) marking_initiating_used_threshold / _g1->capacity() * 100,
         source);
       return true;
     } else {
@@ -996,10 +1022,7 @@
 
 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
   double end_time_sec = os::elapsedTime();
-  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
-         "otherwise, the subtraction below does not make sense");
-  size_t rs_size =
-            _cur_collection_pause_used_regions_at_start - cset_region_length();
+
   size_t cur_used_bytes = _g1->used();
   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
   bool last_pause_included_initial_mark = false;
@@ -1013,6 +1036,8 @@
   }
 #endif // PRODUCT
 
+  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
+
   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
   if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
@@ -1020,19 +1045,16 @@
     maybe_start_marking();
   }
 
-  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
+  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
+  if (app_time_ms < MIN_TIMER_GRANULARITY) {
+    // This usually happens due to the timer not having the required
+    // granularity. Some Linuxes are the usual culprits.
+    // We'll just set it to something (arbitrarily) small.
+    app_time_ms = 1.0;
+  }
 
   if (update_stats) {
     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
-    // this is where we update the allocation rate of the application
-    double app_time_ms =
-      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
-    if (app_time_ms < MIN_TIMER_GRANULARITY) {
-      // This usually happens due to the timer not having the required
-      // granularity. Some Linuxes are the usual culprits.
-      // We'll just set it to something (arbitrarily) small.
-      app_time_ms = 1.0;
-    }
     // We maintain the invariant that all objects allocated by mutator
     // threads will be allocated out of eden regions. So, we can use
     // the eden region number allocated since the previous GC to
@@ -1077,6 +1099,9 @@
     if (next_gc_should_be_mixed("start mixed GCs",
                                 "do not start mixed GCs")) {
       collector_state()->set_gcs_are_young(false);
+    } else {
+      // We aborted the mixed GC phase early; stop the initial mark to mixed gc
+      // time tracking.
+      abort_time_to_mixed_tracking();
     }
 
     collector_state()->set_last_young_gc(false);
@@ -1085,7 +1110,6 @@
   if (!collector_state()->last_gc_was_young()) {
     // This is a mixed GC. Here we decide whether to continue doing
     // mixed GCs or not.
-
     if (!next_gc_should_be_mixed("continue mixed GCs",
                                  "do not continue mixed GCs")) {
       collector_state()->set_gcs_are_young(true);
@@ -1177,9 +1201,20 @@
   collector_state()->set_in_marking_window(new_in_marking_window);
   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
   _free_regions_at_end_of_collection = _g1->num_free_regions();
-  update_young_list_max_and_target_length();
+  // IHOP control wants to know the expected young gen length if it were not
+  // restrained by the heap reserve. Using the actual length would make the
+  // prediction too small and would limit the young gen every time we get to
+  // the predicted target occupancy.
+  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
   update_rs_lengths_prediction();
 
+  update_ihop_prediction(app_time_ms / 1000.0,
+                         _bytes_allocated_in_old_since_last_gc,
+                         last_unrestrained_young_length * HeapRegion::GrainBytes);
+  _bytes_allocated_in_old_since_last_gc = 0;
+
+  _ihop_control->send_trace_event(_g1->gc_tracer_stw());
+
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 
@@ -1205,6 +1240,61 @@
   _collectionSetChooser->verify();
 }
 
+G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
+  if (G1UseAdaptiveIHOP) {
+    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
+                                     G1CollectedHeap::heap()->max_capacity(),
+                                     &_predictor,
+                                     G1ReservePercent,
+                                     G1HeapWastePercent);
+  } else {
+    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent,
+                                   G1CollectedHeap::heap()->max_capacity());
+  }
+}
+
+void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s,
+                                               size_t mutator_alloc_bytes,
+                                               size_t young_gen_size) {
+  // Always try to update the IHOP prediction. Even evacuation failures give
+  // information about e.g. whether marking should start earlier next time.
+
+  // Avoid using really small application times that might create samples with
+  // very high or very low values. They may be caused by e.g. back-to-back gcs.
+  double const min_valid_time = 1e-6;
+
+  bool report = false;
+
+  double marking_to_mixed_time = -1.0;
+  if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
+    marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
+    assert(marking_to_mixed_time > 0.0,
+           "Initial mark to mixed time must be larger than zero but is %.3f",
+           marking_to_mixed_time);
+    if (marking_to_mixed_time > min_valid_time) {
+      _ihop_control->update_marking_length(marking_to_mixed_time);
+      report = true;
+    }
+  }
+
+  // As an approximation for the young gc promotion rates during marking we use
+  // the rates of all young gcs: in many applications there are only a few, if
+  // any, young gcs during marking, which would make a prediction based on those
+  // alone useless. Using all of them increases the accuracy of the prediction.
+  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
+    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
+    report = true;
+  }
+
+  if (report) {
+    report_ihop_statistics();
+  }
+}
+
+void G1CollectorPolicy::report_ihop_statistics() {
+  _ihop_control->print();
+}
+
 #define EXT_SIZE_FORMAT "%.1f%s"
 #define EXT_SIZE_PARAMS(bytes)                                  \
   byte_size_in_proper_unit((double)(bytes)),                    \
@@ -1216,7 +1306,6 @@
   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
   _heap_capacity_bytes_before_gc = _g1->capacity();
   _heap_used_bytes_before_gc = _g1->used();
-  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
 
   _eden_capacity_bytes_before_gc =
          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
@@ -1717,8 +1806,7 @@
   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
 }
 
-void
-G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
+void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
   _collectionSetChooser->clear();
 
   WorkGang* workers = _g1->workers();
@@ -1737,7 +1825,8 @@
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
-  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);
+
+  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
 }
 
 // Add the heap region at the head of the non-incremental collection set
@@ -1953,6 +2042,59 @@
   }
 }
 
+G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const {
+  assert(!collector_state()->full_collection(), "must be");
+  if (collector_state()->during_initial_mark_pause()) {
+    assert(collector_state()->last_gc_was_young(), "must be");
+    assert(!collector_state()->last_young_gc(), "must be");
+    return InitialMarkGC;
+  } else if (collector_state()->last_young_gc()) {
+    assert(!collector_state()->during_initial_mark_pause(), "must be");
+    assert(collector_state()->last_gc_was_young(), "must be");
+    return LastYoungGC;
+  } else if (!collector_state()->last_gc_was_young()) {
+    assert(!collector_state()->during_initial_mark_pause(), "must be");
+    assert(!collector_state()->last_young_gc(), "must be");
+    return MixedGC;
+  } else {
+    assert(collector_state()->last_gc_was_young(), "must be");
+    assert(!collector_state()->during_initial_mark_pause(), "must be");
+    assert(!collector_state()->last_young_gc(), "must be");
+    return YoungOnlyGC;
+  }
+}
+
+void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) {
+  // Manage the MMU tracker. For some reason it ignores Full GCs.
+  if (kind != FullGC) {
+    _mmu_tracker->add_pause(start, end);
+  }
+  // Manage the mutator time tracking from initial mark to first mixed gc.
+  switch (kind) {
+    case FullGC:
+      abort_time_to_mixed_tracking();
+      break;
+    case Cleanup:
+    case Remark:
+    case YoungOnlyGC:
+    case LastYoungGC:
+      _initial_mark_to_mixed.add_pause(end - start);
+      break;
+    case InitialMarkGC:
+      _initial_mark_to_mixed.record_initial_mark_end(end);
+      break;
+    case MixedGC:
+      _initial_mark_to_mixed.record_mixed_gc_start(start);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+void G1CollectorPolicy::abort_time_to_mixed_tracking() {
+  _initial_mark_to_mixed.reset();
+}
+
 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                 const char* false_action_str) const {
   CollectionSetChooser* cset_chooser = _collectionSetChooser;
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Wed Nov 25 19:27:38 2015 +0000
@@ -29,9 +29,11 @@
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1InCSetState.hpp"
+#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/g1Predictions.hpp"
 #include "gc/shared/collectorPolicy.hpp"
+#include "utilities/pair.hpp"
 
 // A G1CollectorPolicy makes policy decisions that determine the
 // characteristics of the collector.  Examples include:
@@ -40,6 +42,7 @@
 
 class HeapRegion;
 class CollectionSetChooser;
+class G1IHOPControl;
 
 // TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses
 // (the latter may contain non-young regions - i.e. regions that are
@@ -163,6 +166,15 @@
 
 class G1CollectorPolicy: public CollectorPolicy {
  private:
+  G1IHOPControl* _ihop_control;
+
+  G1IHOPControl* create_ihop_control() const;
+  // Update the IHOP control with necessary statistics.
+  void update_ihop_prediction(double mutator_time_s,
+                              size_t mutator_alloc_bytes,
+                              size_t young_gen_size);
+  void report_ihop_statistics();
+
   G1Predictions _predictor;
 
   double get_new_prediction(TruncatedSeq const* seq) const;
@@ -182,7 +194,6 @@
   CollectionSetChooser* _collectionSetChooser;
 
   double _full_collection_start_sec;
-  uint   _cur_collection_pause_used_regions_at_start;
 
   // These exclude marking times.
   TruncatedSeq* _recent_gc_times_ms;
@@ -271,9 +282,17 @@
 
   size_t _pending_cards;
 
+  // The number of bytes allocated in the old gen during the last mutator period
+  // and the following young GC.
+  size_t _bytes_allocated_in_old_since_last_gc;
+
+  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
 public:
   const G1Predictions& predictor() const { return _predictor; }
 
+  // Add the given number of bytes to the total number of allocated bytes in the old gen.
+  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
+
   // Accessors
 
   void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
@@ -473,16 +492,18 @@
   double _mark_remark_start_sec;
   double _mark_cleanup_start_sec;
 
-  void update_young_list_max_and_target_length();
-  void update_young_list_max_and_target_length(size_t rs_lengths);
+  // Updates the internal young list maximum and target lengths. Returns the
+  // unbounded young list target length.
+  uint update_young_list_max_and_target_length();
+  uint update_young_list_max_and_target_length(size_t rs_lengths);
 
   // Update the young list target length either by setting it to the
   // desired fixed value or by calculating it using G1's pause
   // prediction model. If no rs_lengths parameter is passed, predict
   // the RS lengths using the prediction model, otherwise use the
   // given rs_lengths as the prediction.
-  void update_young_list_target_length();
-  void update_young_list_target_length(size_t rs_lengths);
+  // Returns the unbounded young list target length.
+  uint update_young_list_target_length(size_t rs_lengths);
 
   // Calculate and return the minimum desired young list target
   // length. This is the minimum desired young list length according
@@ -505,7 +526,10 @@
                                           uint desired_min_length,
                                           uint desired_max_length) const;
 
-  uint bounded_young_list_target_length(size_t rs_lengths) const;
+  // Result of the young_list_target_lengths() method, containing both the
+  // bounded and the unbounded young list target lengths, in this order.
+  typedef Pair<uint, uint, StackObj> YoungTargetLengths;
+  YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;
 
   void update_rs_lengths_prediction();
   void update_rs_lengths_prediction(size_t prediction);
@@ -536,10 +560,30 @@
 
   // Sets up marking if proper conditions are met.
   void maybe_start_marking();
+
+  // The kind of STW pause.
+  enum PauseKind {
+    FullGC,
+    YoungOnlyGC,
+    MixedGC,
+    LastYoungGC,
+    InitialMarkGC,
+    Cleanup,
+    Remark
+  };
+
+  // Calculate PauseKind from internal state.
+  PauseKind young_gc_pause_kind() const;
+  // Record the given STW pause with the given start and end times (in s).
+  void record_pause(PauseKind kind, double start, double end);
+  // Indicate that we aborted marking before doing any mixed GCs.
+  void abort_time_to_mixed_tracking();
 public:
 
   G1CollectorPolicy();
 
+  virtual ~G1CollectorPolicy();
+
   virtual G1CollectorPolicy* as_g1_policy() { return this; }
 
   G1CollectorState* collector_state() const;
--- a/hotspot/src/share/vm/gc/g1/g1ErgoVerbose.cpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1ErgoVerbose.cpp	Wed Nov 25 19:27:38 2015 +0000
@@ -57,6 +57,7 @@
   case ErgoConcCycles:        return "Concurrent Cycles";
   case ErgoMixedGCs:          return "Mixed GCs";
   case ErgoTiming:            return "Timing";
+  case ErgoIHOP:              return "IHOP";
   default:
     ShouldNotReachHere();
     // Keep the Windows compiler happy
--- a/hotspot/src/share/vm/gc/g1/g1ErgoVerbose.hpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1ErgoVerbose.hpp	Wed Nov 25 19:27:38 2015 +0000
@@ -71,6 +71,7 @@
   ErgoConcCycles,
   ErgoMixedGCs,
   ErgoTiming,
+  ErgoIHOP,
 
   ErgoHeuristicNum
 } ErgoHeuristic;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1IHOPControl.cpp	Wed Nov 25 19:27:38 2015 +0000
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ErgoVerbose.hpp"
+#include "gc/g1/g1IHOPControl.hpp"
+#include "gc/g1/g1Predictions.hpp"
+#include "gc/shared/gcTrace.hpp"
+
+G1IHOPControl::G1IHOPControl(double initial_ihop_percent, size_t target_occupancy) :
+  _initial_ihop_percent(initial_ihop_percent),
+  _target_occupancy(target_occupancy),
+  _last_allocated_bytes(0),
+  _last_allocation_time_s(0.0)
+{
+  assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0, "Initial IHOP value must be between 0 and 100 but is %.3f", initial_ihop_percent);
+}
+
+void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) {
+  assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s);
+
+  _last_allocation_time_s = allocation_time_s;
+  _last_allocated_bytes = allocated_bytes;
+}
+
+void G1IHOPControl::print() {
+  size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold();
+  ergo_verbose6(ErgoIHOP,
+                "basic information",
+                ergo_format_reason("value update")
+                ergo_format_byte_perc("threshold")
+                ergo_format_byte("target occupancy")
+                ergo_format_byte("current occupancy")
+                ergo_format_double("recent old gen allocation rate")
+                ergo_format_double("recent marking phase length"),
+                cur_conc_mark_start_threshold,
+                cur_conc_mark_start_threshold * 100.0 / _target_occupancy,
+                _target_occupancy,
+                G1CollectedHeap::heap()->used(),
+                _last_allocation_time_s > 0.0 ? _last_allocated_bytes / _last_allocation_time_s : 0.0,
+                last_marking_length_s());
+}
+
+void G1IHOPControl::send_trace_event(G1NewTracer* tracer) {
+  tracer->report_basic_ihop_statistics(get_conc_mark_start_threshold(),
+                                       _target_occupancy,
+                                       G1CollectedHeap::heap()->used(),
+                                       _last_allocated_bytes,
+                                       _last_allocation_time_s,
+                                       last_marking_length_s());
+}
+
+G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent, size_t target_occupancy) :
+  G1IHOPControl(ihop_percent, target_occupancy),
+  _last_marking_length_s(0.0) {
+  assert(_target_occupancy > 0, "Target occupancy must be larger than zero.");
+}
+
+#ifndef PRODUCT
+static void test_update(G1IHOPControl* ctrl, double alloc_time, size_t alloc_amount, size_t young_size, double mark_time) {
+  for (int i = 0; i < 100; i++) {
+    ctrl->update_allocation_info(alloc_time, alloc_amount, young_size);
+    ctrl->update_marking_length(mark_time);
+  }
+}
+
+void G1StaticIHOPControl::test() {
+  size_t const initial_ihop = 45;
+
+  G1StaticIHOPControl ctrl(initial_ihop, 100);
+
+  size_t threshold = ctrl.get_conc_mark_start_threshold();
+  assert(threshold == initial_ihop,
+         "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold);
+
+  ctrl.update_allocation_info(100.0, 100, 100);
+  threshold = ctrl.get_conc_mark_start_threshold();
+  assert(threshold == initial_ihop,
+         "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold);
+
+  ctrl.update_marking_length(1000.0);
+  threshold = ctrl.get_conc_mark_start_threshold();
+  assert(threshold == initial_ihop,
+         "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold);
+
+  // Whatever we pass, the IHOP value must stay the same.
+  test_update(&ctrl, 2, 10, 10, 3);
+  threshold = ctrl.get_conc_mark_start_threshold();
+  assert(threshold == initial_ihop,
+         "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold);
+
+  test_update(&ctrl, 12, 10, 10, 3);
+  threshold = ctrl.get_conc_mark_start_threshold();
+  assert(threshold == initial_ihop,
+         "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold);
+}
+#endif
+
+G1AdaptiveIHOPControl::G1AdaptiveIHOPControl(double ihop_percent,
+                                             size_t initial_target_occupancy,
+                                             G1Predictions const* predictor,
+                                             size_t heap_reserve_percent,
+                                             size_t heap_waste_percent) :
+  G1IHOPControl(ihop_percent, initial_target_occupancy),
+  _predictor(predictor),
+  _marking_times_s(10, 0.95),
+  _allocation_rate_s(10, 0.95),
+  _last_unrestrained_young_size(0),
+  _heap_reserve_percent(heap_reserve_percent),
+  _heap_waste_percent(heap_waste_percent)
+{
+}
+
+size_t G1AdaptiveIHOPControl::actual_target_threshold() const {
+  // The actual target threshold takes the heap reserve and the expected waste in
+  // free space into account.
+  // _heap_reserve_percent is the part of the total heap capacity that is reserved
+  // for eventual promotion failure.
+  // _heap_waste_percent is the amount of space that will never be reclaimed in
+  // any heap, so it cannot be used for allocation during marking and must always
+  // be considered.
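+  // Example with illustrative values only: a maximum capacity and target
+  // occupancy of 1000M, a 10 percent reserve and 5 percent waste give
+  // MIN2(1000M * 0.85, 1000M * 0.95) = 850M as the actual target.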
+
+  double safe_total_heap_percentage = MIN2((double)(_heap_reserve_percent + _heap_waste_percent), 100.0);
+
+  return MIN2(
+    G1CollectedHeap::heap()->max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0,
+    _target_occupancy * (100.0 - _heap_waste_percent) / 100.0
+    );
+}
+
+bool G1AdaptiveIHOPControl::have_enough_data_for_prediction() const {
+  return ((size_t)_marking_times_s.num() >= G1AdaptiveIHOPNumInitialSamples) &&
+         ((size_t)_allocation_rate_s.num() >= G1AdaptiveIHOPNumInitialSamples);
+}
+
+size_t G1AdaptiveIHOPControl::get_conc_mark_start_threshold() {
+  if (have_enough_data_for_prediction()) {
+    double pred_marking_time = _predictor->get_new_prediction(&_marking_times_s);
+    double pred_promotion_rate = _predictor->get_new_prediction(&_allocation_rate_s);
+
+    // In reality we would need the maximum size of the young gen during marking;
+    // using the current unrestrained young gen size is a conservative estimate.
+    size_t predicted_needed_bytes_during_marking =
+      (pred_marking_time * pred_promotion_rate + _last_unrestrained_young_size);
+
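+    // I.e. concurrent marking should start once old gen occupancy reaches
+    // actual_target_threshold() minus the predicted space needed during
+    // marking, clamped at zero.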
+    size_t internal_threshold = actual_target_threshold();
+    size_t predicted_initiating_threshold = predicted_needed_bytes_during_marking < internal_threshold ?
+                                            internal_threshold - predicted_needed_bytes_during_marking :
+                                            0;
+    return predicted_initiating_threshold;
+  } else {
+    // Use the initial value.
+    return _initial_ihop_percent * _target_occupancy / 100.0;
+  }
+}
+
+void G1AdaptiveIHOPControl::update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) {
+  G1IHOPControl::update_allocation_info(allocation_time_s, allocated_bytes, additional_buffer_size);
+
+  double allocation_rate = (double) allocated_bytes / allocation_time_s;
+  _allocation_rate_s.add(allocation_rate);
+
+  _last_unrestrained_young_size = additional_buffer_size;
+}
+
+void G1AdaptiveIHOPControl::update_marking_length(double marking_length_s) {
+  assert(marking_length_s >= 0.0, "Marking length must be non-negative but is %.3f", marking_length_s);
+  _marking_times_s.add(marking_length_s);
+}
+
+void G1AdaptiveIHOPControl::print() {
+  G1IHOPControl::print();
+  size_t actual_target = actual_target_threshold();
+  ergo_verbose6(ErgoIHOP,
+                "adaptive IHOP information",
+                ergo_format_reason("value update")
+                ergo_format_byte_perc("threshold")
+                ergo_format_byte("internal target occupancy")
+                ergo_format_double("predicted old gen allocation rate")
+                ergo_format_double("predicted marking phase length")
+                ergo_format_str("prediction active"),
+                get_conc_mark_start_threshold(),
+                percent_of(get_conc_mark_start_threshold(), actual_target),
+                actual_target,
+                _predictor->get_new_prediction(&_allocation_rate_s),
+                _predictor->get_new_prediction(&_marking_times_s),
+                have_enough_data_for_prediction() ? "true" : "false"
+                );
+}
+
+void G1AdaptiveIHOPControl::send_trace_event(G1NewTracer* tracer) {
+  G1IHOPControl::send_trace_event(tracer);
+  tracer->report_adaptive_ihop_statistics(get_conc_mark_start_threshold(),
+                                          actual_target_threshold(),
+                                          G1CollectedHeap::heap()->used(),
+                                          _last_unrestrained_young_size,
+                                          _predictor->get_new_prediction(&_allocation_rate_s),
+                                          _predictor->get_new_prediction(&_marking_times_s),
+                                          have_enough_data_for_prediction());
+}
+
+#ifndef PRODUCT
+void G1AdaptiveIHOPControl::test() {
+  size_t const initial_threshold = 45;
+  size_t const young_size = 10;
+  size_t const target_size = 100;
+
+  // The final IHOP value is always
+  // target_size - (young_size + alloc_amount/alloc_time * marking_time)
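+  // E.g. for the first "load" below: 100 - (10 + 10 / 2 * 2) = 80.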
+
+  G1Predictions pred(0.95);
+  G1AdaptiveIHOPControl ctrl(initial_threshold, target_size, &pred, 0, 0);
+
+  // First "load".
+  size_t const alloc_time1 = 2;
+  size_t const alloc_amount1 = 10;
+  size_t const marking_time1 = 2;
+  size_t const settled_ihop1 = target_size - (young_size + alloc_amount1/alloc_time1 * marking_time1);
+
+  size_t threshold;
+  threshold = ctrl.get_conc_mark_start_threshold();
+  assert(threshold == initial_threshold,
+         "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_threshold, threshold);
+  for (size_t i = 0; i < G1AdaptiveIHOPNumInitialSamples - 1; i++) {
+    ctrl.update_allocation_info(alloc_time1, alloc_amount1, young_size);
+    ctrl.update_marking_length(marking_time1);
+    // Not enough data yet.
+    threshold = ctrl.get_conc_mark_start_threshold();
+    assert(threshold == initial_threshold,
+           "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_threshold, threshold);
+  }
+
+  test_update(&ctrl, alloc_time1, alloc_amount1, young_size, marking_time1);
+
+  threshold = ctrl.get_conc_mark_start_threshold();
+  assert(threshold == settled_ihop1,
+         "Expected IHOP threshold to settle at " SIZE_FORMAT " but is " SIZE_FORMAT, settled_ihop1, threshold);
+
+  // Second "load". A bit higher allocation rate.
+  size_t const alloc_time2 = 2;
+  size_t const alloc_amount2 = 30;
+  size_t const marking_time2 = 2;
+  size_t const settled_ihop2 = target_size - (young_size + alloc_amount2/alloc_time2 * marking_time2);
+
+  test_update(&ctrl, alloc_time2, alloc_amount2, young_size, marking_time2);
+
+  threshold = ctrl.get_conc_mark_start_threshold();
+  assert(threshold < settled_ihop1,
+         "Expected IHOP threshold to settle at a value lower than " SIZE_FORMAT " but is " SIZE_FORMAT, settled_ihop1, threshold);
+
+  // Third "load". Very high (impossible) allocation rate.
+  size_t const alloc_time3 = 1;
+  size_t const alloc_amount3 = 50;
+  size_t const marking_time3 = 2;
+  size_t const settled_ihop3 = 0;
+
+  test_update(&ctrl, alloc_time3, alloc_amount3, young_size, marking_time3);
+  threshold = ctrl.get_conc_mark_start_threshold();
+
+  assert(threshold == settled_ihop3,
+         "Expected IHOP threshold to settle at " SIZE_FORMAT " but is " SIZE_FORMAT, settled_ihop3, threshold);
+
+  // And back to some arbitrary value.
+  test_update(&ctrl, alloc_time2, alloc_amount2, young_size, marking_time2);
+
+  threshold = ctrl.get_conc_mark_start_threshold();
+  assert(threshold > settled_ihop3,
+         "Expected IHOP threshold to settle at value larger than " SIZE_FORMAT " but is " SIZE_FORMAT, settled_ihop3, threshold);
+}
+
+void IHOP_test() {
+  G1StaticIHOPControl::test();
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1IHOPControl.hpp	Wed Nov 25 19:27:38 2015 +0000
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1IHOPCONTROL_HPP
+#define SHARE_VM_GC_G1_G1IHOPCONTROL_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/numberSeq.hpp"
+
+class G1Predictions;
+class G1NewTracer;
+
+// Base class for algorithms that calculate the heap occupancy at which
+// concurrent marking should start. This heap usage threshold should be relative
+// to old gen size.
+class G1IHOPControl : public CHeapObj<mtGC> {
+ protected:
+  // The initial IHOP value relative to the target occupancy.
+  double _initial_ihop_percent;
+  // The target maximum occupancy of the heap.
+  size_t _target_occupancy;
+
+  // Most recent complete mutator allocation period in seconds.
+  double _last_allocation_time_s;
+  // Amount of bytes allocated during _last_allocation_time_s.
+  size_t _last_allocated_bytes;
+
+  // Initialize an instance with the initial IHOP value in percent and the target
+  // occupancy. The target occupancy is the number of bytes when marking should
+  // be finished and reclaim started.
+  G1IHOPControl(double initial_ihop_percent, size_t target_occupancy);
+
+  // Most recent time from the end of the initial mark to the start of the first
+  // mixed gc.
+  virtual double last_marking_length_s() const = 0;
+ public:
+  virtual ~G1IHOPControl() { }
+
+  // Get the current non-young occupancy at which concurrent marking should start.
+  virtual size_t get_conc_mark_start_threshold() = 0;
+
+  // Update information about time during which allocations in the Java heap occurred,
+  // how large these allocations were in bytes, and an additional buffer.
+  // The allocated bytes should include any space made unusable for further
+  // allocation, e.g. waste caused by TLAB allocation, space at the end of
+  // humongous objects that cannot be used for allocation, etc.
+  // Together with the target occupancy, this additional buffer should contain the
+  // difference between old gen size and total heap size at the start of reclamation,
+  // and space required for that reclamation.
+  virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size);
+  // Update the mutator time spent from the end of the initial mark to the start
+  // of the first mixed gc.
+  virtual void update_marking_length(double marking_length_s) = 0;
+
+  virtual void print();
+  virtual void send_trace_event(G1NewTracer* tracer);
+};
+
+// The returned concurrent mark starting occupancy threshold is a fixed value
+// relative to the maximum heap size.
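+// For example, with an IHOP value of 45 percent and a 1000M target occupancy,
+// marking always starts once old gen occupancy crosses 450M, regardless of
+// allocation behavior.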
+class G1StaticIHOPControl : public G1IHOPControl {
+  // Most recent mutator time between the end of the initial mark and the start
+  // of the first mixed gc.
+  double _last_marking_length_s;
+ protected:
+  double last_marking_length_s() const { return _last_marking_length_s; }
+ public:
+  G1StaticIHOPControl(double ihop_percent, size_t target_occupancy);
+
+  size_t get_conc_mark_start_threshold() { return (size_t) (_initial_ihop_percent * _target_occupancy / 100.0); }
+
+  virtual void update_marking_length(double marking_length_s) {
+    assert(marking_length_s > 0.0, "Marking length must be larger than zero but is %.3f", marking_length_s);
+    _last_marking_length_s = marking_length_s;
+  }
+
+#ifndef PRODUCT
+  static void test();
+#endif
+};
+
+// This algorithm tries to return a concurrent mark starting occupancy value that
+// makes sure that during marking the given target occupancy is never exceeded,
+// based on predictions of current allocation rate and time periods between
+// initial mark and the first mixed gc.
+class G1AdaptiveIHOPControl : public G1IHOPControl {
+  size_t _heap_reserve_percent; // Percentage of maximum heap capacity that should not be touched.
+  size_t _heap_waste_percent;   // Percentage of free heap that should be considered as waste.
+
+  const G1Predictions * _predictor;
+
+  TruncatedSeq _marking_times_s;
+  TruncatedSeq _allocation_rate_s;
+
+  // The most recent unrestrained size of the young gen. This is used as an additional
+  // factor in the calculation of the threshold, as the threshold is based on
+  // non-young gen occupancy at the end of GC. For the IHOP threshold, we need to
+  // consider the young gen size during that time too.
+  // Since we cannot know what young gen sizes are used in the future, we will just
+  // use the current one. We expect it to be fairly large, as there is no marking
+  // or mixed gc that could impact its size too much.
+  size_t _last_unrestrained_young_size;
+
+  bool have_enough_data_for_prediction() const;
+
+  // The "actual" target threshold the algorithm wants to keep during and at the
+  // end of marking. This is typically lower than the requested threshold, as the
+  // algorithm needs to consider restrictions by the environment.
+  size_t actual_target_threshold() const;
+ protected:
+  virtual double last_marking_length_s() const { return _marking_times_s.last(); }
+ public:
+  G1AdaptiveIHOPControl(double ihop_percent,
+                        size_t initial_target_occupancy,
+                        G1Predictions const* predictor,
+                        size_t heap_reserve_percent, // The percentage of total heap capacity that should not be tapped into.
+                        size_t heap_waste_percent);  // The percentage of the free space in the heap that we think is not usable for allocation.
+
+  virtual size_t get_conc_mark_start_threshold();
+
+  virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size);
+  virtual void update_marking_length(double marking_length_s);
+
+  virtual void print();
+  virtual void send_trace_event(G1NewTracer* tracer);
+#ifndef PRODUCT
+  static void test();
+#endif
+};
+
+#endif // SHARE_VM_GC_G1_G1IHOPCONTROL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1InitialMarkToMixedTimeTracker.hpp	Wed Nov 25 19:27:38 2015 +0000
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1INITIALMARKTOMIXEDTIMETRACKER_HPP
+#define SHARE_VM_GC_G1_G1INITIALMARKTOMIXEDTIMETRACKER_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/debug.hpp"
+
+// Used to track time from the end of initial mark to the first mixed GC.
+// After calling the initial mark/mixed gc notifications, the result can be
+// obtained in last_marking_time() once, after which the tracking resets.
+// Any pauses recorded by add_pause() will be subtracted from that result.
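+// Typical use (see G1CollectorPolicy::record_pause()): call record_initial_mark_end()
+// at the end of the initial mark pause, add_pause() for every STW pause in between,
+// record_mixed_gc_start() at the first mixed gc, then query has_result() and
+// last_marking_time().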
+class G1InitialMarkToMixedTimeTracker VALUE_OBJ_CLASS_SPEC {
+private:
+  bool _active;
+  double _initial_mark_end_time;
+  double _mixed_start_time;
+  double _total_pause_time;
+
+  double wall_time() const {
+    return _mixed_start_time - _initial_mark_end_time;
+  }
+public:
+  G1InitialMarkToMixedTimeTracker() { reset(); }
+
+  // Record initial mark pause end, starting the time tracking.
+  void record_initial_mark_end(double end_time) {
+    assert(!_active, "Initial mark out of order.");
+    _initial_mark_end_time = end_time;
+    _active = true;
+  }
+
+  // Record the first mixed gc pause start, ending the time tracking.
+  void record_mixed_gc_start(double start_time) {
+    if (_active) {
+      _mixed_start_time = start_time;
+      _active = false;
+    }
+  }
+
+  double last_marking_time() {
+    assert(has_result(), "Do not have all measurements yet.");
+    double result = (_mixed_start_time - _initial_mark_end_time) - _total_pause_time;
+    reset();
+    return result;
+  }
+
+  void reset() {
+    _active = false;
+    _total_pause_time = 0.0;
+    _initial_mark_end_time = -1.0;
+    _mixed_start_time = -1.0;
+  }
+
+  void add_pause(double time) {
+    if (_active) {
+      _total_pause_time += time;
+    }
+  }
+
+  // Returns whether we have a result that can be retrieved.
+  bool has_result() const { return _mixed_start_time > 0.0 && _initial_mark_end_time > 0.0; }
+};
+
+#endif // SHARE_VM_GC_G1_G1INITIALMARKTOMIXEDTIMETRACKER_HPP
--- a/hotspot/src/share/vm/gc/g1/g1RemSetSummary.cpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RemSetSummary.cpp	Wed Nov 25 19:27:38 2015 +0000
@@ -125,14 +125,6 @@
   _sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
 }
 
-static double percent_of(size_t numerator, size_t denominator) {
-  if (denominator != 0) {
-    return (double)numerator / denominator * 100.0f;
-  } else {
-    return 0.0f;
-  }
-}
-
 static size_t round_to_K(size_t value) {
   return value / K;
 }
--- a/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1_globals.hpp	Wed Nov 25 19:27:38 2015 +0000
@@ -33,6 +33,16 @@
 
 #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, range, constraint) \
                                                                             \
+  product(bool, G1UseAdaptiveIHOP, false,                                   \
+          "Adaptively adjust InitiatingHeapOccupancyPercent from the "      \
+          "initial value.")                                                 \
+                                                                            \
+  experimental(size_t, G1AdaptiveIHOPNumInitialSamples, 3,                  \
+          "How many completed time periods from initial mark to first "     \
+          "mixed gc are required to use the input values for prediction "   \
+          "of the optimal occupancy to start marking.")                     \
+          range(1, max_intx)                                                \
+                                                                            \
   product(uintx, G1ConfidencePercent, 50,                                   \
           "Confidence level for MMU/pause predictions")                     \
           range(0, 100)                                                     \
--- a/hotspot/src/share/vm/gc/shared/gcTrace.cpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/shared/gcTrace.cpp	Wed Nov 25 19:27:38 2015 +0000
@@ -212,4 +212,34 @@
   send_old_evacuation_statistics(old_summary);
 }
 
+void G1NewTracer::report_basic_ihop_statistics(size_t threshold,
+                                               size_t target_occupancy,
+                                               size_t current_occupancy,
+                                               size_t last_allocation_size,
+                                               double last_allocation_duration,
+                                               double last_marking_length) {
+  send_basic_ihop_statistics(threshold,
+                             target_occupancy,
+                             current_occupancy,
+                             last_allocation_size,
+                             last_allocation_duration,
+                             last_marking_length);
+}
+
+void G1NewTracer::report_adaptive_ihop_statistics(size_t threshold,
+                                                  size_t internal_target_occupancy,
+                                                  size_t current_occupancy,
+                                                  size_t additional_buffer_size,
+                                                  double predicted_allocation_rate,
+                                                  double predicted_marking_length,
+                                                  bool prediction_active) {
+  send_adaptive_ihop_statistics(threshold,
+                                internal_target_occupancy,
+                                additional_buffer_size,
+                                current_occupancy,
+                                predicted_allocation_rate,
+                                predicted_marking_length,
+                                prediction_active);
+}
+
 #endif
--- a/hotspot/src/share/vm/gc/shared/gcTrace.hpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/shared/gcTrace.hpp	Wed Nov 25 19:27:38 2015 +0000
@@ -253,6 +253,20 @@
   void report_evacuation_failed(EvacuationFailedInfo& ef_info);
 
   void report_evacuation_statistics(const G1EvacSummary& young_summary, const G1EvacSummary& old_summary) const;
+
+  void report_basic_ihop_statistics(size_t threshold,
+                                    size_t target_occupancy,
+                                    size_t current_occupancy,
+                                    size_t last_allocation_size,
+                                    double last_allocation_duration,
+                                    double last_marking_length);
+  void report_adaptive_ihop_statistics(size_t threshold,
+                                       size_t internal_target_occupancy,
+                                       size_t current_occupancy,
+                                       size_t additional_buffer_size,
+                                       double predicted_allocation_rate,
+                                       double predicted_marking_length,
+                                       bool prediction_active);
  private:
   void send_g1_young_gc_event();
   void send_evacuation_info_event(EvacuationInfo* info);
@@ -260,6 +274,20 @@
 
   void send_young_evacuation_statistics(const G1EvacSummary& summary) const;
   void send_old_evacuation_statistics(const G1EvacSummary& summary) const;
+
+  void send_basic_ihop_statistics(size_t threshold,
+                                  size_t target_occupancy,
+                                  size_t current_occupancy,
+                                  size_t last_allocation_size,
+                                  double last_allocation_duration,
+                                  double last_marking_length);
+  void send_adaptive_ihop_statistics(size_t threshold,
+                                     size_t internal_target_occupancy,
+                                     size_t current_occupancy,
+                                     size_t additional_buffer_size,
+                                     double predicted_allocation_rate,
+                                     double predicted_marking_length,
+                                     bool prediction_active);
 };
 #endif
 
--- a/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Wed Nov 25 19:27:38 2015 +0000
@@ -35,6 +35,7 @@
 #if INCLUDE_ALL_GCS
 #include "gc/g1/evacuationInfo.hpp"
 #include "gc/g1/g1YCTypes.hpp"
+#include "tracefiles/traceEventClasses.hpp"
 #endif
 
 // All GC dependencies against the trace framework is contained within this file.
@@ -265,6 +266,50 @@
     old_evt.commit();
   }
 }
+
+void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
+                                             size_t target_occupancy,
+                                             size_t current_occupancy,
+                                             size_t last_allocation_size,
+                                             double last_allocation_duration,
+                                             double last_marking_length) {
+  EventGCG1BasicIHOP evt;
+  if (evt.should_commit()) {
+    evt.set_gcId(GCId::current());
+    evt.set_threshold(threshold);
+    evt.set_targetOccupancy(target_occupancy);
+    evt.set_thresholdPercentage(target_occupancy > 0 ? threshold * 100.0 / target_occupancy : 0.0);
+    evt.set_currentOccupancy(current_occupancy);
+    evt.set_lastAllocationSize(last_allocation_size);
+    evt.set_lastAllocationDuration(last_allocation_duration);
+    evt.set_lastAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
+    evt.set_lastMarkingLength(last_marking_length);
+    evt.commit();
+  }
+}
+
+void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
+                                                size_t internal_target_occupancy,
+                                                size_t current_occupancy,
+                                                size_t additional_buffer_size,
+                                                double predicted_allocation_rate,
+                                                double predicted_marking_length,
+                                                bool prediction_active) {
+  EventGCG1AdaptiveIHOP evt;
+  if (evt.should_commit()) {
+    evt.set_gcId(GCId::current());
+    evt.set_threshold(threshold);
+    evt.set_thresholdPercentage(internal_target_occupancy > 0 ? threshold * 100.0 / internal_target_occupancy : 0.0);
+    evt.set_internalTargetOccupancy(internal_target_occupancy);
+    evt.set_currentOccupancy(current_occupancy);
+    evt.set_additionalBufferSize(additional_buffer_size);
+    evt.set_predictedAllocationRate(predicted_allocation_rate);
+    evt.set_predictedMarkingLength(predicted_marking_length);
+    evt.set_predictionActive(prediction_active);
+    evt.commit();
+  }
+}
+
 #endif
 
 static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
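Note (illustrative sketch, not part of the changeset): the two senders above derive thresholdPercentage and lastAllocationRate inline, guarding against a zero target occupancy and a zero measurement interval. The same arithmetic, stripped of the HotSpot event plumbing and with invented function names:

    // Standalone illustration; mirrors the inline expressions in the senders above.
    #include <cstddef>

    // Threshold as a percentage of the (internal) target occupancy; 0.0 when the
    // target is zero, as in set_thresholdPercentage(...).
    inline double ihop_threshold_percentage(size_t threshold, size_t target_occupancy) {
      return target_occupancy > 0 ? threshold * 100.0 / target_occupancy : 0.0;
    }

    // Mutator allocation rate in bytes/second; 0.0 for an empty interval, as in
    // set_lastAllocationRate(...).
    inline double ihop_allocation_rate(size_t allocated_bytes, double duration_seconds) {
      return duration_seconds != 0.0 ? allocated_bytes / duration_seconds : 0.0;
    }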
--- a/hotspot/src/share/vm/prims/jni.cpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/prims/jni.cpp	Wed Nov 25 19:27:38 2015 +0000
@@ -3889,6 +3889,7 @@
 void TestBufferingOopClosure_test();
 void TestCodeCacheRemSet_test();
 void FreeRegionList_test();
+void IHOP_test();
 void test_memset_with_concurrent_readers();
 void TestPredictions_test();
 void WorkerDataArray_test();
@@ -3937,6 +3938,7 @@
     run_unit_test(TestCodeCacheRemSet_test());
     if (UseG1GC) {
       run_unit_test(FreeRegionList_test());
+      run_unit_test(IHOP_test());
     }
     run_unit_test(test_memset_with_concurrent_readers());
     run_unit_test(TestPredictions_test());
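Note (not part of the changeset): run_unit_test() is only reached when internal VM tests are enabled, so the newly registered IHOP_test() is expected to run in a non-product (debug) build via something like

    java -XX:+ExecuteInternalVMTests -XX:+UseG1GC -version

The UseG1GC guard is visible in the hunk above; the flag name and build-flavor restriction are stated from general HotSpot knowledge, not from this changeset.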
--- a/hotspot/src/share/vm/trace/trace.xml	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/trace/trace.xml	Wed Nov 25 19:27:38 2015 +0000
@@ -369,6 +369,32 @@
       <structvalue type="G1EvacStats" field="stats" label="Evacuation statistics"/>
     </event>
 
+    <event id="GCG1BasicIHOP" path="vm/gc/detailed/g1_basic_ihop_status" label="G1 Basic IHOP statistics" is_instant="true"
+           description="Basic statistics related to current IHOP calculation">
+      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="BYTES64" field="threshold" label="Current IHOP threshold" description="Current IHOP threshold in bytes"/>
+      <value type="BYTES64" field="thresholdPercentage" label="Current IHOP threshold in percent" description="Current IHOP threshold in percent of old gen"/>
+      <value type="BYTES64" field="targetOccupancy" label="Target occupancy" description="Target old gen occupancy to reach at the start of mixed GC in bytes"/>
+      <value type="BYTES64" field="currentOccupancy" label="Current occupancy" description="Current old gen occupancy in bytes"/>
+      <value type="BYTES64" field="lastAllocationSize" label="Last mutator allocation size" description="Mutator allocation during mutator operation since last GC in bytes"/>
+      <value type="DOUBLE" field="lastAllocationDuration" label="Last mutator operation duration" description="Time the mutator ran since last GC in seconds"/>
+      <value type="DOUBLE" field="lastAllocationRate" label="Last mutator allocation rate" description="Allocation rate of the mutator since last GC in bytes/second"/>
+      <value type="DOUBLE" field="lastMarkingLength" label="Last mutator time from initial mark to first mixed GC" description="Last time from the end of the last initial mark to the first mixed GC in seconds"/>
+    </event>
+
+    <event id="GCG1AdaptiveIHOP" path="vm/gc/detailed/g1_adaptive_ihop_status" label="G1 Adaptive IHOP statistics" is_instant="true"
+           description="Statistics related to current adaptive IHOP calculation">
+      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="BYTES64" field="threshold" label="Current IHOP threshold" description="Current IHOP threshold in bytes"/>
+      <value type="BYTES64" field="thresholdPercentage" label="Current IHOP threshold in percent" description="Current IHOP threshold in percent of the internal target occupancy"/>
+      <value type="BYTES64" field="internalTargetOccupancy" label="Target occupancy" description="Internal target old gen occupancy to reach at the start of mixed GC in bytes"/>
+      <value type="BYTES64" field="currentOccupancy" label="Current occupancy" description="Current old gen occupancy in bytes"/>
+      <value type="BYTES64" field="additionalBufferSize" label="Additional buffer size" description="Additional buffer size in bytes"/>
+      <value type="DOUBLE" field="predictedAllocationRate" label="Predicted mutator allocation rate" description="Current predicted allocation rate for the mutator in bytes/second"/>
+      <value type="DOUBLE" field="predictedMarkingLength" label="Predicted time from initial mark to first mixed GC" description="Current predicted time from the end of the last initial mark to the first mixed GC in seconds"/>
+      <value type="BOOLEAN" field="predictionActive" label="Prediction active" description="Indicates whether the adaptive IHOP prediction is active"/>
+    </event>
+
     <!-- Promotion events, Supported GCs are Parallel Scavenge, G1 and CMS with Parallel New. -->
     <event id="PromoteObjectInNewPLAB" path="vm/gc/detailed/object_promotion_in_new_PLAB" label="Promotion in new PLAB"
         description="Object survived scavenge and was copied to a new Promotion Local Allocation Buffer (PLAB). Supported GCs are Parallel Scavange, G1 and CMS with Parallel New. Due to promotion being done in parallel an object might be reported multiple times as the GC threads race to copy all objects." 
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Wed Nov 25 17:32:44 2015 +0000
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Wed Nov 25 19:27:38 2015 +0000
@@ -565,6 +565,13 @@
   return fabs(value);
 }
 
+// Returns numerator/denominator as percentage value from 0 to 100. If denominator
+// is zero, return 0.0.
+template<typename T>
+inline double percent_of(T numerator, T denominator) {
+  return denominator != 0 ? (double)numerator / denominator * 100.0 : 0.0;
+}
+
 //----------------------------------------------------------------------------------------------------
 // Special casts
 // Cast floats into same-size integers and vice-versa w/o changing bit-pattern
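Note (minimal usage sketch, not part of the changeset; occupancy numbers invented, and the template body is repeated verbatim from the hunk above so the snippet compiles on its own):

    // Standalone illustration of the new percent_of() helper.
    #include <cstddef>
    #include <cstdio>

    template<typename T>
    inline double percent_of(T numerator, T denominator) {
      return denominator != 0 ? (double)numerator / denominator * 100.0 : 0.0;
    }

    int main() {
      size_t used   = 256u * 1024 * 1024;   // hypothetical current old gen occupancy
      size_t target = 1024u * 1024 * 1024;  // hypothetical target occupancy
      printf("%.1f%%\n", percent_of(used, target));      // prints 25.0%
      printf("%.1f%%\n", percent_of(used, (size_t)0));   // zero denominator -> 0.0%
      return 0;
    }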