Merge
author jmasa
date Fri, 23 Aug 2013 15:59:20 -0700
changeset 19551 8360536a13fa
parent 19530 0c39c59e1624 (current diff)
parent 19550 7b831493a832 (diff)
child 19552 9c6d409d0fc4
child 19726 c086d352d3f7
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 23 03:14:54 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 23 15:59:20 2013 -0700
@@ -3460,7 +3460,9 @@
 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
   assert_locked_or_safepoint(Heap_lock);
   size_t size = ReservedSpace::page_align_size_down(bytes);
-  if (size > 0) {
+  // Only shrink if a compaction was done so that all the free space
+  // in the generation is in a contiguous block at the end.
+  if (size > 0 && did_compact()) {
     shrink_by(size);
   }
 }
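
The hunk above gates ConcurrentMarkSweepGeneration::shrink() on did_compact(), so memory is only returned when a compacting collection has left the generation's free space in one contiguous block at its end. A minimal standalone sketch of that guarded-shrink pattern (ToyGeneration, record_compaction and page_align_down are simplified stand-ins, not HotSpot code):

    #include <cstddef>
    #include <cstdio>

    // Simplified model of the pattern: the generation only gives memory back
    // when the last collection compacted it, so the free space is a single
    // contiguous block at the high end and the tail can be released safely.
    class ToyGeneration {
      bool        _did_compact = false;   // set by a compacting (full) collection
      std::size_t _capacity    = 64 * 1024 * 1024;

      static std::size_t page_align_down(std::size_t bytes, std::size_t page = 4096) {
        return bytes & ~(page - 1);
      }

     public:
      void record_compaction(bool compacted) { _did_compact = compacted; }

      void shrink(std::size_t bytes) {
        std::size_t size = page_align_down(bytes);
        // Mirror of the patched check: skip the shrink entirely unless a
        // compaction made the tail of the generation entirely free.
        if (size > 0 && _did_compact) {
          _capacity -= size;              // stands in for shrink_by(size)
          std::printf("shrank by %zu bytes, capacity now %zu\n", size, _capacity);
        }
      }
    };

    int main() {
      ToyGeneration gen;
      gen.shrink(8 * 1024 * 1024);        // no compaction yet: no-op
      gen.record_compaction(true);
      gen.shrink(8 * 1024 * 1024);        // now the shrink is allowed
    }
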
@@ -8696,9 +8698,10 @@
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
   assert(_sp->used_region().contains(eob - 1),
-         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+         err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
+                 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
-                 _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
+                 eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
   if (eob >= _limit) {
     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
     if (CMSTraceSweeper) {
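
The second hunk also fixes an argument mismatch: the old format string labeled its first printed value "eob" but actually passed _limit, and eob itself was never printed. The new message reports eob, eob-1 and _limit explicitly, each under its own label. A standalone illustration of the resulting diagnostic (plain fprintf/assert here, not HotSpot's err_msg and PTR_FORMAT machinery):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Report eob, eob-1 and the sweep limit next to the space bounds, with
    // every printed value matched to its own label in the format string.
    static void check_eob(const char* eob, const char* limit,
                          const char* bottom, const char* end,
                          const void* fc, std::size_t chunk_size) {
      const char* eob_minus_1 = eob - 1;
      if (!(eob_minus_1 >= bottom && eob_minus_1 < end)) {
        std::fprintf(stderr,
                     "eob = %p eob-1 = %p _limit = %p"
                     " out of bounds wrt _sp = [%p,%p)"
                     " when examining fc = %p(%zu)\n",
                     static_cast<const void*>(eob),
                     static_cast<const void*>(eob_minus_1),
                     static_cast<const void*>(limit),
                     static_cast<const void*>(bottom),
                     static_cast<const void*>(end),
                     fc, chunk_size);
        assert(false && "eob out of bounds wrt _sp");
      }
    }
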
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 23 03:14:54 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 23 15:59:20 2013 -0700
@@ -981,7 +981,8 @@
 
     if (should_try_gc) {
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded,
+          GCCause::_g1_inc_collection_pause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -1106,7 +1107,8 @@
       // enough space for the allocation to succeed after the pause.
 
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded,
+          GCCause::_g1_humongous_allocation);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -3700,14 +3702,15 @@
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                unsigned int gc_count_before,
-                                               bool* succeeded) {
+                                               bool* succeeded,
+                                               GCCause::Cause gc_cause) {
   assert_heap_not_locked_and_not_at_safepoint();
   g1_policy()->record_stop_world_start();
   VM_G1IncCollectionPause op(gc_count_before,
                              word_size,
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
-                             GCCause::_g1_inc_collection_pause);
+                             gc_cause);
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
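
Taken together, these three hunks thread the caller's GC cause into do_collection_pause() instead of hard-coding GCCause::_g1_inc_collection_pause in the VM_G1IncCollectionPause constructor call; the two call sites above now pass _g1_inc_collection_pause and _g1_humongous_allocation respectively. A simplified stand-in for that pattern (Cause, PauseOp and execute are toy names, not G1's types):

    #include <cstddef>

    // Instead of the pause routine hard-coding one cause, each caller passes
    // the cause that actually triggered it, and the routine forwards it to
    // the operation it builds.
    enum class Cause { g1_inc_collection_pause, g1_humongous_allocation };

    struct PauseOp {                      // stands in for VM_G1IncCollectionPause
      std::size_t word_size;
      Cause       cause;
    };

    static void execute(const PauseOp&) { /* stands in for VMThread::execute */ }

    static void* do_collection_pause(std::size_t word_size, Cause gc_cause) {
      PauseOp op{word_size, gc_cause};    // cause is now caller-supplied
      execute(op);
      return nullptr;                     // allocation result elided in this sketch
    }

    int main() {
      // Ordinary allocation-failure pause vs. a pause for a humongous allocation:
      do_collection_pause(128, Cause::g1_inc_collection_pause);
      do_collection_pause(1 << 20, Cause::g1_humongous_allocation);
    }
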
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 23 03:14:54 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 23 15:59:20 2013 -0700
@@ -776,9 +776,10 @@
   // it has to be read while holding the Heap_lock. Currently, both
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
-  HeapWord* do_collection_pause(size_t       word_size,
-                                unsigned int gc_count_before,
-                                bool*        succeeded);
+  HeapWord* do_collection_pause(size_t         word_size,
+                                unsigned int   gc_count_before,
+                                bool*          succeeded,
+                                GCCause::Cause gc_cause);
 
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
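
The comment above the declaration describes the protocol around gc_count_before: it is read while holding the Heap_lock, and both callers release the lock before invoking do_collection_pause(). A minimal sketch of that read-then-release ordering (std::mutex and the counter are hypothetical stand-ins for Heap_lock and the heap's collection count, not G1 code):

    #include <cstddef>
    #include <mutex>

    // Only the ordering matters here: sample the count under the lock,
    // release the lock, then request the pause with the sampled value so the
    // operation can tell whether another collection ran in between.
    static std::mutex heap_lock;
    static unsigned int total_collections = 0;

    static void do_collection_pause(std::size_t /*word_size*/,
                                    unsigned int /*gc_count_before*/) {
      // Receives the count sampled before the lock was dropped.
    }

    static void attempt_allocation_slow(std::size_t word_size) {
      unsigned int gc_count_before;
      {
        std::lock_guard<std::mutex> lg(heap_lock);   // read under the lock...
        gc_count_before = total_collections;
      }                                              // ...release before the call
      do_collection_pause(word_size, gc_count_before);
    }

    int main() { attempt_allocation_slow(128); }
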
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Aug 23 03:14:54 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Aug 23 15:59:20 2013 -0700
@@ -70,9 +70,6 @@
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
                     target_pause_time_ms));
-  guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
-            "we can only request an allocation if the GC cause is for "
-            "an incremental GC pause");
   _gc_cause = gc_cause;
 }
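
The removed guarantee required word_size == 0 for any cause other than _g1_inc_collection_pause; with the g1CollectedHeap.cpp change above, a humongous-allocation pause now legitimately requests an allocation (word_size > 0) with GCCause::_g1_humongous_allocation, so the check would fire and is dropped. A simplified mirror of what the old check rejected (toy Cause and IncCollectionPauseOp names, not G1's types):

    #include <cstddef>

    // With the cause supplied by the caller, an allocation request can arrive
    // with the humongous-allocation cause as well, which the old guarantee
    // would have rejected.
    enum class Cause { g1_inc_collection_pause, g1_humongous_allocation };

    struct IncCollectionPauseOp {
      std::size_t word_size;
      Cause       gc_cause;

      IncCollectionPauseOp(std::size_t ws, Cause cause)
          : word_size(ws), gc_cause(cause) {
        // The removed guarantee, kept only as a comment to show what it rejected:
        //   assert(word_size == 0 || cause == Cause::g1_inc_collection_pause);
        // A humongous-allocation pause with word_size > 0 would trip it.
      }
    };

    int main() {
      IncCollectionPauseOp ok{0, Cause::g1_inc_collection_pause};
      IncCollectionPauseOp now_allowed{1 << 20, Cause::g1_humongous_allocation};
      (void)ok; (void)now_allowed;
    }
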