Merge
author jmasa
date Fri, 11 Dec 2009 08:39:30 -0800
changeset 4466 b7fda949ec06
parent 4453 a431438150d4
parent 4465 7b73e0380188
child 4467 a28742868e2f
child 4472 f351a22c51ad
child 4474 faa140ac71cd
Merge
hotspot/src/share/vm/classfile/javaClasses.cpp
hotspot/src/share/vm/includeDB_core
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -1124,8 +1124,7 @@
     if (_dirty && _methods != NULL) {
       BarrierSet* bs = Universe::heap()->barrier_set();
       assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-      bs->write_ref_array(MemRegion((HeapWord*)_methods->base(),
-                                    _methods->array_size()));
+      bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
       _dirty = false;
     }
   }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -709,7 +709,8 @@
 
   // Support for parallelizing survivor space rescan
   if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
-    size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
+    size_t max_plab_samples = cp->max_gen0_size()/
+                                ((SurvivorRatio+2)*MinTLABSize);
     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
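
For a sense of scale, a worked example with hypothetical values (the flag
names are real, the numbers purely illustrative): with max_gen0_size() =
64M, SurvivorRatio = 8 and MinTLABSize = 2K, the new expression gives

    size_t max_plab_samples = (64*M) / ((8 + 2) * (2*K));  // = 3276 samples

The sizing now tracks the policy's actual maximum young generation size
rather than the MaxNewSize flag.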
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -155,7 +155,7 @@
 }
 
 DirtyCardQueueSet::CompletedBufferNode*
-DirtyCardQueueSet::get_completed_buffer_lock(int stop_at) {
+DirtyCardQueueSet::get_completed_buffer(int stop_at) {
   CompletedBufferNode* nd = NULL;
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 
@@ -175,28 +175,6 @@
   return nd;
 }
 
-// We only do this in contexts where there is no concurrent enqueueing.
-DirtyCardQueueSet::CompletedBufferNode*
-DirtyCardQueueSet::get_completed_buffer_CAS() {
-  CompletedBufferNode* nd = _completed_buffers_head;
-
-  while (nd != NULL) {
-    CompletedBufferNode* next = nd->next;
-    CompletedBufferNode* result =
-      (CompletedBufferNode*)Atomic::cmpxchg_ptr(next,
-                                                &_completed_buffers_head,
-                                                nd);
-    if (result == nd) {
-      return result;
-    } else {
-      nd = _completed_buffers_head;
-    }
-  }
-  assert(_completed_buffers_head == NULL, "Loop post");
-  _completed_buffers_tail = NULL;
-  return NULL;
-}
-
 bool DirtyCardQueueSet::
 apply_closure_to_completed_buffer_helper(int worker_i,
                                          CompletedBufferNode* nd) {
@@ -222,15 +200,10 @@
 
 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
                                                           int stop_at,
-                                                          bool with_CAS)
+                                                          bool during_pause)
 {
-  CompletedBufferNode* nd = NULL;
-  if (with_CAS) {
-    guarantee(stop_at == 0, "Precondition");
-    nd = get_completed_buffer_CAS();
-  } else {
-    nd = get_completed_buffer_lock(stop_at);
-  }
+  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
+  CompletedBufferNode* nd = get_completed_buffer(stop_at);
   bool res = apply_closure_to_completed_buffer_helper(worker_i, nd);
   if (res) Atomic::inc(&_processed_buffers_rs_thread);
   return res;
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,12 +120,13 @@
   // is returned to the completed buffer set, and this call returns false.
   bool apply_closure_to_completed_buffer(int worker_i = 0,
                                          int stop_at = 0,
-                                         bool with_CAS = false);
+                                         bool during_pause = false);
+
   bool apply_closure_to_completed_buffer_helper(int worker_i,
                                                 CompletedBufferNode* nd);
 
-  CompletedBufferNode* get_completed_buffer_CAS();
-  CompletedBufferNode* get_completed_buffer_lock(int stop_at);
+  CompletedBufferNode* get_completed_buffer(int stop_at);
+
   // Applies the current closure to all completed buffers,
   // non-consumptively.
   void apply_closure_to_all_completed_buffers();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -928,6 +928,8 @@
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
 
+    TraceMemoryManagerStats tms(true /* fullGC */);
+
     double start = os::elapsedTime();
     g1_policy()->record_full_collection_start();
 
@@ -1001,6 +1003,8 @@
 
     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
+    MemoryService::track_memory_usage();
+
     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
       HandleMark hm;  // Discard invalid handles created during verification
       gclog_or_tty->print(" VerifyAfterGC:");
@@ -1732,13 +1736,6 @@
   return car->free();
 }
 
-void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-  MutexLocker ml(Heap_lock);
-  collect_locked(cause);
-}
-
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1755,17 +1752,31 @@
   }
 }
 
-
-void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
-  // Don't want to do a GC until cleanup is completed.
-  wait_for_cleanup_complete();
-
-  // Read the GC count while holding the Heap_lock
-  int gc_count_before = SharedHeap::heap()->total_collections();
+void G1CollectedHeap::collect(GCCause::Cause cause) {
+  // The caller doesn't have the Heap_lock
+  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+
+  int gc_count_before;
   {
-    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1CollectFull op(gc_count_before, cause);
-    VMThread::execute(&op);
+    MutexLocker ml(Heap_lock);
+    // Read the GC count while holding the Heap_lock
+    gc_count_before = SharedHeap::heap()->total_collections();
+
+    // Don't want to do a GC until cleanup is completed.
+    wait_for_cleanup_complete();
+  } // We give up heap lock; VMThread::execute gets it back below
+  switch (cause) {
+    case GCCause::_scavenge_alot: {
+      // Do an incremental pause, which might sometimes be abandoned.
+      VM_G1IncCollectionPause op(gc_count_before, cause);
+      VMThread::execute(&op);
+      break;
+    }
+    default: {
+      // In all other cases, we currently do a full gc.
+      VM_G1CollectFull op(gc_count_before, cause);
+      VMThread::execute(&op);
+    }
   }
 }
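
The restructured collect() above follows the usual CollectedHeap pattern:
snapshot the collection count under the Heap_lock, drop the lock, then hand
the operation to the VM thread. In outline (a sketch of the mechanism, not
the exact VM_GC_Operation code):

    // 1. MutexLocker ml(Heap_lock);             // take the lock
    // 2. gc_count_before = total_collections(); // snapshot under the lock
    // 3. (Heap_lock released on scope exit)
    // 4. VMThread::execute(&op);                // op reacquires locks itself
    // 5. the operation's prologue compares the current collection count
    //    against gc_count_before and skips the GC if another collection
    //    already ran in the window between steps 3 and 4.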
 
@@ -2119,7 +2130,7 @@
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _g1_committed.byte_size();
+  return g1_reserved_obj_bytes();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -2638,6 +2649,8 @@
   }
 
   {
+    ResourceMark rm;
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->in_young_gc_mode()) {
@@ -2649,8 +2662,6 @@
     if (g1_policy()->should_initiate_conc_mark())
       strcat(verbose_str, " (initial-mark)");
 
-    GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
-
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
     // is messy if we do.
@@ -2658,7 +2669,8 @@
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
 
-    ResourceMark rm;
+    TraceMemoryManagerStats tms(false /* fullGC */);
+
     assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
     assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
     guarantee(!is_gc_active(), "collection is not reentrant");
@@ -2802,6 +2814,22 @@
           _young_list->reset_auxilary_lists();
         }
       } else {
+        if (_in_cset_fast_test != NULL) {
+          assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
+          FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+          //  this is more for peace of mind; we're nulling them here and
+          // we're expecting them to be null at the beginning of the next GC
+          _in_cset_fast_test = NULL;
+          _in_cset_fast_test_base = NULL;
+        }
+        // This looks confusing, because the DPT should really be empty
+        // at this point -- since we have not done any collection work,
+        // there should not be any derived pointers in the table to update;
+        // however, there is some additional state in the DPT which is
+        // reset at the end of the (null) "gc" here via the following call.
+        // A better approach might be to split off that state resetting work
+        // into a separate method that asserts that the DPT is empty and call
+        // that here. That is deferred for now.
         COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
       }
 
@@ -2838,6 +2866,8 @@
 
       assert(regions_accounted_for(), "Region leakage.");
 
+      MemoryService::track_memory_usage();
+
       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
         HandleMark hm;  // Discard invalid handles created during verification
         gclog_or_tty->print(" VerifyAfterGC:");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -692,7 +692,7 @@
 
   // Reserved (g1 only; super method includes perm), capacity and the used
   // portion in bytes.
-  size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); }
+  size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); }
   virtual size_t capacity() const;
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -1516,8 +1516,30 @@
       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
     update_recent_gc_times(end_time_sec, elapsed_ms);
     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
-    // using 1.01 to account for floating point inaccuracies
-    assert(recent_avg_pause_time_ratio() < 1.01, "All GC?");
+    if (recent_avg_pause_time_ratio() < 0.0 ||
+        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
+#ifndef PRODUCT
+      // Dump info to allow post-facto debugging
+      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
+      gclog_or_tty->print_cr("-------------------------------------------");
+      gclog_or_tty->print_cr("Recent GC Times (ms):");
+      _recent_gc_times_ms->dump();
+      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
+      _recent_prev_end_times_for_all_gcs_sec->dump();
+      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
+                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
+      // In debug mode, terminate the JVM if the user wants to debug at this point.
+      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
+#endif  // !PRODUCT
+      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
+      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
+      if (_recent_avg_pause_time_ratio < 0.0) {
+        _recent_avg_pause_time_ratio = 0.0;
+      } else {
+        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
+        _recent_avg_pause_time_ratio = 1.0;
+      }
+    }
   }
 
   if (G1PolicyVerbose > 1) {
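
(A numeric illustration of the clipping, with made-up values: recent GC
times summing to 480 ms over a 500 ms interval give a ratio of 480.0/500.0
= 0.96 and nothing happens; if FP rounding in the incremental sums instead
produces, say, 1.0000002 or -0.0000001, the value is now clipped to 1.0 or
0.0 respectively rather than tripping the old assert.)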
@@ -2825,8 +2847,15 @@
   double non_young_start_time_sec;
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0,
+  guarantee(_target_pause_time_ms > -1.0
+            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
             "_target_pause_time_ms should have been set!");
+#ifndef PRODUCT
+  if (_target_pause_time_ms <= -1.0) {
+    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
+    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
+  }
+#endif
   assert(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
@@ -2972,7 +3001,3 @@
   G1CollectorPolicy::record_collection_pause_end(abandoned);
   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
 }
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -86,12 +86,22 @@
     //   increase the array size (:-)
     //   remove the oldest entry (this might allow more GC time for
     //     the time slice than what's allowed)
-    //   concolidate the two entries with the minimum gap between them
-    //     (this mighte allow less GC time than what's allowed)
-    guarantee(0, "array full, currently we can't recover");
+    //   consolidate the two entries with the minimum gap between them
+    //     (this might allow less GC time than what's allowed)
+    guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
+              "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+    // In the case where ScavengeALot is true, such overflow is not
+    // uncommon; in such cases, we can, without much loss of precision
+    // or performance (we are GC'ing most of the time anyway!),
+    // simply overwrite the oldest entry in the tracker: this
+    // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+    _head_index = trim_index(_head_index + 1);
+    assert(_head_index == _tail_index, "Because we have a full circular buffer");
+    _tail_index = trim_index(_tail_index + 1);
+  } else {
+    _head_index = trim_index(_head_index + 1);
+    ++_no_entries;
   }
-  _head_index = trim_index(_head_index + 1);
-  ++_no_entries;
   _array[_head_index] = G1MMUTrackerQueueElem(start, end);
 }
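
The forgetful behaviour amounts to overwriting the oldest slot of the fixed
circular buffer. A sketch with a hypothetical QueueLength of 4, where
trim_index() wraps an index around QueueLength:

    // before:  _array = [e0 e1 e2 e3], _head_index = 3, _tail_index = 0 (full)
    // add e4:  _head_index = trim_index(3 + 1) = 0
    //          _tail_index = trim_index(0 + 1) = 1
    // after:   _array = [e4 e1 e2 e3] -- e0, the oldest entry, is forgotten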
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -99,7 +99,10 @@
   // The array is of fixed size and I don't think we'll need more than
   // two or three entries with the current behaviour of G1 pauses.
   // If the array is full, an easy fix is to look for the pauses with
-  // the shortest gap between them and concolidate them.
+  // the shortest gap between them and consolidate them.
+  // For now, we have taken the expedient alternative of forgetting
+  // the oldest entry when +G1ForgetfulMMUTracker is set, thus
+  // potentially violating MMU specs for some time thereafter.
 
   G1MMUTrackerQueueElem _array[QueueLength];
   int                   _head_index;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -242,6 +242,10 @@
   product(bool, G1UseSurvivorSpaces, true,                                  \
           "When true, use survivor space.")                                 \
                                                                             \
+  develop(bool, G1FailOnFPError, false,                                     \
+          "When set, G1 will fail when it encounters an FP 'error', "       \
+          "so as to allow debugging")                                       \
+                                                                            \
   develop(bool, G1FixedTenuringThreshold, false,                            \
           "When set, G1 will not adjust the tenuring threshold")            \
                                                                             \
@@ -252,6 +256,9 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
+  product(bool, G1ForgetfulMMUTracker, false,                               \
+          "If the MMU tracker's memory is full, forget the oldest entry")   \
+                                                                            \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
--- a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -107,7 +107,7 @@
     res[0] = NULL;
     return res;
   } else {
-    return NEW_C_HEAP_ARRAY(void*, _sz);
+    return (void**) NEW_C_HEAP_ARRAY(char, _sz);
   }
 }
 
@@ -127,7 +127,8 @@
     assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
     void** head = _buf_free_list;
     _buf_free_list = (void**)_buf_free_list[0];
-    FREE_C_HEAP_ARRAY(void*,head);
+    FREE_C_HEAP_ARRAY(char, head);
+    _buf_free_list_sz--;
     n--;
   }
 }
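
The switch to char is because _sz is evidently a byte count (note the cast
back to void** above): NEW_C_HEAP_ARRAY(T, n) allocates n * sizeof(T)
bytes, so allocating and freeing as void* would be off by a factor of
sizeof(void*). With an illustrative _sz of 1024:

    // NEW_C_HEAP_ARRAY(void*, _sz)  -> 1024 * sizeof(void*) = 8192 bytes on LP64
    // NEW_C_HEAP_ARRAY(char,  _sz)  -> 1024 bytes, as intended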
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -42,7 +42,7 @@
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause);
+  GCCauseSetter x(g1h, _gc_cause);
   g1h->do_collection_pause_at_safepoint();
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -68,8 +68,9 @@
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
  public:
-  VM_G1IncCollectionPause(int gc_count_before) :
-    VM_GC_Operation(gc_count_before) {}
+  VM_G1IncCollectionPause(int gc_count_before,
+                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
+    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
   virtual const char* name() const {
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Fri Dec 11 08:39:30 2009 -0800
@@ -222,6 +222,15 @@
 g1MarkSweep.hpp                         timer.hpp
 g1MarkSweep.hpp                         universe.hpp
 
+g1MemoryPool.cpp                        heapRegion.hpp
+g1MemoryPool.cpp                        g1CollectedHeap.inline.hpp
+g1MemoryPool.cpp                        g1CollectedHeap.hpp
+g1MemoryPool.cpp                        g1CollectorPolicy.hpp
+g1MemoryPool.cpp                        g1MemoryPool.hpp
+
+g1MemoryPool.hpp                        memoryUsage.hpp
+g1MemoryPool.hpp                        memoryPool.hpp
+
 g1OopClosures.inline.hpp		concurrentMark.hpp
 g1OopClosures.inline.hpp		g1OopClosures.hpp
 g1OopClosures.inline.hpp		g1CollectedHeap.hpp
@@ -303,6 +312,8 @@
 
 klass.hpp				g1OopClosures.hpp
 
+memoryService.cpp                       g1MemoryPool.hpp
+
 ptrQueue.cpp                            allocation.hpp
 ptrQueue.cpp                            allocation.inline.hpp
 ptrQueue.cpp                            mutex.hpp
--- a/hotspot/src/share/vm/includeDB_core	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/includeDB_core	Fri Dec 11 08:39:30 2009 -0800
@@ -289,7 +289,7 @@
 attachListener.hpp                      debug.hpp
 attachListener.hpp                      ostream.hpp
 
-barrierSet.cpp				barrierSet.hpp
+barrierSet.cpp				barrierSet.inline.hpp
 barrierSet.cpp			        collectedHeap.hpp
 barrierSet.cpp				universe.hpp
 
--- a/hotspot/src/share/vm/memory/barrierSet.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/memory/barrierSet.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -41,11 +41,6 @@
 
 // count is number of array elements being written
 void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
-  assert(count <= (size_t)max_intx, "count too large");
-  HeapWord* end = start + objArrayOopDesc::array_size((int)count);
-#if 0
-  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
-                   start,            count,              start,          end);
-#endif
-  Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, end));
+  // simply delegate to instance method
+  Universe::heap()->barrier_set()->write_ref_array(start, count);
 }
--- a/hotspot/src/share/vm/memory/barrierSet.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/memory/barrierSet.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -121,17 +121,20 @@
   virtual void read_ref_array(MemRegion mr) = 0;
   virtual void read_prim_array(MemRegion mr) = 0;
 
+  // Below length is the # array elements being written
   virtual void write_ref_array_pre(      oop* dst, int length) {}
   virtual void write_ref_array_pre(narrowOop* dst, int length) {}
+  // Below MemRegion mr is expected to be HeapWord-aligned
   inline void write_ref_array(MemRegion mr);
+  // Below count is the # array elements being written, starting
+  // at the address "start", which may not necessarily be HeapWord-aligned
+  inline void write_ref_array(HeapWord* start, size_t count);
 
-  // Static versions, suitable for calling from generated code.
+  // Static versions, suitable for calling from generated code;
+  // count is # array elements being written, starting with "start",
+  // which may not necessarily be HeapWord-aligned.
   static void static_write_ref_array_pre(HeapWord* start, size_t count);
   static void static_write_ref_array_post(HeapWord* start, size_t count);
-  // Narrow oop versions of the above; count is # of array elements being written,
-  // starting with "start", which is HeapWord-aligned.
-  static void static_write_ref_array_pre_narrow(HeapWord* start, size_t count);
-  static void static_write_ref_array_post_narrow(HeapWord* start, size_t count);
 
 protected:
   virtual void write_ref_array_work(MemRegion mr) = 0;
--- a/hotspot/src/share/vm/memory/barrierSet.inline.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/memory/barrierSet.inline.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -43,6 +43,8 @@
 }
 
 void BarrierSet::write_ref_array(MemRegion mr) {
+  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),    "Unaligned end"  );
   if (kind() == CardTableModRef) {
     ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
   } else {
@@ -50,6 +52,34 @@
   }
 }
 
+// count is number of array elements being written
+void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
+  assert(count <= (size_t)max_intx, "count too large");
+  HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
+  // In the case of compressed oops, start and end may potentially be misaligned;
+  // so we need to conservatively align the first downward (this is not
+  // strictly necessary for current uses, but a case of good hygiene and,
+  // if you will, aesthetics) and the second upward (this is essential for
+  // current uses) to a HeapWord boundary, so we mark all cards overlapping
+  // this write. In the event that this evolves in the future to calling a
+  // logging barrier of narrow oop granularity, like the pre-barrier for G1
+  // (mentioned here merely by way of example), we will need to change this
+  // interface, much like the pre-barrier one above, so it is "exactly precise"
+// (if I may be allowed the adverbial redundancy for emphasis) and does not
+  // include narrow oop slots not included in the original write interval.
+  HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
+  HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
+  // If compressed oops were not being used, these should already be aligned
+  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
+         "Expected heap word alignment of start and end");
+#if 0
+  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
+                   start,            count,              aligned_start,   aligned_end);
+#endif
+  write_ref_array_work(MemRegion(aligned_start, aligned_end));
+}
+
+
 void BarrierSet::write_region(MemRegion mr) {
   if (kind() == CardTableModRef) {
     ((CardTableModRefBS*)this)->inline_write_region(mr);
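
A worked example of the alignment in the new write_ref_array(HeapWord*,
size_t) above, under compressed oops (heapOopSize = 4, HeapWordSize = 8)
and with hypothetical addresses:

    // start = 0x1004 (narrowOop-aligned, not HeapWord-aligned), count = 3
    // end   = 0x1004 + 3*4 = 0x1010
    // aligned_start = align_size_down(0x1004, 8) = 0x1000
    // aligned_end   = align_size_up  (0x1010, 8) = 0x1010
    // => cards covering [0x1000, 0x1010) are dirtied, spanning every written slot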
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -511,6 +511,8 @@
 }
 
 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
+  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   jbyte* cur  = byte_for(mr.start());
   jbyte* last = byte_after(mr.last());
   while (cur < last) {
@@ -520,6 +522,8 @@
 }
 
 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
+  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (!mri.is_empty()) dirty_MemRegion(mri);
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -224,10 +224,6 @@
                           CodeBlobClosure* code_roots,
                           OopClosure* non_root_closure);
 
-
-  // Like CollectedHeap::collect, but assume that the caller holds the Heap_lock.
-  virtual void collect_locked(GCCause::Cause cause) = 0;
-
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
   // functions.
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -127,16 +127,14 @@
           // pointer delta is scaled to number of elements (length field in
           // objArrayOop) which we assume is 32 bit.
           assert(pd == (size_t)(int)pd, "length field overflow");
-          const size_t done_word_len = objArrayOopDesc::array_size((int)pd);
-          bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
+          bs->write_ref_array((HeapWord*)dst, pd);
           THROW(vmSymbols::java_lang_ArrayStoreException());
           return;
         }
       }
     }
   }
-  const size_t word_len = objArrayOopDesc::array_size(length);
-  bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
+  bs->write_ref_array((HeapWord*)dst, length);
 }
 
 void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
--- a/hotspot/src/share/vm/oops/objArrayOop.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/oops/objArrayOop.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -37,6 +37,32 @@
     return &((T*)base())[index];
   }
 
+private:
+  // Give size of objArrayOop in HeapWords minus the header
+  static int array_size(int length) {
+    const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+    assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
+           "Else the following (new) computation would be in error");
+#ifdef ASSERT
+    // The old code is left in for sanity-checking; it'll
+    // go away pretty soon. XXX
+    // Without UseCompressedOops, this is simply:
+    // oop->length() * HeapWordsPerOop;
+    // With narrowOops, HeapWordsPerOop is 1/2, which is 0 as an integer.
+    // The oop elements are aligned up to wordSize
+    const int HeapWordsPerOop = heapOopSize/HeapWordSize;
+    int old_res;
+    if (HeapWordsPerOop > 0) {
+      old_res = length * HeapWordsPerOop;
+    } else {
+      old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+    }
+#endif  // ASSERT
+    int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
+    assert(res == old_res, "Inconsistency between old and new.");
+    return res;
+  }
+
  public:
   // Returns the offset of the first element.
   static int base_offset_in_bytes() {
@@ -67,27 +93,14 @@
   // Sizing
   static int header_size()    { return arrayOopDesc::header_size(T_OBJECT); }
   int object_size()           { return object_size(length()); }
-  int array_size()            { return array_size(length()); }
 
   static int object_size(int length) {
     // This returns the object size in HeapWords.
-    return align_object_size(header_size() + array_size(length));
-  }
-
-  // Give size of objArrayOop in HeapWords minus the header
-  static int array_size(int length) {
-    // Without UseCompressedOops, this is simply:
-    // oop->length() * HeapWordsPerOop;
-    // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
-    // The oop elements are aligned up to wordSize
-    const int HeapWordsPerOop = heapOopSize/HeapWordSize;
-    if (HeapWordsPerOop > 0) {
-      return length * HeapWordsPerOop;
-    } else {
-      const int OopsPerHeapWord = HeapWordSize/heapOopSize;
-      int word_len = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
-      return word_len;
-    }
+    uint asz = array_size(length);
+    uint osz = align_object_size(header_size() + asz);
+    assert(osz >= asz,   "no overflow");
+    assert((int)osz > 0, "no overflow");
+    return (int)osz;
   }
 
   // special iterators for index ranges, returns size of object
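
For the new ceiling division in array_size(), a worked example under
compressed oops on a 64-bit VM (heapOopSize = 4, HeapWordSize = 8, so
OopsPerHeapWord = 2):

    // length = 5: res = (5 + 2 - 1)/2 = 3 heap words
    //   (old path: align_size_up(5, 2)/2 = 6/2 = 3, matching the assert)
    // length = 4: res = (4 + 2 - 1)/2 = 2 heap words
    // without compressed oops, OopsPerHeapWord = 1 and array_size(n) == n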
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/g1MemoryPool.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_g1MemoryPool.cpp.incl"
+
+G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
+                                     const char* name,
+                                     size_t init_size,
+                                     size_t max_size,
+                                     bool support_usage_threshold) :
+  _g1h(g1h), CollectedMemoryPool(name,
+                                 MemoryPool::Heap,
+                                 init_size,
+                                 max_size,
+                                 support_usage_threshold) {
+  assert(UseG1GC, "sanity");
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
+  return MAX2(eden_space_used(g1h), (size_t) HeapRegion::GrainBytes);
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
+  size_t young_list_length = g1h->young_list_length();
+  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
+  size_t survivor_used = survivor_space_used(g1h);
+  eden_used = subtract_up_to_zero(eden_used, survivor_used);
+  return eden_used;
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::eden_space_max(G1CollectedHeap* g1h) {
+  // This should ensure that it returns a value no smaller than the
+  // region size. Currently, eden_space_committed() guarantees that.
+  return eden_space_committed(g1h);
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
+  return MAX2(survivor_space_used(g1h), (size_t) HeapRegion::GrainBytes);
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
+  size_t survivor_num = g1h->g1_policy()->recorded_survivor_regions();
+  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
+  return survivor_used;
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::survivor_space_max(G1CollectedHeap* g1h) {
+  // This should ensure that it returns a value no smaller than the
+  // region size. Currently, survivor_space_committed() guarantees that.
+  return survivor_space_committed(g1h);
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
+  size_t committed = overall_committed(g1h);
+  size_t eden_committed = eden_space_committed(g1h);
+  size_t survivor_committed = survivor_space_committed(g1h);
+  committed = subtract_up_to_zero(committed, eden_committed);
+  committed = subtract_up_to_zero(committed, survivor_committed);
+  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
+  return committed;
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
+  size_t used = overall_used(g1h);
+  size_t eden_used = eden_space_used(g1h);
+  size_t survivor_used = survivor_space_used(g1h);
+  used = subtract_up_to_zero(used, eden_used);
+  used = subtract_up_to_zero(used, survivor_used);
+  return used;
+}
+
+// See the comment at the top of g1MemoryPool.hpp
+size_t G1MemoryPoolSuper::old_space_max(G1CollectedHeap* g1h) {
+  size_t max = overall_max(g1h);
+  size_t eden_max = eden_space_max(g1h);
+  size_t survivor_max = survivor_space_max(g1h);
+  max = subtract_up_to_zero(max, eden_max);
+  max = subtract_up_to_zero(max, survivor_max);
+  max = MAX2(max, (size_t) HeapRegion::GrainBytes);
+  return max;
+}
+
+G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
+  G1MemoryPoolSuper(g1h,
+                    "G1 Eden",
+                    eden_space_committed(g1h), /* init_size */
+                    eden_space_max(g1h), /* max_size */
+                    false /* support_usage_threshold */) {
+}
+
+MemoryUsage G1EdenPool::get_memory_usage() {
+  size_t initial_sz = initial_size();
+  size_t max_sz     = max_size();
+  size_t used       = used_in_bytes();
+  size_t committed  = eden_space_committed(_g1h);
+
+  return MemoryUsage(initial_sz, used, committed, max_sz);
+}
+
+G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
+  G1MemoryPoolSuper(g1h,
+                    "G1 Survivor",
+                    survivor_space_committed(g1h), /* init_size */
+                    survivor_space_max(g1h), /* max_size */
+                    false /* support_usage_threshold */) {
+}
+
+MemoryUsage G1SurvivorPool::get_memory_usage() {
+  size_t initial_sz = initial_size();
+  size_t max_sz     = max_size();
+  size_t used       = used_in_bytes();
+  size_t committed  = survivor_space_committed(_g1h);
+
+  return MemoryUsage(initial_sz, used, committed, max_sz);
+}
+
+G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
+  G1MemoryPoolSuper(g1h,
+                    "G1 Old Gen",
+                    old_space_committed(g1h), /* init_size */
+                    old_space_max(g1h), /* max_size */
+                    true /* support_usage_threshold */) {
+}
+
+MemoryUsage G1OldGenPool::get_memory_usage() {
+  size_t initial_sz = initial_size();
+  size_t max_sz     = max_size();
+  size_t used       = used_in_bytes();
+  size_t committed  = old_space_committed(_g1h);
+
+  return MemoryUsage(initial_sz, used, committed, max_sz);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/services/g1MemoryPool.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class G1CollectedHeap;
+
+// This file contains the three classes that represent the memory
+// pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
+// G1OldGenPool. In G1, unlike our other GCs, we do not have a
+// physical space for each of those spaces. Instead, we allocate
+// regions for all three spaces out of a single pool of regions (that
+// pool basically covers the entire heap). As a result, the eden,
+// survivor, and old gen are considered logical spaces in G1, as each
+// is a set of non-contiguous regions. This is also reflected in the
+// way we map them to memory pools here. The easiest way to have done
+// this would have been to map the entire G1 heap to a single memory
+// pool. However, it's helpful to show how large the eden and survivor
+// get, as this does affect the performance and behavior of G1, which
+// is why we introduce the three memory pools implemented here.
+//
+// The above approach introduces a couple of challenging issues in the
+// implementation of the three memory pools:
+//
+// 1) The used space calculation for a pool is not necessarily
+// independent of the others. We can easily get from G1 the overall
+// used space in the entire heap, the number of regions in the young
+// generation (includes both eden and survivors), and the number of
+// survivor regions. So, from that we calculate:
+//
+//  survivor_used = survivor_num * region_size
+//  eden_used     = young_region_num * region_size - survivor_used
+//  old_gen_used  = overall_used - eden_used - survivor_used
+//
+// Note that survivor_used and eden_used are upper bounds. To get the
+// actual value we would have to iterate over the regions and add up
+// ->used(). But that'd be expensive. So, we'll accept some lack of
+// accuracy for those two. But, we have to be careful when calculating
+// old_gen_used, in case we subtract from overall_used more than the
+// actual number and our result goes negative.
+//
+// 2) Calculating the used space is straightforward, as described
+// above. However, how do we calculate the committed space, given that
+// we allocate space for the eden, survivor, and old gen out of the
+// same pool of regions? One way to do this is to use the used value
+// as also the committed value for the eden and survivor spaces and
+// then calculate the old gen committed space as follows:
+//
+//  old_gen_committed = overall_committed - eden_committed - survivor_committed
+//
+// Maybe a better way to do that would be to calculate used for eden
+// and survivor as a sum of ->used() over their regions and then
+// calculate committed as region_num * region_size (i.e., what we use
+// to calculate the used space now). This is something to consider
+// in the future.
+//
+// 3) Another decision that is again not straightforward is what is
+// the max size that each memory pool can grow to. Right now, we set
+// that the committed size for the eden and the survivors and
+// calculate the old gen max as follows (basically, it's a similar
+// pattern to what we use for the committed space, as described
+// above):
+//
+//  old_gen_max = overall_max - eden_max - survivor_max
+//
+// 4) Now, there is a very subtle issue with all the above. The
+// framework will call get_memory_usage() on the three pools
+// asynchronously. As a result, each call might get a different value
+// for, say, survivor_num which will yield inconsistent values for
+// eden_used, survivor_used, and old_gen_used (as survivor_num is used
+// in the calculation of all three). This would normally be
+// ok. However, it's possible that this might cause the sum of
+// eden_used, survivor_used, and old_gen_used to go over the max heap
+// size and this seems to sometimes cause JConsole (and maybe other
+// clients) to get confused. There's not really an easy / clean
+// solution to this problem, due to the asynchronous nature of the
+// framework.
+
+
+// This class is shared by the three G1 memory pool classes
+// (G1EdenPool, G1SurvivorPool, G1OldGenPool). Given that the way we
+// calculate used / committed bytes for these three pools is related
+// (see comment above), we put the calculations in this class so that
+// we can easily share them among the subclasses.
+class G1MemoryPoolSuper : public CollectedMemoryPool {
+private:
+  // It returns x - y if x > y, 0 otherwise.
+  // As described in the comment above, some of the inputs to the
+  // calculations we have to do are obtained concurrently and hence
+  // may be inconsistent with each other. So, this provides a
+  // defensive way of performing the subtraction and avoids the value
+  // going negative (which would mean a very large result, given that
+  // the parameters are size_t).
+  static size_t subtract_up_to_zero(size_t x, size_t y) {
+    if (x > y) {
+      return x - y;
+    } else {
+      return 0;
+    }
+  }
+
+protected:
+  G1CollectedHeap* _g1h;
+
+  // Would only be called from subclasses.
+  G1MemoryPoolSuper(G1CollectedHeap* g1h,
+                    const char* name,
+                    size_t init_size,
+                    size_t max_size,
+                    bool support_usage_threshold);
+
+  // The reason why all the code is in static methods is so that it
+  // can be safely called from the constructors of the subclasses.
+
+  static size_t overall_committed(G1CollectedHeap* g1h) {
+    return g1h->capacity();
+  }
+  static size_t overall_used(G1CollectedHeap* g1h) {
+    return g1h->used_unlocked();
+  }
+  static size_t overall_max(G1CollectedHeap* g1h) {
+    return g1h->g1_reserved_obj_bytes();
+  }
+
+  static size_t eden_space_committed(G1CollectedHeap* g1h);
+  static size_t eden_space_used(G1CollectedHeap* g1h);
+  static size_t eden_space_max(G1CollectedHeap* g1h);
+
+  static size_t survivor_space_committed(G1CollectedHeap* g1h);
+  static size_t survivor_space_used(G1CollectedHeap* g1h);
+  static size_t survivor_space_max(G1CollectedHeap* g1h);
+
+  static size_t old_space_committed(G1CollectedHeap* g1h);
+  static size_t old_space_used(G1CollectedHeap* g1h);
+  static size_t old_space_max(G1CollectedHeap* g1h);
+};
+
+// Memory pool that represents the G1 eden.
+class G1EdenPool : public G1MemoryPoolSuper {
+public:
+  G1EdenPool(G1CollectedHeap* g1h);
+
+  size_t used_in_bytes() {
+    return eden_space_used(_g1h);
+  }
+  size_t max_size() const {
+    return eden_space_max(_g1h);
+  }
+  MemoryUsage get_memory_usage();
+};
+
+// Memory pool that represents the G1 survivor.
+class G1SurvivorPool : public G1MemoryPoolSuper {
+public:
+  G1SurvivorPool(G1CollectedHeap* g1h);
+
+  size_t used_in_bytes() {
+    return survivor_space_used(_g1h);
+  }
+  size_t max_size() const {
+    return survivor_space_max(_g1h);
+  }
+  MemoryUsage get_memory_usage();
+};
+
+// Memory pool that represents the G1 old gen.
+class G1OldGenPool : public G1MemoryPoolSuper {
+public:
+  G1OldGenPool(G1CollectedHeap* g1h);
+
+  size_t used_in_bytes() {
+    return old_space_used(_g1h);
+  }
+  size_t max_size() const {
+    return old_space_max(_g1h);
+  }
+  MemoryUsage get_memory_usage();
+};
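
Illustrating the arithmetic from the comment block above with hypothetical
numbers (1M regions, 10 young regions of which 2 are survivors, 50M used
overall):

    // survivor_used = 2 * 1M        =  2M
    // eden_used     = 10 * 1M - 2M  =  8M
    // old_gen_used  = 50M - 8M - 2M = 40M

subtract_up_to_zero() ensures that transiently inconsistent inputs (e.g. a
stale survivor_num) can at worst drive a result to 0, never negative.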
--- a/hotspot/src/share/vm/services/memoryManager.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/services/memoryManager.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -72,6 +72,14 @@
   return (GCMemoryManager*) new PSMarkSweepMemoryManager();
 }
 
+GCMemoryManager* MemoryManager::get_g1YoungGen_memory_manager() {
+  return (GCMemoryManager*) new G1YoungGenMemoryManager();
+}
+
+GCMemoryManager* MemoryManager::get_g1OldGen_memory_manager() {
+  return (GCMemoryManager*) new G1OldGenMemoryManager();
+}
+
 instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
   // Must do an acquire so as to force ordering of subsequent
   // loads from anything _memory_mgr_obj points to or implies.
--- a/hotspot/src/share/vm/services/memoryManager.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/services/memoryManager.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -54,7 +54,9 @@
     ParNew,
     ConcurrentMarkSweep,
     PSScavenge,
-    PSMarkSweep
+    PSMarkSweep,
+    G1YoungGen,
+    G1OldGen
   };
 
   MemoryManager();
@@ -85,6 +87,8 @@
   static GCMemoryManager* get_cms_memory_manager();
   static GCMemoryManager* get_psScavenge_memory_manager();
   static GCMemoryManager* get_psMarkSweep_memory_manager();
+  static GCMemoryManager* get_g1YoungGen_memory_manager();
+  static GCMemoryManager* get_g1OldGen_memory_manager();
 
 };
 
@@ -231,3 +235,21 @@
   MemoryManager::Name kind() { return MemoryManager::PSMarkSweep; }
   const char* name()         { return "PS MarkSweep"; }
 };
+
+class G1YoungGenMemoryManager : public GCMemoryManager {
+private:
+public:
+  G1YoungGenMemoryManager() : GCMemoryManager() {}
+
+  MemoryManager::Name kind() { return MemoryManager::G1YoungGen; }
+  const char* name()         { return "G1 Young Generation"; }
+};
+
+class G1OldGenMemoryManager : public GCMemoryManager {
+private:
+public:
+  G1OldGenMemoryManager() : GCMemoryManager() {}
+
+  MemoryManager::Name kind() { return MemoryManager::G1OldGen; }
+  const char* name()         { return "G1 Old Generation"; }
+};
--- a/hotspot/src/share/vm/services/memoryService.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/services/memoryService.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -60,8 +60,8 @@
       break;
     }
     case CollectedHeap::G1CollectedHeap : {
-      G1CollectedHeap::g1_unimplemented();
-      return;
+      add_g1_heap_info(G1CollectedHeap::heap());
+      break;
     }
 #endif // SERIALGC
     default: {
@@ -164,6 +164,19 @@
   add_psOld_memory_pool(heap->old_gen(), _major_gc_manager);
   add_psPerm_memory_pool(heap->perm_gen(), _major_gc_manager);
 }
+
+void MemoryService::add_g1_heap_info(G1CollectedHeap* g1h) {
+  assert(UseG1GC, "sanity");
+
+  _minor_gc_manager = MemoryManager::get_g1YoungGen_memory_manager();
+  _major_gc_manager = MemoryManager::get_g1OldGen_memory_manager();
+  _managers_list->append(_minor_gc_manager);
+  _managers_list->append(_major_gc_manager);
+
+  add_g1YoungGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager);
+  add_g1OldGen_memory_pool(g1h, _major_gc_manager);
+  add_g1PermGen_memory_pool(g1h, _major_gc_manager);
+}
 #endif // SERIALGC
 
 MemoryPool* MemoryService::add_gen(Generation* gen,
@@ -384,6 +397,64 @@
   mgr->add_pool(perm_gen);
   _pools_list->append(perm_gen);
 }
+
+void MemoryService::add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
+                                               MemoryManager* major_mgr,
+                                               MemoryManager* minor_mgr) {
+  assert(major_mgr != NULL && minor_mgr != NULL, "should have two managers");
+
+  G1EdenPool* eden = new G1EdenPool(g1h);
+  G1SurvivorPool* survivor = new G1SurvivorPool(g1h);
+
+  major_mgr->add_pool(eden);
+  major_mgr->add_pool(survivor);
+  minor_mgr->add_pool(eden);
+  minor_mgr->add_pool(survivor);
+  _pools_list->append(eden);
+  _pools_list->append(survivor);
+}
+
+void MemoryService::add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
+                                             MemoryManager* mgr) {
+  assert(mgr != NULL, "should have one manager");
+
+  G1OldGenPool* old_gen = new G1OldGenPool(g1h);
+  mgr->add_pool(old_gen);
+  _pools_list->append(old_gen);
+}
+
+void MemoryService::add_g1PermGen_memory_pool(G1CollectedHeap* g1h,
+                                              MemoryManager* mgr) {
+  assert(mgr != NULL, "should have one manager");
+
+  CompactingPermGenGen* perm_gen = (CompactingPermGenGen*) g1h->perm_gen();
+  PermanentGenerationSpec* spec = perm_gen->spec();
+  size_t max_size = spec->max_size() - spec->read_only_size()
+                                     - spec->read_write_size();
+  MemoryPool* pool = add_space(perm_gen->unshared_space(),
+                               "G1 Perm Gen",
+                               false, /* is_heap */
+                               max_size,
+                               true   /* support_usage_threshold */);
+  mgr->add_pool(pool);
+
+  // in case we support CDS in G1
+  if (UseSharedSpaces) {
+    pool = add_space(perm_gen->ro_space(),
+                     "G1 Perm Gen [shared-ro]",
+                     false, /* is_heap */
+                     spec->read_only_size(),
+                     true   /* support_usage_threshold */);
+    mgr->add_pool(pool);
+
+    pool = add_space(perm_gen->rw_space(),
+                     "G1 Perm Gen [shared-rw]",
+                     false, /* is_heap */
+                     spec->read_write_size(),
+                     true   /* support_usage_threshold */);
+    mgr->add_pool(pool);
+  }
+}
 #endif // SERIALGC
 
 void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
--- a/hotspot/src/share/vm/services/memoryService.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/services/memoryService.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -40,6 +40,7 @@
 class ParallelScavengeHeap;
 class CompactingPermGenGen;
 class CMSPermGenGen;
+class G1CollectedHeap;
 
 // VM Monitoring and Management Support
 
@@ -88,6 +89,13 @@
   static void add_psPerm_memory_pool(PSPermGen* perm,
                                      MemoryManager* mgr);
 
+  static void add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
+                                         MemoryManager* major_mgr,
+                                         MemoryManager* minor_mgr);
+  static void add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
+                                       MemoryManager* mgr);
+  static void add_g1PermGen_memory_pool(G1CollectedHeap* g1h,
+                                        MemoryManager* mgr);
 
   static MemoryPool* add_space(ContiguousSpace* space,
                                const char* name,
@@ -111,6 +119,7 @@
 
   static void add_gen_collected_heap_info(GenCollectedHeap* heap);
   static void add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap);
+  static void add_g1_heap_info(G1CollectedHeap* g1h);
 
 public:
   static void set_universe_heap(CollectedHeap* heap);
--- a/hotspot/src/share/vm/utilities/numberSeq.cpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/utilities/numberSeq.cpp	Fri Dec 11 08:39:30 2009 -0800
@@ -241,3 +241,33 @@
 
   return b0 + b1 * num;
 }
+
+
+// Printing/Debugging Support
+
+void AbsSeq::dump() { dump_on(gclog_or_tty); }
+
+void AbsSeq::dump_on(outputStream* s) {
+  s->print_cr("\t _num = %d, _sum = %7.3f, _sum_of_squares = %7.3f",
+                  _num,      _sum,         _sum_of_squares);
+  s->print_cr("\t _davg = %7.3f, _dvariance = %7.3f, _alpha = %7.3f",
+                  _davg,         _dvariance,         _alpha);
+}
+
+void NumberSeq::dump_on(outputStream* s) {
+  AbsSeq::dump_on(s);
+  s->print_cr("\t\t _last = %7.3f, _maximum = %7.3f", _last, _maximum);
+}
+
+void TruncatedSeq::dump_on(outputStream* s) {
+  AbsSeq::dump_on(s);
+  s->print_cr("\t\t _length = %d, _next = %d", _length, _next);
+  for (int i = 0; i < _length; i++) {
+    if (i%5 == 0) {
+      s->cr();
+      s->print("\t");
+    }
+    s->print("\t[%d]=%7.3f", i, _sequence[i]);
+  }
+  s->print_cr("");
+}
--- a/hotspot/src/share/vm/utilities/numberSeq.hpp	Wed Dec 02 13:29:00 2009 -0800
+++ b/hotspot/src/share/vm/utilities/numberSeq.hpp	Fri Dec 11 08:39:30 2009 -0800
@@ -74,6 +74,10 @@
   double davg() const; // decaying average
   double dvariance() const; // decaying variance
   double dsd() const; // decaying "standard deviation"
+
+  // Debugging/Printing
+  virtual void dump();
+  virtual void dump_on(outputStream* s);
 };
 
 class NumberSeq: public AbsSeq {
@@ -91,6 +95,9 @@
   virtual void add(double val);
   virtual double maximum() const { return _maximum; }
   virtual double last() const { return _last; }
+
+  // Debugging/Printing
+  virtual void dump_on(outputStream* s);
 };
 
 class TruncatedSeq: public AbsSeq {
@@ -114,4 +121,7 @@
 
   double oldest() const; // the oldest valid value in the sequence
   double predict_next() const; // prediction based on linear regression
+
+  // Debugging/Printing
+  virtual void dump_on(outputStream* s);
 };