6887571: Increase default heap config sizes
author phh
Wed, 28 Oct 2009 16:25:51 -0400
changeset 4434 4b41e5b42f81
parent 4090 3dd0ee516f2d
child 4435 e72b7032de68
6887571: Increase default heap config sizes
Summary: Apply modification of existing server heap size ergo to all collectors except CMS.
Reviewed-by: jmasa, ysr, xlu
hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp
hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
hotspot/src/cpu/sparc/vm/globals_sparc.hpp
hotspot/src/cpu/x86/vm/c1_globals_x86.hpp
hotspot/src/cpu/x86/vm/c2_globals_x86.hpp
hotspot/src/cpu/x86/vm/globals_x86.hpp
hotspot/src/cpu/zero/vm/globals_zero.hpp
hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp
hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp
hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp
hotspot/src/share/vm/memory/collectorPolicy.cpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/arguments.hpp
hotspot/src/share/vm/runtime/globals.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/globals_extension.hpp
hotspot/src/share/vm/services/management.cpp
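
The patch retires the per-platform DefaultMaxRAM / DefaultMaxRAMFraction pair in favor of a uint64_t MaxRAM flag plus the MaxRAMFraction, MinRAMFraction, InitialRAMFraction and ErgoHeapSizeLimit knobs, and it replaces the server-only Arguments::set_server_heap_size() with Arguments::set_heap_size(), which now runs for every collector except CMS. The standalone sketch below restates the core of the new sizing arithmetic outside of HotSpot; the MinRAMFraction, ErgoHeapSizeLimit and compressed-oops clamps are omitted, and the fraction defaults are assumptions for illustration, not values taken from the hunks shown here.

    // Standalone restatement of the arithmetic in the new Arguments::set_heap_size();
    // not HotSpot code.  Fraction defaults and generation sizes are assumed.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t G = 1024ull * 1024 * 1024;
      const uint64_t M = 1024ull * 1024;
      uint64_t phys_mem                 = 8 * G;    // hypothetical machine
      const uint64_t MaxRAM             = 128 * G;  // 64-bit server default from this patch
      const uint64_t MaxRAMFraction     = 4;        // assumed default
      const uint64_t InitialRAMFraction = 64;       // assumed default
      const uint64_t NewSize = 4 * M, OldSize = 4 * M; // placeholder generation sizes

      phys_mem = std::min(phys_mem, MaxRAM);                       // MaxRAM caps what ergonomics sees
      uint64_t reasonable_max     = phys_mem / MaxRAMFraction;     // becomes the default -Xmx
      uint64_t reasonable_initial = phys_mem / InitialRAMFraction; // becomes the default -Xms
      reasonable_initial = std::max(reasonable_initial, NewSize + OldSize);
      reasonable_initial = std::min(reasonable_initial, reasonable_max);

      printf("default max heap     = %llu MB\n", (unsigned long long)(reasonable_max / M));
      printf("default initial heap = %llu MB\n", (unsigned long long)(reasonable_initial / M));
      return 0;
    }
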
--- a/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -22,10 +22,9 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the client compiler.
 // (see c1_globals.hpp)
-//
+
 #ifndef TIERED
 define_pd_global(bool, BackgroundCompilation,        true );
 define_pd_global(bool, CICompileOSR,                 true );
@@ -48,27 +47,24 @@
 define_pd_global(bool, UseTLAB,                      true );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, FreqInlineSize,               325  );
-define_pd_global(intx, NewRatio,                     8    ); // Design center runs on 1.3.1
 define_pd_global(bool, ResizeTLAB,                   true );
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,CodeCacheMinBlockLength,      1);
-define_pd_global(uintx, PermSize,                    12*M );
-define_pd_global(uintx, MaxPermSize,                 64*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true);
+define_pd_global(uintx,PermSize,                     12*M );
+define_pd_global(uintx,MaxPermSize,                  64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
 define_pd_global(intx, NewSizeThreadIncrease,        16*K );
-define_pd_global(uintx, DefaultMaxRAM,               1*G);
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
-#endif // TIERED
+#endif // !TIERED
 
 define_pd_global(bool, UseTypeProfile,               false);
 define_pd_global(bool, RoundFPResults,               false);
 
-
-define_pd_global(bool, LIRFillDelaySlots,            true);
+define_pd_global(bool, LIRFillDelaySlots,            true );
 define_pd_global(bool, OptimizeSinglePrecision,      false);
-define_pd_global(bool, CSEArrayLength,               true);
+define_pd_global(bool, CSEArrayLength,               true );
 define_pd_global(bool, TwoOperandLIRForm,            false);
 
-
-define_pd_global(intx, SafepointPollOffset, 0);
+define_pd_global(intx, SafepointPollOffset,          0    );
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -59,7 +59,6 @@
 define_pd_global(intx, FreqInlineSize,               175);
 define_pd_global(intx, INTPRESSURE,                  48);  // large register set
 define_pd_global(intx, InteriorEntryAlignment,       16);  // = CodeEntryAlignment
-define_pd_global(intx, NewRatio,                     2);
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
 // The default setting 16/16 seems to work best.
 // (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
@@ -83,25 +82,25 @@
 // sequence of instructions to load a 64 bit pointer.
 //
 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,     2048*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize,    48*M);
-define_pd_global(intx, CodeCacheExpansionSize,   64*K);
+define_pd_global(intx, InitialCodeCacheSize,         2048*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, ReservedCodeCacheSize,        48*M);
+define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM,           32*G);
+define_pd_global(uint64_t,MaxRAM,                    128ULL*G);
 #else
 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,     1536*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize,    32*M);
-define_pd_global(intx, CodeCacheExpansionSize,   32*K);
+define_pd_global(intx, InitialCodeCacheSize,         1536*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, ReservedCodeCacheSize,        32*M);
+define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM,           1*G);
+define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
 #endif
-define_pd_global(uintx,CodeCacheMinBlockLength,  4);
+define_pd_global(uintx,CodeCacheMinBlockLength,      4);
 
 // Heap related flags
-define_pd_global(uintx, PermSize,    ScaleForWordSize(16*M));
-define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,PermSize,    ScaleForWordSize(16*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
 
 // Ergonomics related flags
 define_pd_global(bool, NeverActAsServerClassMachine, false);
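
On the 64-bit server VM the ergonomics cap moves from DefaultMaxRAM = 32*G to MaxRAM = 128ULL*G, and from 1*G to 4ULL*G on 32-bit; the x86 server file below gets the same values. A minimal sketch of what that means for the ergonomically chosen maximum heap, assuming the MaxRAMFraction divisor (not shown in these hunks) stays at 4:

    // Quick arithmetic check of the server cap change; MaxRAMFraction = 4 is an
    // assumed default that does not appear in these hunks.
    #include <cstdio>

    int main() {
      const unsigned long long G = 1024ull * 1024 * 1024;
      const unsigned long long old_cap  = 32 * G;   // old DefaultMaxRAM (64-bit server)
      const unsigned long long new_cap  = 128 * G;  // new MaxRAM (64-bit server)
      const unsigned long long fraction = 4;        // assumed MaxRAMFraction
      printf("old -Xmx ceiling: %llu GB, new: %llu GB\n",
             old_cap / fraction / G, new_cap / fraction / G);  // 8 GB -> 32 GB
      return 0;
    }
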
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -22,10 +22,8 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
 
 // For sparc we do not do call backs when a thread is in the interpreter, because the
 // interpreter dispatch needs at least two instructions - first to load the dispatch address
@@ -41,26 +39,23 @@
 define_pd_global(bool, ImplicitNullChecks,          true);  // Generate code for implicit null checks
 define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs past to check cast
 
-define_pd_global(intx,  CodeEntryAlignment,    32);
-define_pd_global(uintx, TLABSize,              0);
-define_pd_global(uintx, NewSize, ScaleForWordSize((2048 * K) + (2 * (64 * K))));
-define_pd_global(intx,  SurvivorRatio,         8);
-define_pd_global(intx,  InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
-define_pd_global(intx,  InlineSmallCode,       1500);
+define_pd_global(intx, CodeEntryAlignment,    32);
+define_pd_global(intx, InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
+define_pd_global(intx, InlineSmallCode,       1500);
 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
-define_pd_global(intx,  ThreadStackSize,       1024);
-define_pd_global(intx,  VMThreadStackSize,     1024);
+define_pd_global(intx, ThreadStackSize,       1024);
+define_pd_global(intx, VMThreadStackSize,     1024);
 #else
-define_pd_global(intx,  ThreadStackSize,       512);
-define_pd_global(intx,  VMThreadStackSize,     512);
+define_pd_global(intx, ThreadStackSize,       512);
+define_pd_global(intx, VMThreadStackSize,     512);
 #endif
 
 define_pd_global(intx, StackYellowPages, 2);
 define_pd_global(intx, StackRedPages, 1);
 define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 
-define_pd_global(intx,  PreInflateSpin,        40);  // Determined by running design center
+define_pd_global(intx, PreInflateSpin,       40);  // Determined by running design center
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -22,10 +22,8 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the client compiler.
 // (see c1_globals.hpp)
-//
 
 #ifndef TIERED
 define_pd_global(bool, BackgroundCompilation,        true );
@@ -48,27 +46,24 @@
 
 define_pd_global(intx, OnStackReplacePercentage,     933  );
 define_pd_global(intx, FreqInlineSize,               325  );
-define_pd_global(intx, NewRatio,                     12   );
 define_pd_global(intx, NewSizeThreadIncrease,        4*K  );
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,CodeCacheMinBlockLength,      1);
-define_pd_global(uintx, PermSize,                    12*M );
-define_pd_global(uintx, MaxPermSize,                 64*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true);
-define_pd_global(uintx, DefaultMaxRAM,               1*G);
+define_pd_global(uintx,PermSize,                     12*M );
+define_pd_global(uintx,MaxPermSize,                  64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
 define_pd_global(bool, CICompileOSR,                 true );
-#endif // TIERED
+#endif // !TIERED
 define_pd_global(bool, UseTypeProfile,               false);
 define_pd_global(bool, RoundFPResults,               true );
 
-
 define_pd_global(bool, LIRFillDelaySlots,            false);
-define_pd_global(bool, OptimizeSinglePrecision,      true);
+define_pd_global(bool, OptimizeSinglePrecision,      true );
 define_pd_global(bool, CSEArrayLength,               false);
-define_pd_global(bool, TwoOperandLIRForm,            true);
+define_pd_global(bool, TwoOperandLIRForm,            true );
 
-
-define_pd_global(intx, SafepointPollOffset, 256);
+define_pd_global(intx, SafepointPollOffset,          256  );
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -22,7 +22,6 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the server compiler.
 // (see c2_globals.hpp).  Alpha-sorted.
 
@@ -46,8 +45,8 @@
 define_pd_global(intx, CompileThreshold,             10000);
 #endif // TIERED
 define_pd_global(intx, Tier2CompileThreshold,        10000);
-define_pd_global(intx, Tier3CompileThreshold,        20000 );
-define_pd_global(intx, Tier4CompileThreshold,        40000 );
+define_pd_global(intx, Tier3CompileThreshold,        20000);
+define_pd_global(intx, Tier4CompileThreshold,        40000);
 
 define_pd_global(intx, BackEdgeThreshold,            100000);
 define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
@@ -61,7 +60,6 @@
 #ifdef AMD64
 define_pd_global(intx, INTPRESSURE,                  13);
 define_pd_global(intx, InteriorEntryAlignment,       16);
-define_pd_global(intx, NewRatio,                     2);
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
 define_pd_global(intx, LoopUnrollLimit,              60);
 // InitialCodeCacheSize derived from specjbb2000 run.
@@ -69,19 +67,18 @@
 define_pd_global(intx, CodeCacheExpansionSize,       64*K);
 
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 32*G);
+define_pd_global(uint64_t,MaxRAM,                    128ULL*G);
 #else
 define_pd_global(intx, INTPRESSURE,                  6);
 define_pd_global(intx, InteriorEntryAlignment,       4);
-define_pd_global(intx, NewRatio,                     8); // Design center runs on 1.3.1
 define_pd_global(intx, NewSizeThreadIncrease,        4*K);
-define_pd_global(intx, LoopUnrollLimit,              50); // Design center runs on 1.3.1
+define_pd_global(intx, LoopUnrollLimit,              50);     // Design center runs on 1.3.1
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize,         2304*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, CodeCacheExpansionSize,       32*K);
 
 // Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 1*G);
+define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
 #endif // AMD64
 define_pd_global(intx, OptoLoopAlignment,            16);
 define_pd_global(intx, RegisterCostAreaRatio,        16000);
@@ -97,8 +94,8 @@
 define_pd_global(uintx,CodeCacheMinBlockLength,      4);
 
 // Heap related flags
-define_pd_global(uintx, PermSize,    ScaleForWordSize(16*M));
-define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,PermSize,    ScaleForWordSize(16*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
 
 // Ergonomics related flags
 define_pd_global(bool, NeverActAsServerClassMachine, false);
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -22,17 +22,16 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
 
-define_pd_global(bool,  ConvertSleepToYield,      true);
-define_pd_global(bool,  ShareVtableStubs,         true);
-define_pd_global(bool,  CountInterpCalls,         true);
+define_pd_global(bool, ConvertSleepToYield,      true);
+define_pd_global(bool, ShareVtableStubs,         true);
+define_pd_global(bool, CountInterpCalls,         true);
+define_pd_global(bool, NeedsDeoptSuspend,        false); // only register window machines need this
 
-define_pd_global(bool, ImplicitNullChecks,          true);  // Generate code for implicit null checks
-define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs past to check cast
+define_pd_global(bool, ImplicitNullChecks,       true);  // Generate code for implicit null checks
+define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap NULLs past to check cast
 
 // See 4827828 for this change. There is no globals_core_i486.hpp. I can't
 // assign a different value for C2 without touching a number of files. Use
@@ -42,29 +41,24 @@
 // the uep and the vep doesn't get real alignment but just slops on by
 // only assured that the entry instruction meets the 5 byte size requirement.
 #ifdef COMPILER2
-define_pd_global(intx,  CodeEntryAlignment,       32);
+define_pd_global(intx, CodeEntryAlignment,       32);
 #else
-define_pd_global(intx,  CodeEntryAlignment,       16);
+define_pd_global(intx, CodeEntryAlignment,       16);
 #endif // COMPILER2
+define_pd_global(intx, InlineFrequencyCount,     100);
+define_pd_global(intx, InlineSmallCode,          1000);
 
-define_pd_global(bool, NeedsDeoptSuspend,           false); // only register window machines need this
-
-define_pd_global(uintx, TLABSize,                 0);
+define_pd_global(intx, StackYellowPages, 2);
+define_pd_global(intx, StackRedPages, 1);
 #ifdef AMD64
-define_pd_global(uintx, NewSize, ScaleForWordSize(2048 * K));
 // Very large C++ stack frames using solaris-amd64 optimized builds
 // due to lack of optimization caused by C++ compiler bugs
 define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
 #else
-define_pd_global(uintx, NewSize,                  1024 * K);
 define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 #endif // AMD64
-define_pd_global(intx,  InlineFrequencyCount,     100);
-define_pd_global(intx,  InlineSmallCode,          1000);
-define_pd_global(intx,  PreInflateSpin,           10);
 
-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
+define_pd_global(intx, PreInflateSpin,           10);
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -23,10 +23,8 @@
  *
  */
 
-//
 // Set the default values for platform dependent flags used by the
 // runtime system.  See globals.hpp for details of what they do.
-//
 
 define_pd_global(bool,  ConvertSleepToYield,  true);
 define_pd_global(bool,  ShareVtableStubs,     true);
@@ -37,12 +35,6 @@
 define_pd_global(bool,  UncommonNullCast,     true);
 
 define_pd_global(intx,  CodeEntryAlignment,   32);
-define_pd_global(uintx, TLABSize,             0);
-#ifdef _LP64
-define_pd_global(uintx, NewSize,              ScaleForWordSize(2048 * K));
-#else
-define_pd_global(uintx, NewSize,              ScaleForWordSize(1024 * K));
-#endif // _LP64
 define_pd_global(intx,  InlineFrequencyCount, 100);
 define_pd_global(intx,  InlineSmallCode,      1000);
 define_pd_global(intx,  PreInflateSpin,       10);
--- a/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -22,10 +22,9 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
+
 define_pd_global(bool, DontYieldALot,            false);
 #ifdef AMD64
 define_pd_global(intx, ThreadStackSize,          1024); // 0 => use system default
@@ -39,11 +38,10 @@
 #endif // AMD64
 
 define_pd_global(intx, CompilerThreadStackSize,  0);
-define_pd_global(intx, SurvivorRatio,            8);
 
-define_pd_global(uintx, JVMInvokeMethodSlack,    8192);
+define_pd_global(uintx,JVMInvokeMethodSlack,     8192);
 
 // Only used on 64 bit platforms
-define_pd_global(uintx, HeapBaseMinAddress,      2*G);
+define_pd_global(uintx,HeapBaseMinAddress,       2*G);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions,    false);
--- a/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -22,31 +22,25 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
+
 define_pd_global(bool, DontYieldALot,            true); // Determined in the design center
 #ifdef AMD64
 define_pd_global(intx, ThreadStackSize,          1024); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize,        1024);
-define_pd_global(intx, SurvivorRatio,            6);
-define_pd_global(uintx, JVMInvokeMethodSlack,    8*K);
+define_pd_global(uintx,JVMInvokeMethodSlack,     8*K);
 #else
-//  UseStackBanging is not pd
-// define_pd_global(bool, UseStackBanging,          true);
-
 // ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
 // to run while keeping the number of threads that can be created high.
 define_pd_global(intx, ThreadStackSize,          320);
 define_pd_global(intx, VMThreadStackSize,        512);
-define_pd_global(intx, SurvivorRatio,            8);
-define_pd_global(uintx, JVMInvokeMethodSlack,    10*K);
+define_pd_global(uintx,JVMInvokeMethodSlack,     10*K);
 #endif // AMD64
 
 define_pd_global(intx, CompilerThreadStackSize,  0);
 
 // Only used on 64 bit platforms
-define_pd_global(uintx, HeapBaseMinAddress,      256*M);
+define_pd_global(uintx,HeapBaseMinAddress,       256*M);
 // Only used on 64 bit Windows platforms
 define_pd_global(bool, UseVectoredExceptions,    false);
--- a/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -22,10 +22,9 @@
  *
  */
 
-//
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-//
+
 define_pd_global(bool, DontYieldALot,            false);
 
 // Default stack size on Windows is determined by the executable (java.exe
@@ -35,8 +34,6 @@
 define_pd_global(intx, ThreadStackSize,          0); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize,        0); // 0 => use system default
 
-define_pd_global(intx, SurvivorRatio,            8);
-
 #ifdef ASSERT
 define_pd_global(intx, CompilerThreadStackSize,  1024);
 #else
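
The platform files above also drop their NewSize, TLABSize, NewRatio and SurvivorRatio overrides, so those values now come from the shared declarations in globals.hpp (not visible in the truncated excerpt below) together with the generational ergonomics. Under NewRatio-based sizing the young generation gets heap / (NewRatio + 1); the worked case below assumes a shared NewRatio default of 2.

    // Worked example of NewRatio-based young-generation sizing.  NewRatio = 2 is an
    // assumed shared default; it does not appear in the hunks shown here.
    #include <cstdio>

    int main() {
      const unsigned long long M = 1024 * 1024;
      const unsigned long long heap      = 1024 * M; // hypothetical 1 GB heap
      const unsigned long long new_ratio = 2;        // old gen is NewRatio times the young gen
      printf("young gen ~= %llu MB of %llu MB\n",
             heap / (new_ratio + 1) / M, heap / M);  // ~341 MB
      return 0;
    }
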
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Wed Oct 28 16:25:51 2009 -0400
@@ -51,7 +51,7 @@
 
     cname = PerfDataManager::counter_name(name_space(), "oldCapacity");
     _old_capacity = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Bytes, (jlong) Arguments::initial_heap_size(), CHECK);
+      PerfData::U_Bytes, (jlong) InitialHeapSize, CHECK);
 
     cname = PerfDataManager::counter_name(name_space(), "boundaryMoved");
     _boundary_moved = PerfDataManager::create_variable(SUN_GC, cname,
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Oct 28 16:25:51 2009 -0400
@@ -55,7 +55,7 @@
 
 void CollectorPolicy::initialize_size_info() {
   // User inputs from -mx and ms are aligned
-  set_initial_heap_byte_size(Arguments::initial_heap_size());
+  set_initial_heap_byte_size(InitialHeapSize);
   if (initial_heap_byte_size() == 0) {
     set_initial_heap_byte_size(NewSize + OldSize);
   }
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Oct 28 16:25:51 2009 -0400
@@ -37,7 +37,6 @@
 const char*  Arguments::_gc_log_filename        = NULL;
 bool   Arguments::_has_profile                  = false;
 bool   Arguments::_has_alloc_profile            = false;
-uintx  Arguments::_initial_heap_size            = 0;
 uintx  Arguments::_min_heap_size                = 0;
 Arguments::Mode Arguments::_mode                = _mixed;
 bool   Arguments::_java_compiler                = false;
@@ -182,6 +181,9 @@
   { "ProcessingToTenuringRatio",     JDK_Version::jdk(5), JDK_Version::jdk(7) },
   { "MinTrainLength",                JDK_Version::jdk(5), JDK_Version::jdk(7) },
   { "AppendRatio",         JDK_Version::jdk_update(6,10), JDK_Version::jdk(7) },
+  { "DefaultMaxRAM",       JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
+  { "DefaultInitialRAMFraction",
+                           JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -555,6 +557,10 @@
   if (!is_neg && CommandLineFlags::uintxAtPut(name, &uintx_v, origin)) {
     return true;
   }
+  uint64_t uint64_t_v = (uint64_t) v;
+  if (!is_neg && CommandLineFlags::uint64_tAtPut(name, &uint64_t_v, origin)) {
+    return true;
+  }
   return false;
 }
 
@@ -947,7 +953,7 @@
 // UseParNewGC and not explicitly set ParallelGCThreads we
 // set it, unless this is a single cpu machine.
 void Arguments::set_parnew_gc_flags() {
-  assert(!UseSerialGC && !UseParallelGC && !UseG1GC,
+  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
          "control point invariant");
   assert(UseParNewGC, "Error");
 
@@ -960,13 +966,13 @@
   if (ParallelGCThreads == 0) {
     FLAG_SET_DEFAULT(ParallelGCThreads,
                      Abstract_VM_Version::parallel_worker_threads());
-    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
+    if (ParallelGCThreads == 1) {
       FLAG_SET_DEFAULT(UseParNewGC, false);
+      FLAG_SET_DEFAULT(ParallelGCThreads, 0);
     }
   }
-  if (!UseParNewGC) {
-    FLAG_SET_DEFAULT(ParallelGCThreads, 0);
-  } else {
+  if (UseParNewGC) {
+    // CDS doesn't work with ParNew yet
     no_shared_spaces();
 
     // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
@@ -980,7 +986,7 @@
       FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
     }
 
-    // AlwaysTenure flag should make ParNew to promote all at first collection.
+    // AlwaysTenure flag should make ParNew promote all at first collection.
     // See CR 6362902.
     if (AlwaysTenure) {
       FLAG_SET_CMDLINE(intx, MaxTenuringThreshold, 0);
@@ -1003,7 +1009,7 @@
 // further optimization and tuning efforts, and would almost
 // certainly gain from analysis of platform and environment.
 void Arguments::set_cms_and_parnew_gc_flags() {
-  assert(!UseSerialGC && !UseParallelGC, "Error");
+  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
   assert(UseConcMarkSweepGC, "CMS is expected to be on here");
 
   // If we are using CMS, we prefer to UseParNewGC,
@@ -1068,7 +1074,7 @@
     } else {
       FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size);
     }
-    if(PrintGCDetails && Verbose) {
+    if (PrintGCDetails && Verbose) {
       // Too early to use gclog_or_tty
       tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
     }
@@ -1097,15 +1103,15 @@
     } else {
       min_new = NewSize;
     }
-    size_t prev_initial_size = initial_heap_size();
-    if (prev_initial_size != 0 && prev_initial_size < min_new+OldSize) {
-      set_initial_heap_size(min_new+OldSize);
+    size_t prev_initial_size = InitialHeapSize;
+    if (prev_initial_size != 0 && prev_initial_size < min_new + OldSize) {
+      FLAG_SET_ERGO(uintx, InitialHeapSize, min_new + OldSize);
       // Currently minimum size and the initial heap sizes are the same.
-      set_min_heap_size(initial_heap_size());
+      set_min_heap_size(InitialHeapSize);
       if (PrintGCDetails && Verbose) {
         warning("Initial heap size increased to " SIZE_FORMAT " M from "
                 SIZE_FORMAT " M; use -XX:NewSize=... for finer control.",
-                initial_heap_size()/M, prev_initial_size/M);
+                InitialHeapSize/M, prev_initial_size/M);
       }
     }
 
@@ -1114,12 +1120,12 @@
       align_size_down(MaxHeapSize,
                       CardTableRS::ct_max_alignment_constraint());
 
-    if(PrintGCDetails && Verbose) {
+    if (PrintGCDetails && Verbose) {
       // Too early to use gclog_or_tty
       tty->print_cr("CMS set min_heap_size: " SIZE_FORMAT
            " initial_heap_size:  " SIZE_FORMAT
            " max_heap: " SIZE_FORMAT,
-           min_heap_size(), initial_heap_size(), max_heap);
+           min_heap_size(), InitialHeapSize, max_heap);
     }
     if (max_heap > min_new) {
       // Unless explicitly requested otherwise, make young gen
@@ -1127,7 +1133,7 @@
       if (FLAG_IS_DEFAULT(NewSize)) {
         FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new));
         FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
-        if(PrintGCDetails && Verbose) {
+        if (PrintGCDetails && Verbose) {
           // Too early to use gclog_or_tty
           tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize);
         }
@@ -1137,8 +1143,8 @@
       // later NewRatio will decide how it grows; see above.
       if (FLAG_IS_DEFAULT(OldSize)) {
         if (max_heap > NewSize) {
-          FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize,  max_heap - NewSize));
-          if(PrintGCDetails && Verbose) {
+          FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize, max_heap - NewSize));
+          if (PrintGCDetails && Verbose) {
             // Too early to use gclog_or_tty
             tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize);
           }
@@ -1186,7 +1192,7 @@
 
 inline uintx max_heap_for_compressed_oops() {
   LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
-  NOT_LP64(return DefaultMaxRAM);
+  NOT_LP64(ShouldNotReachHere(); return 0);
 }
 
 bool Arguments::should_auto_select_low_pause_collector() {
@@ -1205,7 +1211,7 @@
 
 void Arguments::set_ergonomics_flags() {
   // Parallel GC is not compatible with sharing. If one specifies
-  // that they want sharing explicitly, do not set ergonmics flags.
+  // that they want sharing explicitly, do not set ergonomics flags.
   if (DumpSharedSpaces || ForceSharedSpaces) {
     return;
   }
@@ -1271,8 +1277,6 @@
     FLAG_SET_ERGO(uintx, ParallelGCThreads,
                   Abstract_VM_Version::parallel_worker_threads());
 
-    // PS is a server collector, setup the heap sizes accordingly.
-    set_server_heap_size();
     // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
     // SurvivorRatio has been set, reset their default values to SurvivorRatio +
     // 2.  By doing this we make SurvivorRatio also work for Parallel Scavenger.
@@ -1302,8 +1306,6 @@
 
 void Arguments::set_g1_gc_flags() {
   assert(UseG1GC, "Error");
-  // G1 is a server collector, setup the heap sizes accordingly.
-  set_server_heap_size();
 #ifdef COMPILER1
   FastTLABRefill = false;
 #endif
@@ -1321,50 +1323,77 @@
   }
 }
 
-void Arguments::set_server_heap_size() {
+void Arguments::set_heap_size() {
+  if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
+    // Deprecated flag
+    FLAG_SET_CMDLINE(uintx, MaxRAMFraction, DefaultMaxRAMFraction);
+  }
+
+  const julong phys_mem =
+    FLAG_IS_DEFAULT(MaxRAM) ? MIN2(os::physical_memory(), (julong)MaxRAM)
+                            : (julong)MaxRAM;
+
+  // If the maximum heap size has not been set with -Xmx,
+  // then set it as fraction of the size of physical memory,
+  // respecting the maximum and minimum sizes of the heap.
   if (FLAG_IS_DEFAULT(MaxHeapSize)) {
-    const uint64_t reasonable_fraction =
-      os::physical_memory() / DefaultMaxRAMFraction;
-    const uint64_t maximum_size = (uint64_t)
-                 (FLAG_IS_DEFAULT(DefaultMaxRAM) && UseCompressedOops ?
-                     MIN2(max_heap_for_compressed_oops(), DefaultMaxRAM) :
-                     DefaultMaxRAM);
-    size_t reasonable_max =
-      (size_t) os::allocatable_physical_memory(reasonable_fraction);
-    if (reasonable_max > maximum_size) {
-      reasonable_max = maximum_size;
+    julong reasonable_max = phys_mem / MaxRAMFraction;
+
+    if (phys_mem <= MaxHeapSize * MinRAMFraction) {
+      // Small physical memory, so use a minimum fraction of it for the heap
+      reasonable_max = phys_mem / MinRAMFraction;
+    } else {
+      // Not-small physical memory, so require a heap at least
+      // as large as MaxHeapSize
+      reasonable_max = MAX2(reasonable_max, (julong)MaxHeapSize);
+    }
+    if (!FLAG_IS_DEFAULT(ErgoHeapSizeLimit) && ErgoHeapSizeLimit != 0) {
+      // Limit the heap size to ErgoHeapSizeLimit
+      reasonable_max = MIN2(reasonable_max, (julong)ErgoHeapSizeLimit);
     }
+    if (UseCompressedOops) {
+      // Limit the heap size to the maximum possible when using compressed oops
+      reasonable_max = MIN2(reasonable_max, (julong)max_heap_for_compressed_oops());
+    }
+    reasonable_max = os::allocatable_physical_memory(reasonable_max);
+
+    if (!FLAG_IS_DEFAULT(InitialHeapSize)) {
+      // An initial heap size was specified on the command line,
+      // so be sure that the maximum size is consistent.  Done
+      // after call to allocatable_physical_memory because that
+      // method might reduce the allocation size.
+      reasonable_max = MAX2(reasonable_max, (julong)InitialHeapSize);
+    }
+
     if (PrintGCDetails && Verbose) {
       // Cannot use gclog_or_tty yet.
-      tty->print_cr("  Max heap size for server class platform "
-                    SIZE_FORMAT, reasonable_max);
+      tty->print_cr("  Maximum heap size " SIZE_FORMAT, reasonable_max);
     }
-    // If the initial_heap_size has not been set with -Xms,
-    // then set it as fraction of size of physical memory
-    // respecting the maximum and minimum sizes of the heap.
-    if (initial_heap_size() == 0) {
-      const uint64_t reasonable_initial_fraction =
-        os::physical_memory() / DefaultInitialRAMFraction;
-      const size_t reasonable_initial =
-        (size_t) os::allocatable_physical_memory(reasonable_initial_fraction);
-      const size_t minimum_size = NewSize + OldSize;
-      set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_max),
-                                minimum_size));
-      // Currently the minimum size and the initial heap sizes are the same.
-      set_min_heap_size(initial_heap_size());
-      if (PrintGCDetails && Verbose) {
-        // Cannot use gclog_or_tty yet.
-        tty->print_cr("  Initial heap size for server class platform "
-                      SIZE_FORMAT, initial_heap_size());
-      }
-    } else {
-      // A minimum size was specified on the command line.  Be sure
-      // that the maximum size is consistent.
-      if (initial_heap_size() > reasonable_max) {
-        reasonable_max = initial_heap_size();
-      }
+    FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx)reasonable_max);
+  }
+
+  // If the initial_heap_size has not been set with InitialHeapSize
+  // or -Xms, then set it as fraction of the size of physical memory,
+  // respecting the maximum and minimum sizes of the heap.
+  if (FLAG_IS_DEFAULT(InitialHeapSize)) {
+    julong reasonable_initial = phys_mem / InitialRAMFraction;
+
+    reasonable_initial = MAX2(reasonable_initial, (julong)(OldSize + NewSize));
+    reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize);
+
+    reasonable_initial = os::allocatable_physical_memory(reasonable_initial);
+
+    if (PrintGCDetails && Verbose) {
+      // Cannot use gclog_or_tty yet.
+      tty->print_cr("  Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial);
     }
-    FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx) reasonable_max);
+    FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial);
+
+    // Subsequent ergonomics code may expect min_heap_size to be set
+    // if InitialHeapSize is.  Use whatever the current values are
+    // for OldSize and NewSize, whether or not they were set on the
+    // command line.
+    set_min_heap_size(OldSize + NewSize);
   }
 }
 
@@ -1448,7 +1477,7 @@
   return false;
 }
 
-static void set_serial_gc_flags() {
+static void force_serial_gc() {
   FLAG_SET_DEFAULT(UseSerialGC, true);
   FLAG_SET_DEFAULT(UseParNewGC, false);
   FLAG_SET_DEFAULT(UseConcMarkSweepGC, false);
@@ -1584,15 +1613,15 @@
     // force sharing off.
     if (DumpSharedSpaces || ForceSharedSpaces) {
       jio_fprintf(defaultStream::error_stream(),
-                  "Reverting to Serial GC because of %s \n",
+                  "Reverting to Serial GC because of %s\n",
                   ForceSharedSpaces ? " -Xshare:on" : "-Xshare:dump");
-      set_serial_gc_flags();
+      force_serial_gc();
       FLAG_SET_DEFAULT(SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages), false);
     } else {
-      if (UseSharedSpaces) {
+      if (UseSharedSpaces && Verbose) {
         jio_fprintf(defaultStream::error_stream(),
                     "Turning off use of shared archive because of "
-                    "choice of garbage collector or large pages \n");
+                    "choice of garbage collector or large pages\n");
       }
       no_shared_spaces();
     }
@@ -1925,8 +1954,8 @@
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      FLAG_SET_CMDLINE(uintx, MaxNewSize, (size_t) long_initial_eden_size);
-      FLAG_SET_CMDLINE(uintx, NewSize, (size_t) long_initial_eden_size);
+      FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size);
+      FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size);
     // -Xms
     } else if (match_option(option, "-Xms", &tail)) {
       julong long_initial_heap_size = 0;
@@ -1937,9 +1966,9 @@
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      set_initial_heap_size((size_t) long_initial_heap_size);
+      FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
       // Currently the minimum size and the initial heap sizes are the same.
-      set_min_heap_size(initial_heap_size());
+      set_min_heap_size(InitialHeapSize);
     // -Xmx
     } else if (match_option(option, "-Xmx", &tail)) {
       julong long_max_heap_size = 0;
@@ -1950,7 +1979,7 @@
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      FLAG_SET_CMDLINE(uintx, MaxHeapSize, (size_t) long_max_heap_size);
+      FLAG_SET_CMDLINE(uintx, MaxHeapSize, (uintx)long_max_heap_size);
     // Xmaxf
     } else if (match_option(option, "-Xmaxf", &tail)) {
       int maxf = (int)(atof(tail) * 100);
@@ -2196,9 +2225,9 @@
 
       if (FLAG_IS_DEFAULT(MaxHeapSize)) {
          FLAG_SET_CMDLINE(uintx, MaxHeapSize, initHeapSize);
-         set_initial_heap_size(MaxHeapSize);
+         FLAG_SET_CMDLINE(uintx, InitialHeapSize, initHeapSize);
          // Currently the minimum size and the initial heap sizes are the same.
-         set_min_heap_size(initial_heap_size());
+         set_min_heap_size(initHeapSize);
       }
       if (FLAG_IS_DEFAULT(NewSize)) {
          // Make the young generation 3/8ths of the total heap.
@@ -2676,7 +2705,7 @@
   }
 
 #ifdef SERIALGC
-  set_serial_gc_flags();
+  force_serial_gc();
 #endif // SERIALGC
 #ifdef KERNEL
   no_shared_spaces();
@@ -2690,18 +2719,22 @@
     return JNI_EINVAL;
   }
 
-  if (UseParallelGC || UseParallelOldGC) {
-    // Set some flags for ParallelGC if needed.
-    set_parallel_gc_flags();
-  } else if (UseConcMarkSweepGC) {
-    // Set some flags for CMS
+  if (UseConcMarkSweepGC) {
+    // Set flags for CMS and ParNew.  Check UseConcMarkSweep first
+    // to ensure that when both UseConcMarkSweepGC and UseParNewGC
+    // are true, we don't call set_parnew_gc_flags() as well.
     set_cms_and_parnew_gc_flags();
-  } else if (UseParNewGC) {
-    // Set some flags for ParNew
-    set_parnew_gc_flags();
-  } else if (UseG1GC) {
-    // Set some flags for garbage-first, if needed.
-    set_g1_gc_flags();
+  } else {
+    // Set heap size based on available physical memory
+    set_heap_size();
+    // Set per-collector flags
+    if (UseParallelGC || UseParallelOldGC) {
+      set_parallel_gc_flags();
+    } else if (UseParNewGC) {
+      set_parnew_gc_flags();
+    } else if (UseG1GC) {
+      set_g1_gc_flags();
+    }
   }
 
 #ifdef SERIALGC
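
Two consequences of the arguments.cpp rework are easy to miss in the hunks above: with CMS the new set_heap_size() is skipped entirely (the CMS/ParNew path keeps its own sizing, per the summary), and when UseCompressedOops is set the ergonomic maximum is additionally capped by max_heap_for_compressed_oops(), i.e. OopEncodingHeapMax - MaxPermSize - page size on LP64 (the NOT_LP64 branch now asserts, since compressed oops are a 64-bit-only feature). A rough check of that ceiling, where the object alignment, MaxPermSize and page size are assumptions for illustration:

    // Rough check of the compressed-oops cap applied by max_heap_for_compressed_oops().
    // Object alignment, MaxPermSize and page size below are assumed values.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t oop_encoding_heap_max = (uint64_t(0xffffffffu) + 1) << 3; // 2^32 oops * assumed 8-byte alignment
      const uint64_t max_perm  = 64ull << 20;  // assumed MaxPermSize
      const uint64_t page_size = 4096;         // assumed page size
      printf("compressed-oops heap cap ~= %llu MB\n",
             (unsigned long long)((oop_encoding_heap_max - max_perm - page_size) >> 20));
      return 0;
    }
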
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -254,7 +254,6 @@
   static bool   _has_profile;
   static bool   _has_alloc_profile;
   static const char*  _gc_log_filename;
-  static uintx  _initial_heap_size;
   static uintx  _min_heap_size;
 
   // -Xrun arguments
@@ -300,8 +299,8 @@
   static void set_g1_gc_flags();
   // GC ergonomics
   static void set_ergonomics_flags();
-  // Setup heap size for a server platform
-  static void set_server_heap_size();
+  // Setup heap size
+  static void set_heap_size();
   // Based on automatic selection criteria, should the
   // low pause collector be used.
   static bool should_auto_select_low_pause_collector();
@@ -434,9 +433,7 @@
   static bool has_profile()                 { return _has_profile; }
   static bool has_alloc_profile()           { return _has_alloc_profile; }
 
-  // -Xms , -Xmx
-  static uintx initial_heap_size()          { return _initial_heap_size; }
-  static void  set_initial_heap_size(uintx v) { _initial_heap_size = v;  }
+  // -Xms, -Xmx
   static uintx min_heap_size()              { return _min_heap_size; }
   static void  set_min_heap_size(uintx v)   { _min_heap_size = v;  }
 
--- a/hotspot/src/share/vm/runtime/globals.cpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Wed Oct 28 16:25:51 2009 -0400
@@ -324,6 +324,32 @@
   faddr->origin = origin;
 }
 
+bool CommandLineFlags::uint64_tAt(char* name, size_t len, uint64_t* value) {
+  Flag* result = Flag::find_flag(name, len);
+  if (result == NULL) return false;
+  if (!result->is_uint64_t()) return false;
+  *value = result->get_uint64_t();
+  return true;
+}
+
+bool CommandLineFlags::uint64_tAtPut(char* name, size_t len, uint64_t* value, FlagValueOrigin origin) {
+  Flag* result = Flag::find_flag(name, len);
+  if (result == NULL) return false;
+  if (!result->is_uint64_t()) return false;
+  uint64_t old_value = result->get_uint64_t();
+  result->set_uint64_t(*value);
+  *value = old_value;
+  result->origin = origin;
+  return true;
+}
+
+void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin) {
+  Flag* faddr = address_of_flag(flag);
+  guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
+  faddr->set_uint64_t(value);
+  faddr->origin = origin;
+}
+
 bool CommandLineFlags::doubleAt(char* name, size_t len, double* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
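
globals.cpp gains uint64_t lookup and update routines that mirror the existing intx/uintx ones, and the globals.hpp hunk below adds the matching type test and accessors on Flag. A minimal standalone sketch of that strcmp-on-the-type-string dispatch, using a single hypothetical table entry:

    // Minimal standalone sketch of the type-string dispatch that the new
    // is_uint64_t()/get_uint64_t()/set_uint64_t() accessors follow; not HotSpot code.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct Flag {
      const char* type;
      void*       addr;
      bool is_uint64_t() const          { return strcmp(type, "uint64_t") == 0; }
      uint64_t get_uint64_t() const     { return *((uint64_t*) addr); }
      void set_uint64_t(uint64_t value) { *((uint64_t*) addr) = value; }
    };

    int main() {
      uint64_t MaxRAM = 1ull << 30;            // pretend backing storage for the flag
      Flag f = { "uint64_t", &MaxRAM };
      if (f.is_uint64_t()) {                   // check the declared type before touching the value
        f.set_uint64_t(4ull << 30);
      }
      printf("MaxRAM = %llu\n", (unsigned long long) f.get_uint64_t());
      return 0;
    }

In the patch, this type dispatch is what lets a numeric -XX option such as -XX:MaxRAM=<n> reach the new uint64_t flag, via the extra uint64_tAtPut branch added to set_numeric_flag() in arguments.cpp above.
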
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -47,8 +47,8 @@
 define_pd_global(intx, OnStackReplacePercentage,     0);
 define_pd_global(bool, ResizeTLAB,                   false);
 define_pd_global(intx, FreqInlineSize,               0);
+define_pd_global(intx, InlineSmallCode,              0);
 define_pd_global(intx, NewSizeThreadIncrease,        4*K);
-define_pd_global(intx, NewRatio,                     4);
 define_pd_global(intx, InlineClassNatives,           true);
 define_pd_global(intx, InlineUnsafeOps,              true);
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
@@ -58,7 +58,7 @@
 define_pd_global(uintx,PermSize,    ScaleForWordSize(4*M));
 define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
 define_pd_global(bool, NeverActAsServerClassMachine, true);
-define_pd_global(uintx, DefaultMaxRAM,               1*G);
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
 #define CI_COMPILER_COUNT 0
 #else
 
@@ -113,6 +113,10 @@
   uintx get_uintx() const     { return *((uintx*) addr); }
   void set_uintx(uintx value) { *((uintx*) addr) = value; }
 
+  bool is_uint64_t() const          { return strcmp(type, "uint64_t") == 0; }
+  uint64_t get_uint64_t() const     { return *((uint64_t*) addr); }
+  void set_uint64_t(uint64_t value) { *((uint64_t*) addr) = value; }
+
   bool is_double() const        { return strcmp(type, "double") == 0; }
   double get_double() const     { return *((double*) addr); }
   void set_double(double value) { *((double*) addr) = value; }
@@ -188,6 +192,11 @@
   static bool uintxAtPut(char* name, size_t len, uintx* value, FlagValueOrigin origin);
   static bool uintxAtPut(char* name, uintx* value, FlagValueOrigin origin) { return uintxAtPut(name, strlen(name), value, origin); }
 
+  static bool uint64_tAt(char* name, size_t len, uint64_t* value);
+  static bool uint64_tAt(char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
+  static bool uint64_tAtPut(char* name, size_t len, uint64_t* value, FlagValueOrigin origin);
+  static bool uint64_tAtPut(char* name, uint64_t* value, FlagValueOrigin origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
+
   static bool doubleAt(char* name, size_t len, double* value);
   static bool doubleAt(char* name, double* value)    { return doubleAt(name, strlen(name), value); }
   static bool doubleAtPut(char* name, size_t len, double* value, FlagValueOrigin origin);
@@ -785,7 +794,7 @@
   product(bool, ProfilerRecordPC, false,                                    \
           "Collects tick for each 16 byte interval of compiled code")       \
                                                                             \
-  product(bool, ProfileVM,  false,                                          \
+  product(bool, ProfileVM, false,                                           \
           "Profiles ticks that fall within VM (either in the VM Thread "    \
           "or VM code called through stubs)")                               \
                                                                             \
@@ -815,7 +824,7 @@
                                                                             \
   product(bool, RegisterFinalizersAtInit, true,                             \
           "Register finalizable objects at end of Object.<init> or "        \
-          "after allocation.")                                              \
+          "after allocation")                                               \
                                                                             \
   develop(bool, RegisterReferences, true,                                   \
           "Tells whether the VM should register soft/weak/final/phantom "   \
@@ -862,14 +871,14 @@
   product(bool, AlwaysLockClassLoader, false,                               \
           "Require the VM to acquire the class loader lock before calling " \
           "loadClass() even for class loaders registering "                 \
-          "as parallel capable. Default false. ")                           \
+          "as parallel capable")                                            \
                                                                             \
   product(bool, AllowParallelDefineClass, false,                            \
           "Allow parallel defineClass requests for class loaders "          \
-          "registering as parallel capable. Default false")                 \
+          "registering as parallel capable")                                \
                                                                             \
   product(bool, MustCallLoadClassInternal, false,                           \
-          "Call loadClassInternal() rather than loadClass().Default false") \
+          "Call loadClassInternal() rather than loadClass()")               \
                                                                             \
   product_pd(bool, DontYieldALot,                                           \
           "Throw away obvious excess yield calls (for SOLARIS only)")       \
@@ -921,9 +930,9 @@
          "(Unstable, Linux-specific)"                                       \
          " avoid NPTL-FUTEX hang pthread_cond_timedwait" )                  \
                                                                             \
-  product(bool, FilterSpuriousWakeups , true,                               \
-          "Prevent spurious or premature wakeups from object.wait"              \
-          "(Solaris only)")                                                     \
+  product(bool, FilterSpuriousWakeups, true,                                \
+          "Prevent spurious or premature wakeups from object.wait "         \
+          "(Solaris only)")                                                 \
                                                                             \
   product(intx, NativeMonitorTimeout, -1, "(Unstable)" )                    \
   product(intx, NativeMonitorFlags, 0, "(Unstable)" )                       \
@@ -973,7 +982,7 @@
                                                                             \
   product(bool, UseAltSigs, false,                                          \
           "Use alternate signals instead of SIGUSR1 & SIGUSR2 for VM "      \
-          "internal signals. (Solaris only)")                               \
+          "internal signals (Solaris only)")                                \
                                                                             \
   product(bool, UseSpinning, false,                                         \
           "Use spinning in monitor inflation and before entry")             \
@@ -1265,12 +1274,12 @@
           "Always tenure objects in eden. (ParallelGC only)")               \
                                                                             \
   product(bool, NeverTenure, false,                                         \
-          "Never tenure objects in eden, May tenure on overflow"            \
-          " (ParallelGC only)")                                             \
+          "Never tenure objects in eden, May tenure on overflow "           \
+          "(ParallelGC only)")                                              \
                                                                             \
   product(bool, ScavengeBeforeFullGC, true,                                 \
-          "Scavenge youngest generation before each full GC,"               \
-          " used with UseParallelGC")                                       \
+          "Scavenge youngest generation before each full GC, "              \
+          "used with UseParallelGC")                                        \
                                                                             \
   develop(bool, ScavengeWithObjectsInToSpace, false,                        \
           "Allow scavenges to occur when to_space contains objects.")       \
@@ -1283,9 +1292,9 @@
           " (effective only when UseConcMarkSweepGC)")                      \
                                                                             \
   product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false,        \
-          "A System.gc() request invokes a concurrent collection and"       \
-          " also unloads classes during such a concurrent gc cycle  "       \
-          " (effective only when UseConcMarkSweepGC)")                      \
+          "A System.gc() request invokes a concurrent collection and "      \
+          "also unloads classes during such a concurrent gc cycle "         \
+          "(effective only when UseConcMarkSweepGC)")                       \
                                                                             \
   develop(bool, UseCMSAdaptiveFreeLists, true,                              \
           "Use Adaptive Free Lists in the CMS generation")                  \
@@ -1340,8 +1349,8 @@
           "Whether we should simulate work queue overflow in ParNew")       \
                                                                             \
   notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000,                   \
-          "An `interval' counter that determines how frequently"            \
-          " we simulate overflow; a smaller number increases frequency")    \
+          "An `interval' counter that determines how frequently "           \
+          "we simulate overflow; a smaller number increases frequency")     \
                                                                             \
   product(uintx, ParGCDesiredObjsFromOverflowList, 20,                      \
           "The desired number of objects to claim from the overflow list")  \
@@ -1354,12 +1363,12 @@
           "It forces all freshly committed pages to be pre-touched.")       \
                                                                             \
   product(bool, CMSUseOldDefaults, false,                                   \
-          "A flag temporarily  introduced to allow reverting to some older" \
-          "default settings; older as of 6.0 ")                             \
+          "A flag temporarily introduced to allow reverting to some "       \
+          "older default settings; older as of 6.0")                        \
                                                                             \
   product(intx, CMSYoungGenPerWorker, 16*M,                                 \
           "The amount of young gen chosen by default per GC worker "        \
-          "thread available ")                                              \
+          "thread available")                                               \
                                                                             \
   product(bool, GCOverheadReporting, false,                                 \
          "Enables the GC overhead reporting facility")                      \
@@ -1380,43 +1389,44 @@
           "automatically adjusted")                                         \
                                                                             \
   product(uintx, CMSIncrementalDutyCycleMin, 0,                             \
-          "Lower bound on the duty cycle when CMSIncrementalPacing is"      \
-          "enabled (a percentage, 0-100).")                                 \
+          "Lower bound on the duty cycle when CMSIncrementalPacing is "     \
+          "enabled (a percentage, 0-100)")                                  \
                                                                             \
   product(uintx, CMSIncrementalSafetyFactor, 10,                            \
-          "Percentage (0-100) used to add conservatism when computing the"  \
-          "duty cycle.")                                                    \
+          "Percentage (0-100) used to add conservatism when computing the " \
+          "duty cycle")                                                     \
                                                                             \
   product(uintx, CMSIncrementalOffset, 0,                                   \
           "Percentage (0-100) by which the CMS incremental mode duty cycle" \
-          "is shifted to the right within the period between young GCs")    \
+          " is shifted to the right within the period between young GCs")   \
                                                                             \
   product(uintx, CMSExpAvgFactor, 25,                                       \
-          "Percentage (0-100) used to weight the current sample when"       \
-          "computing exponential averages for CMS statistics.")             \
+          "Percentage (0-100) used to weight the current sample when "      \
+          "computing exponential averages for CMS statistics")              \
                                                                             \
   product(uintx, CMS_FLSWeight, 50,                                         \
-          "Percentage (0-100) used to weight the current sample when"       \
-          "computing exponentially decating averages for CMS FLS statistics.") \
+          "Percentage (0-100) used to weight the current sample when "      \
+          "computing exponentially decating averages for CMS FLS statistics") \
                                                                             \
   product(uintx, CMS_FLSPadding, 2,                                         \
-          "The multiple of deviation from mean to use for buffering"        \
+          "The multiple of deviation from mean to use for buffering "       \
           "against volatility in free list demand.")                        \
                                                                             \
   product(uintx, FLSCoalescePolicy, 2,                                      \
           "CMS: Aggression level for coalescing, increasing from 0 to 4")   \
                                                                             \
   product(uintx, CMS_SweepWeight, 50,                                       \
-          "Percentage (0-100) used to weight the current sample when"       \
-          "computing exponentially decaying average for inter-sweep duration.") \
+          "Percentage (0-100) used to weight the current sample when "      \
+          "computing exponentially decaying average for inter-sweep "       \
+          "duration")                                                       \
                                                                             \
   product(uintx, CMS_SweepPadding, 2,                                       \
-          "The multiple of deviation from mean to use for buffering"        \
+          "The multiple of deviation from mean to use for buffering "       \
           "against volatility in inter-sweep duration.")                    \
                                                                             \
   product(uintx, CMS_SweepTimerThresholdMillis, 10,                         \
           "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
-          " duration exceeds this threhold in milliseconds")                \
+          "duration exceeds this threhold in milliseconds")                 \
                                                                             \
   develop(bool, CMSTraceIncrementalMode, false,                             \
           "Trace CMS incremental mode")                                     \
@@ -1617,35 +1627,36 @@
                                                                             \
   product(intx, CMSTriggerRatio, 80,                                        \
           "Percentage of MinHeapFreeRatio in CMS generation that is "       \
-          "  allocated before a CMS collection cycle commences")            \
+          "allocated before a CMS collection cycle commences")              \
                                                                             \
   product(intx, CMSTriggerPermRatio, 80,                                    \
-          "Percentage of MinHeapFreeRatio in the CMS perm generation that"  \
-          "  is allocated before a CMS collection cycle commences, that  "  \
-          "  also collects the perm generation")                            \
+          "Percentage of MinHeapFreeRatio in the CMS perm generation that " \
+          "is allocated before a CMS collection cycle commences, that "     \
+          "also collects the perm generation")                              \
                                                                             \
   product(uintx, CMSBootstrapOccupancy, 50,                                 \
           "Percentage CMS generation occupancy at which to "                \
-          " initiate CMS collection for bootstrapping collection stats")    \
+          "initiate CMS collection for bootstrapping collection stats")     \
                                                                             \
   product(intx, CMSInitiatingOccupancyFraction, -1,                         \
           "Percentage CMS generation occupancy to start a CMS collection "  \
-          " cycle (A negative value means that CMSTriggerRatio is used)")   \
+          "cycle. A negative value means that CMSTriggerRatio is used")     \
                                                                             \
   product(intx, CMSInitiatingPermOccupancyFraction, -1,                     \
-          "Percentage CMS perm generation occupancy to start a CMScollection"\
-          " cycle (A negative value means that CMSTriggerPermRatio is used)")\
+          "Percentage CMS perm generation occupancy to start a "            \
+          "CMScollection cycle. A negative value means that "               \
+          "CMSTriggerPermRatio is used")                                    \
                                                                             \
   product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
           "Only use occupancy as a crierion for starting a CMS collection") \
                                                                             \
   product(intx, CMSIsTooFullPercentage, 98,                                 \
-          "An absolute ceiling above which CMS will always consider the"    \
-          " perm gen ripe for collection")                                  \
+          "An absolute ceiling above which CMS will always consider the "   \
+          "perm gen ripe for collection")                                   \
                                                                             \
   develop(bool, CMSTestInFreeList, false,                                   \
           "Check if the coalesced range is already in the "                 \
-          "free lists as claimed.")                                         \
+          "free lists as claimed")                                          \
                                                                             \
   notproduct(bool, CMSVerifyReturnedBytes, false,                           \
           "Check that all the garbage collected was returned to the "       \
@@ -1663,8 +1674,8 @@
           "Enforce ScavengeALot/GCALot at all potential safepoints")        \
                                                                             \
   product(bool, HandlePromotionFailure, true,                               \
-          "The youngest generation collection does not require"             \
-          " a guarantee of full promotion of all live objects.")            \
+          "The youngest generation collection does not require "            \
+          "a guarantee of full promotion of all live objects.")             \
                                                                             \
   notproduct(bool, PromotionFailureALot, false,                             \
           "Use promotion failure handling on every youngest generation "    \
@@ -1692,7 +1703,7 @@
           "Ratio of hard spins to calls to yield")                          \
                                                                             \
   product(uintx, PreserveMarkStackSize, 1024,                               \
-           "Size for stack used in promotion failure handling")             \
+          "Size for stack used in promotion failure handling")              \
                                                                             \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
                                                                             \
@@ -1720,14 +1731,27 @@
   product(bool, AlwaysActAsServerClassMachine, false,                       \
           "Always act like a server-class machine")                         \
                                                                             \
-  product_pd(uintx, DefaultMaxRAM,                                          \
-          "Maximum real memory size for setting server class heap size")    \
+  product_pd(uint64_t, MaxRAM,                                              \
+          "Real memory size (in bytes) used to set maximum heap size")      \
+                                                                            \
+  product(uintx, ErgoHeapSizeLimit, 0,                                      \
+          "Maximum ergonomically set heap size (in bytes); zero means use " \
+          "MaxRAM / MaxRAMFraction")                                        \
+                                                                            \
+  product(uintx, MaxRAMFraction, 4,                                         \
+          "Maximum fraction (1/n) of real memory used for maximum heap "    \
+          "size")                                                           \
                                                                             \
   product(uintx, DefaultMaxRAMFraction, 4,                                  \
-          "Fraction (1/n) of real memory used for server class max heap")   \
-                                                                            \
-  product(uintx, DefaultInitialRAMFraction, 64,                             \
-          "Fraction (1/n) of real memory used for server class initial heap")  \
+          "Maximum fraction (1/n) of real memory used for maximum heap "    \
+          "size; deprecated: to be renamed to MaxRAMFraction")              \
+                                                                            \
+  product(uintx, MinRAMFraction, 2,                                         \
+          "Minimum fraction (1/n) of real memory used for maxmimum heap "   \
+          "size on systems with small physical memory size")                \
+                                                                            \
+  product(uintx, InitialRAMFraction, 64,                                    \
+          "Fraction (1/n) of real memory used for initial heap size")       \
                                                                             \
   product(bool, UseAutoGCSelectPolicy, false,                               \
           "Use automatic collection selection policy")                      \
@@ -1778,7 +1802,7 @@
           "Number of collections before the adaptive sizing is started")    \
                                                                             \
   product(uintx, AdaptiveSizePolicyOutputInterval, 0,                       \
-          "Collecton interval for printing information, zero => never")     \
+          "Collecton interval for printing information; zero => never")     \
                                                                             \
   product(bool, UseAdaptiveSizePolicyFootprintGoal, true,                   \
           "Use adaptive minimum footprint as a goal")                       \
@@ -1808,7 +1832,8 @@
           "Allowed collection cost difference between generations")         \
                                                                             \
   product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50,                \
-          "If collection costs are within margin, reduce both by full delta") \
+          "If collection costs are within margin, reduce both by full "     \
+          "delta")                                                          \
                                                                             \
   product(uintx, YoungGenerationSizeIncrement, 20,                          \
           "Adaptive size percentage change in young generation")            \
@@ -2527,7 +2552,7 @@
                                                                             \
   develop(bool, VerifyCompiledCode, false,                                  \
           "Include miscellaneous runtime verifications in nmethod code; "   \
-          "off by default because it disturbs nmethod size heuristics.")    \
+          "default off because it disturbs nmethod size heuristics")        \
                                                                             \
                                                                             \
   /* compilation */                                                         \
@@ -2789,20 +2814,28 @@
           "an OS lock")                                                     \
                                                                             \
   /* gc parameters */                                                       \
-  product(uintx, MaxHeapSize, ScaleForWordSize(64*M),                       \
-          "Default maximum size for object heap (in bytes)")                \
-                                                                            \
-  product_pd(uintx, NewSize,                                                \
-          "Default size of new generation (in bytes)")                      \
+  product(uintx, InitialHeapSize, 0,                                        \
+          "Initial heap size (in bytes); zero means OldSize + NewSize")     \
+                                                                            \
+  product(uintx, MaxHeapSize, ScaleForWordSize(96*M),                       \
+          "Maximum heap size (in bytes)")                                   \
+                                                                            \
+  product(uintx, OldSize, ScaleForWordSize(4*M),                            \
+          "Initial tenured generation size (in bytes)")                     \
+                                                                            \
+  product(uintx, NewSize, ScaleForWordSize(4*M),                            \
+          "Initial new generation size (in bytes)")                         \
                                                                             \
   product(uintx, MaxNewSize, max_uintx,                                     \
-          "Maximum size of new generation (in bytes)")                      \
+          "Maximum new generation size (in bytes), max_uintx means set "    \
+          "ergonomically")                                                  \
                                                                             \
   product(uintx, PretenureSizeThreshold, 0,                                 \
-          "Max size in bytes of objects allocated in DefNew generation")    \
-                                                                            \
-  product_pd(uintx, TLABSize,                                               \
-          "Default (or starting) size of TLAB (in bytes)")                  \
+          "Maximum size in bytes of objects allocated in DefNew "           \
+          "generation; zero means no maximum")                              \
+                                                                            \
+  product(uintx, TLABSize, 0,                                               \
+          "Starting TLAB size (in bytes); zero means set ergonomically")    \
                                                                             \
   product(uintx, MinTLABSize, 2*K,                                          \
           "Minimum allowed TLAB size (in bytes)")                           \
@@ -2819,10 +2852,10 @@
   product(uintx, TLABWasteIncrement,    4,                                  \
           "Increment allowed waste at slow allocation")                     \
                                                                             \
-  product_pd(intx, SurvivorRatio,                                           \
+  product(intx, SurvivorRatio, 8,                                           \
           "Ratio of eden/survivor space size")                              \
                                                                             \
-  product_pd(intx, NewRatio,                                                \
+  product(intx, NewRatio, 2,                                                \
           "Ratio of new/old generation sizes")                              \
                                                                             \
   product(uintx, MaxLiveObjectEvacuationRatio, 100,                         \
@@ -2832,11 +2865,8 @@
           "Additional size added to desired new generation size per "       \
           "non-daemon thread (in bytes)")                                   \
                                                                             \
-  product(uintx, OldSize, ScaleForWordSize(4096*K),                         \
-          "Default size of tenured generation (in bytes)")                  \
-                                                                            \
   product_pd(uintx, PermSize,                                               \
-          "Default size of permanent generation (in bytes)")                \
+          "Initial size of permanent generation (in bytes)")                \
                                                                             \
   product_pd(uintx, MaxPermSize,                                            \
           "Maximum size of permanent generation (in bytes)")                \
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp	Wed Oct 28 16:25:51 2009 -0400
@@ -202,6 +202,7 @@
   static void boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin);
   static void intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin);
   static void uintxAtPut(CommandLineFlagWithType flag, uintx value, FlagValueOrigin origin);
+  static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin);
   static void doubleAtPut(CommandLineFlagWithType flag, double value, FlagValueOrigin origin);
   static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin);
 
--- a/hotspot/src/share/vm/services/management.cpp	Fri Oct 23 18:44:33 2009 -0700
+++ b/hotspot/src/share/vm/services/management.cpp	Wed Oct 28 16:25:51 2009 -0400
@@ -790,7 +790,7 @@
   assert(!has_undefined_init_size, "Undefined init size");
   assert(!has_undefined_max_size, "Undefined max size");
 
-  MemoryUsage usage((heap ? Arguments::initial_heap_size() : total_init),
+  MemoryUsage usage((heap ? InitialHeapSize : total_init),
                     total_used,
                     total_committed,
                     (heap ? Universe::heap()->max_capacity() : total_max));