8074459: Flags handling memory sizes should be of type size_t
Summary: Changed the type to size_t for flags that handle memory sizes
Reviewed-by: kbarrett, tschatzl
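
For context on the motivation: memory-size values are naturally size_t, which matches the platform's address-space width and prints portably with SIZE_FORMAT, whereas the old uintx declarations needed casts and UINTX_FORMAT. Below is a minimal standalone sketch of that pattern (plain C++, not HotSpot code; the variable name is a hypothetical stand-in for a flag such as MetaspaceSize):

  #include <cstddef>  // size_t
  #include <cstdio>   // std::printf

  // Hypothetical stand-in for a memory-size flag, declared as size_t
  // rather than a fixed-width or word-sized unsigned integer type.
  static size_t metaspace_size_demo = 16 * 1024 * 1024;

  int main() {
    // size_t prints with %zu (cf. SIZE_FORMAT in HotSpot), so no cast
    // to uintx or uint64_t is needed when formatting the value.
    std::printf("metaspace_size_demo = %zu bytes\n", metaspace_size_demo);
    return 0;
  }

The patch below applies the same idea to the VM flag declarations (define_pd_global, the flag macros in globals.hpp) and to the code that reads, sets, or prints those flags (FLAG_SET_ERGO, FLAG_SET_CMDLINE, SIZE_FORMAT in warnings and error messages).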
--- a/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -49,7 +49,7 @@
define_pd_global(intx, MinJumpTableSize, 10);
define_pd_global(intx, INTPRESSURE, 25);
define_pd_global(intx, InteriorEntryAlignment, 16);
-define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
+define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, RegisterCostAreaRatio, 16000);
define_pd_global(bool, UseTLAB, true);
define_pd_global(bool, ResizeTLAB, true);
@@ -85,14 +85,14 @@
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
-define_pd_global(uint64_t,MaxRAM, 4ULL*G);
+define_pd_global(uint64_t, MaxRAM, 4ULL*G);
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, true);
// Heap related flags
-define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
+define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
--- a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -56,7 +56,7 @@
define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
-define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
+define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
define_pd_global(uintx, TypeProfileLevel, 0);
--- a/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,10 +53,10 @@
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(uintx, MetaspaceSize, 12*M );
+define_pd_global(size_t, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
-define_pd_global(intx, NewSizeThreadIncrease, 16*K );
-define_pd_global(uint64_t,MaxRAM, 1ULL*G);
+define_pd_global(size_t, NewSizeThreadIncrease, 16*K );
+define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(intx, InitialCodeCacheSize, 160*K);
#endif // !TIERED
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
define_pd_global(intx, FreqInlineSize, 175);
define_pd_global(intx, INTPRESSURE, 48); // large register set
define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment
-define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
+define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, RegisterCostAreaRatio, 12000);
define_pd_global(bool, UseTLAB, true);
define_pd_global(bool, ResizeTLAB, true);
@@ -90,7 +90,7 @@
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
-define_pd_global(uint64_t,MaxRAM, 4ULL*G);
+define_pd_global(uint64_t, MaxRAM, 4ULL*G);
#endif
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
@@ -98,7 +98,7 @@
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on sparc.
// Heap related flags
-define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
+define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@
define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
-define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
+define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, TypeProfileLevel, 0);
--- a/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,39 +32,39 @@
// (see c1_globals.hpp)
#ifndef TIERED
-define_pd_global(bool, BackgroundCompilation, true );
-define_pd_global(bool, UseTLAB, true );
-define_pd_global(bool, ResizeTLAB, true );
-define_pd_global(bool, InlineIntrinsics, true );
-define_pd_global(bool, PreferInterpreterNativeStubs, false);
-define_pd_global(bool, ProfileTraps, false);
-define_pd_global(bool, UseOnStackReplacement, true );
-define_pd_global(bool, TieredCompilation, false);
-define_pd_global(intx, CompileThreshold, 1500 );
+define_pd_global(bool, BackgroundCompilation, true );
+define_pd_global(bool, UseTLAB, true );
+define_pd_global(bool, ResizeTLAB, true );
+define_pd_global(bool, InlineIntrinsics, true );
+define_pd_global(bool, PreferInterpreterNativeStubs, false);
+define_pd_global(bool, ProfileTraps, false);
+define_pd_global(bool, UseOnStackReplacement, true );
+define_pd_global(bool, TieredCompilation, false);
+define_pd_global(intx, CompileThreshold, 1500 );
-define_pd_global(intx, OnStackReplacePercentage, 933 );
-define_pd_global(intx, FreqInlineSize, 325 );
-define_pd_global(intx, NewSizeThreadIncrease, 4*K );
-define_pd_global(intx, InitialCodeCacheSize, 160*K);
-define_pd_global(intx, ReservedCodeCacheSize, 32*M );
-define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
-define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
-define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
-define_pd_global(bool, ProfileInterpreter, false);
-define_pd_global(intx, CodeCacheExpansionSize, 32*K );
-define_pd_global(uintx, CodeCacheMinBlockLength, 1);
-define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(uintx, MetaspaceSize, 12*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true );
-define_pd_global(uint64_t,MaxRAM, 1ULL*G);
-define_pd_global(bool, CICompileOSR, true );
+define_pd_global(intx, OnStackReplacePercentage, 933 );
+define_pd_global(intx, FreqInlineSize, 325 );
+define_pd_global(size_t, NewSizeThreadIncrease, 4*K );
+define_pd_global(intx, InitialCodeCacheSize, 160*K);
+define_pd_global(intx, ReservedCodeCacheSize, 32*M );
+define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
+define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
+define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
+define_pd_global(bool, ProfileInterpreter, false);
+define_pd_global(intx, CodeCacheExpansionSize, 32*K );
+define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
+define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
+define_pd_global(size_t, MetaspaceSize, 12*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t, MaxRAM, 1ULL*G);
+define_pd_global(bool, CICompileOSR, true );
#endif // !TIERED
-define_pd_global(bool, UseTypeProfile, false);
-define_pd_global(bool, RoundFPResults, true );
+define_pd_global(bool, UseTypeProfile, false);
+define_pd_global(bool, RoundFPResults, true );
-define_pd_global(bool, LIRFillDelaySlots, false);
-define_pd_global(bool, OptimizeSinglePrecision, true );
-define_pd_global(bool, CSEArrayLength, false);
-define_pd_global(bool, TwoOperandLIRForm, true );
+define_pd_global(bool, LIRFillDelaySlots, false);
+define_pd_global(bool, OptimizeSinglePrecision, true );
+define_pd_global(bool, CSEArrayLength, false);
+define_pd_global(bool, TwoOperandLIRForm, true );
#endif // CPU_X86_VM_C1_GLOBALS_X86_HPP
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,25 +54,25 @@
#ifdef AMD64
define_pd_global(intx, INTPRESSURE, 13);
define_pd_global(intx, InteriorEntryAlignment, 16);
-define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
+define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit, 60);
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
-define_pd_global(uint64_t,MaxRAM, 128ULL*G);
+define_pd_global(uint64_t, MaxRAM, 128ULL*G);
#else
define_pd_global(intx, INTPRESSURE, 6);
define_pd_global(intx, InteriorEntryAlignment, 4);
-define_pd_global(intx, NewSizeThreadIncrease, 4*K);
+define_pd_global(size_t, NewSizeThreadIncrease, 4*K);
define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
-define_pd_global(uint64_t,MaxRAM, 4ULL*G);
+define_pd_global(uint64_t, MaxRAM, 4ULL*G);
#endif // AMD64
define_pd_global(intx, RegisterCostAreaRatio, 16000);
@@ -93,7 +93,7 @@
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on x86.
// Heap related flags
-define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
+define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -78,7 +78,7 @@
#endif
// GC Ergo Flags
-define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
+define_pd_global(size_t, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, TypeProfileLevel, 111);
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -57,7 +57,7 @@
define_pd_global(bool, UseMembar, true);
// GC Ergo Flags
-define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
+define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, TypeProfileLevel, 0);
--- a/hotspot/src/cpu/zero/vm/shark_globals_zero.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/cpu/zero/vm/shark_globals_zero.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -50,7 +50,7 @@
define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(uintx, NewRatio, 12 );
-define_pd_global(intx, NewSizeThreadIncrease, 4*K );
+define_pd_global(size_t, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
@@ -61,7 +61,7 @@
define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
define_pd_global(uintx, CodeCacheMinimumUseSpace, 200*K);
-define_pd_global(uintx, MetaspaceSize, 12*M );
+define_pd_global(size_t, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
--- a/hotspot/src/os_cpu/aix_ppc/vm/globals_aix_ppc.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/aix_ppc/vm/globals_aix_ppc.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -40,14 +40,14 @@
define_pd_global(intx, CompilerThreadStackSize, 4096);
// Allow extra space in DEBUG builds for asserts.
-define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
+define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
define_pd_global(intx, StackYellowPages, 6);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, StackShadowPages, 6 DEBUG_ONLY(+2));
// Only used on 64 bit platforms
-define_pd_global(uintx,HeapBaseMinAddress, 2*G);
+define_pd_global(size_t, HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
--- a/hotspot/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,9 +43,9 @@
define_pd_global(intx, CompilerThreadStackSize, 0);
-define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
+define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
// Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx, HeapBaseMinAddress, 2*G);
+define_pd_global(size_t, HeapBaseMinAddress, 2*G);
#endif // OS_CPU_BSD_X86_VM_GLOBALS_BSD_X86_HPP
--- a/hotspot/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,9 +39,9 @@
define_pd_global(intx, VMThreadStackSize, 512);
#endif // _LP64
define_pd_global(intx, CompilerThreadStackSize, 0);
-define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
+define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
// Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx, HeapBaseMinAddress, 2*G);
+define_pd_global(size_t, HeapBaseMinAddress, 2*G);
#endif // OS_CPU_BSD_ZERO_VM_GLOBALS_BSD_ZERO_HPP
--- a/hotspot/src/os_cpu/linux_ppc/vm/globals_linux_ppc.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/linux_ppc/vm/globals_linux_ppc.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -40,14 +40,14 @@
define_pd_global(intx, CompilerThreadStackSize, 4096);
// Allow extra space in DEBUG builds for asserts.
-define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
+define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
define_pd_global(intx, StackYellowPages, 6);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, StackShadowPages, 6 DEBUG_ONLY(+2));
// Only used on 64 bit platforms
-define_pd_global(uintx,HeapBaseMinAddress, 2*G);
+define_pd_global(size_t, HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
--- a/hotspot/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,10 +30,10 @@
// runtime system. (see globals.hpp)
//
-define_pd_global(uintx, JVMInvokeMethodSlack, 12288);
+define_pd_global(size_t, JVMInvokeMethodSlack, 12288);
define_pd_global(intx, CompilerThreadStackSize, 0);
// Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx, HeapBaseMinAddress, CONST64(4)*G);
+define_pd_global(size_t, HeapBaseMinAddress, CONST64(4)*G);
#endif // OS_CPU_LINUX_SPARC_VM_GLOBALS_LINUX_SPARC_HPP
--- a/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,9 +42,9 @@
define_pd_global(intx, CompilerThreadStackSize, 0);
-define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
+define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
// Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx,HeapBaseMinAddress, 2*G);
+define_pd_global(size_t, HeapBaseMinAddress, 2*G);
#endif // OS_CPU_LINUX_X86_VM_GLOBALS_LINUX_X86_HPP
--- a/hotspot/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,9 +39,9 @@
define_pd_global(intx, VMThreadStackSize, 512);
#endif // _LP64
define_pd_global(intx, CompilerThreadStackSize, 0);
-define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
+define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
// Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx, HeapBaseMinAddress, 2*G);
+define_pd_global(size_t, HeapBaseMinAddress, 2*G);
#endif // OS_CPU_LINUX_ZERO_VM_GLOBALS_LINUX_ZERO_HPP
--- a/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,16 +30,16 @@
// (see globals.hpp)
//
-define_pd_global(uintx, JVMInvokeMethodSlack, 12288);
+define_pd_global(size_t, JVMInvokeMethodSlack, 12288);
define_pd_global(intx, CompilerThreadStackSize, 0);
// Used on 64 bit platforms for UseCompressedOops base address
#ifdef _LP64
// use 6G as default base address because by default the OS maps the application
// to 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
-define_pd_global(uintx, HeapBaseMinAddress, CONST64(6)*G);
+define_pd_global(size_t, HeapBaseMinAddress, CONST64(6)*G);
#else
-define_pd_global(uintx, HeapBaseMinAddress, 2*G);
+define_pd_global(size_t, HeapBaseMinAddress, 2*G);
#endif
--- a/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,18 +32,18 @@
#ifdef AMD64
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 1024);
-define_pd_global(uintx,JVMInvokeMethodSlack, 8*K);
+define_pd_global(size_t, JVMInvokeMethodSlack, 8*K);
#else
// ThreadStackSize 320 allows a couple of test cases to run while
// keeping the number of threads that can be created high.
define_pd_global(intx, ThreadStackSize, 320);
define_pd_global(intx, VMThreadStackSize, 512);
-define_pd_global(uintx,JVMInvokeMethodSlack, 10*K);
+define_pd_global(size_t, JVMInvokeMethodSlack, 10*K);
#endif // AMD64
define_pd_global(intx, CompilerThreadStackSize, 0);
// Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx,HeapBaseMinAddress, 2*G);
+define_pd_global(size_t, HeapBaseMinAddress, 2*G);
#endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP
--- a/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,9 +43,9 @@
define_pd_global(intx, CompilerThreadStackSize, 0);
#endif
-define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
+define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
// Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx, HeapBaseMinAddress, 2*G);
+define_pd_global(size_t, HeapBaseMinAddress, 2*G);
#endif // OS_CPU_WINDOWS_X86_VM_GLOBALS_WINDOWS_X86_HPP
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -2662,8 +2662,8 @@
// Need to smooth wrt historical average
if (ResizeOldPLAB) {
_blocks_to_claim[i].sample(
- MAX2((size_t)CMSOldPLABMin,
- MIN2((size_t)CMSOldPLABMax,
+ MAX2(CMSOldPLABMin,
+ MIN2(CMSOldPLABMax,
_global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
}
// Reset counters for next round
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -696,32 +696,32 @@
}
if (FLAG_IS_DEFAULT(MarkStackSize)) {
- uintx mark_stack_size =
+ size_t mark_stack_size =
MIN2(MarkStackSizeMax,
- MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
+ MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
// Verify that the calculated value for MarkStackSize is in range.
// It would be nice to use the private utility routine from Arguments.
if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
- warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
- "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
- mark_stack_size, (uintx) 1, MarkStackSizeMax);
+ warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
+ "must be between 1 and " SIZE_FORMAT,
+ mark_stack_size, MarkStackSizeMax);
return;
}
- FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
+ FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
} else {
// Verify MarkStackSize is in range.
if (FLAG_IS_CMDLINE(MarkStackSize)) {
if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
- warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
- "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
- MarkStackSize, (uintx) 1, MarkStackSizeMax);
+ warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
+ "must be between 1 and " SIZE_FORMAT,
+ MarkStackSize, MarkStackSizeMax);
return;
}
} else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
- warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
- " or for MarkStackSizeMax (" UINTX_FORMAT ")",
+ warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
+ " or for MarkStackSizeMax (" SIZE_FORMAT ")",
MarkStackSize, MarkStackSizeMax);
return;
}
@@ -745,7 +745,7 @@
// so that the assertion in MarkingTaskQueue::task_queue doesn't fail
_active_tasks = _max_worker_id;
- size_t max_regions = (size_t) _g1h->max_regions();
+ uint max_regions = _g1h->max_regions();
for (uint i = 0; i < _max_worker_id; ++i) {
CMTaskQueue* task_queue = new CMTaskQueue();
task_queue->initialize();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -321,7 +321,7 @@
void G1CollectorPolicy::initialize_flags() {
if (G1HeapRegionSize != HeapRegion::GrainBytes) {
- FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes);
+ FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
}
if (SurvivorRatio < 1) {
@@ -335,7 +335,7 @@
uintx max_regions = G1CollectedHeap::heap()->max_regions();
size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
if (max_young_size != MaxNewSize) {
- FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size);
+ FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
}
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -107,7 +107,7 @@
double _strong_code_root_scan_time_sec;
uint _worker_i;
- int _block_size;
+ size_t _block_size;
bool _try_claimed;
public:
@@ -125,7 +125,7 @@
_g1h = G1CollectedHeap::heap();
_bot_shared = _g1h->bot_shared();
_ct_bs = _g1h->g1_barrier_set();
- _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
+ _block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
}
void set_try_claimed() { _try_claimed = true; }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,7 +86,7 @@
"If true, enable reference discovery during concurrent " \
"marking and reference processing at the end of remark.") \
\
- product(intx, G1SATBBufferSize, 1*K, \
+ product(size_t, G1SATBBufferSize, 1*K, \
"Number of entries in an SATB log buffer.") \
\
develop(intx, G1SATBProcessCompletedThreshold, 20, \
@@ -112,7 +112,7 @@
"Prints the liveness information for all regions in the heap " \
"at the end of a marking cycle.") \
\
- product(intx, G1UpdateBufferSize, 256, \
+ product(size_t, G1UpdateBufferSize, 256, \
"Size of an update buffer") \
\
product(intx, G1ConcRefinementYellowZone, 0, \
@@ -148,7 +148,7 @@
"Select green, yellow and red zones adaptively to meet the " \
"the pause requirements.") \
\
- product(uintx, G1ConcRSLogCacheSize, 10, \
+ product(size_t, G1ConcRSLogCacheSize, 10, \
"Log base 2 of the length of conc RS hot-card cache.") \
\
product(uintx, G1ConcRSHotCardLimit, 4, \
@@ -210,7 +210,7 @@
"When set, G1 will fail when it encounters an FP 'error', " \
"so as to allow debugging") \
\
- product(uintx, G1HeapRegionSize, 0, \
+ product(size_t, G1HeapRegionSize, 0, \
"Size of the G1 regions.") \
\
product(uintx, G1ConcRefinementThreads, 0, \
@@ -220,7 +220,7 @@
develop(bool, G1VerifyCTCleanup, false, \
"Verify card table cleanup.") \
\
- product(uintx, G1RSetScanBlockSize, 64, \
+ product(size_t, G1RSetScanBlockSize, 64, \
"Size of a work unit of cards claimed by a worker thread" \
"during RSet scanning.") \
\
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -106,18 +106,18 @@
}
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
- uintx region_size = G1HeapRegionSize;
+ size_t region_size = G1HeapRegionSize;
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
- (uintx) HeapRegionBounds::min_size());
+ HeapRegionBounds::min_size());
}
int region_size_log = log2_long((jlong) region_size);
// Recalculate the region size to make sure it's a power of
// 2. This means that region_size is the largest power of 2 that's
// <= what we've calculated so far.
- region_size = ((uintx)1 << region_size_log);
+ region_size = ((size_t)1 << region_size_log);
// Now make sure that we don't go over or under our limits.
if (region_size < HeapRegionBounds::min_size()) {
@@ -139,7 +139,7 @@
guarantee(GrainBytes == 0, "we should only set it once");
// The cast to int is safe, given that we've bounded region_size by
// MIN_REGION_SIZE and MAX_REGION_SIZE.
- GrainBytes = (size_t)region_size;
+ GrainBytes = region_size;
guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -98,7 +98,7 @@
if (!is_init_completed()) {
vm_exit_during_initialization(
err_msg("GC triggered before VM initialization completed. Try increasing "
- "NewSize, current value " UINTX_FORMAT "%s.",
+ "NewSize, current value " SIZE_FORMAT "%s.",
byte_size_in_proper_unit(NewSize),
proper_unit_for_byte_size(NewSize)));
}
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -104,15 +104,15 @@
// User inputs from -Xmx and -Xms must be aligned
_min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment);
- uintx aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment);
- uintx aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment);
+ size_t aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment);
+ size_t aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment);
// Write back to flags if the values changed
if (aligned_initial_heap_size != InitialHeapSize) {
- FLAG_SET_ERGO(uintx, InitialHeapSize, aligned_initial_heap_size);
+ FLAG_SET_ERGO(size_t, InitialHeapSize, aligned_initial_heap_size);
}
if (aligned_max_heap_size != MaxHeapSize) {
- FLAG_SET_ERGO(uintx, MaxHeapSize, aligned_max_heap_size);
+ FLAG_SET_ERGO(size_t, MaxHeapSize, aligned_max_heap_size);
}
if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 &&
@@ -120,9 +120,9 @@
vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
}
if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
- FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize);
+ FLAG_SET_ERGO(size_t, MaxHeapSize, InitialHeapSize);
} else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
- FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize);
+ FLAG_SET_ERGO(size_t, InitialHeapSize, MaxHeapSize);
if (InitialHeapSize < _min_heap_byte_size) {
_min_heap_byte_size = InitialHeapSize;
}
@@ -131,7 +131,7 @@
_initial_heap_byte_size = InitialHeapSize;
_max_heap_byte_size = MaxHeapSize;
- FLAG_SET_ERGO(uintx, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));
+ FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment));
DEBUG_ONLY(CollectorPolicy::assert_flags();)
}
@@ -282,18 +282,18 @@
// All generational heaps have a youngest gen; handle those flags here
// Make sure the heap is large enough for two generations
- uintx smallest_new_size = young_gen_size_lower_bound();
- uintx smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment),
+ size_t smallest_new_size = young_gen_size_lower_bound();
+ size_t smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment),
_heap_alignment);
if (MaxHeapSize < smallest_heap_size) {
- FLAG_SET_ERGO(uintx, MaxHeapSize, smallest_heap_size);
+ FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size);
_max_heap_byte_size = MaxHeapSize;
}
// If needed, synchronize _min_heap_byte size and _initial_heap_byte_size
if (_min_heap_byte_size < smallest_heap_size) {
_min_heap_byte_size = smallest_heap_size;
if (InitialHeapSize < _min_heap_byte_size) {
- FLAG_SET_ERGO(uintx, InitialHeapSize, smallest_heap_size);
+ FLAG_SET_ERGO(size_t, InitialHeapSize, smallest_heap_size);
_initial_heap_byte_size = smallest_heap_size;
}
}
@@ -306,8 +306,8 @@
// Now take the actual NewSize into account. We will silently increase NewSize
// if the user specified a smaller or unaligned value.
- uintx bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
- bounded_new_size = MAX2(smallest_new_size, (uintx)align_size_down(bounded_new_size, _gen_alignment));
+ size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
+ bounded_new_size = MAX2(smallest_new_size, (size_t)align_size_down(bounded_new_size, _gen_alignment));
if (bounded_new_size != NewSize) {
// Do not use FLAG_SET_ERGO to update NewSize here, since this will override
// if NewSize was set on the command line or not. This information is needed
@@ -320,21 +320,21 @@
if (!FLAG_IS_DEFAULT(MaxNewSize)) {
if (MaxNewSize >= MaxHeapSize) {
// Make sure there is room for an old generation
- uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
+ size_t smaller_max_new_size = MaxHeapSize - _gen_alignment;
if (FLAG_IS_CMDLINE(MaxNewSize)) {
warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
"heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.",
MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
}
- FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size);
+ FLAG_SET_ERGO(size_t, MaxNewSize, smaller_max_new_size);
if (NewSize > MaxNewSize) {
- FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
+ FLAG_SET_ERGO(size_t, NewSize, MaxNewSize);
_initial_young_size = NewSize;
}
} else if (MaxNewSize < _initial_young_size) {
- FLAG_SET_ERGO(uintx, MaxNewSize, _initial_young_size);
+ FLAG_SET_ERGO(size_t, MaxNewSize, _initial_young_size);
} else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
- FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
+ FLAG_SET_ERGO(size_t, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
}
_max_young_size = MaxNewSize;
}
@@ -347,7 +347,7 @@
"A new max generation size of " SIZE_FORMAT "k will be used.",
NewSize/K, MaxNewSize/K, NewSize/K);
}
- FLAG_SET_ERGO(uintx, MaxNewSize, NewSize);
+ FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
_max_young_size = MaxNewSize;
}
@@ -369,9 +369,9 @@
size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment);
- FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize);
+ FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize);
_max_heap_byte_size = MaxHeapSize;
- FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize);
+ FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize);
_initial_heap_byte_size = InitialHeapSize;
}
@@ -380,19 +380,19 @@
if (_max_heap_size_cmdline) {
// Somebody has set a maximum heap size with the intention that we should not
// exceed it. Adjust New/OldSize as necessary.
- uintx calculated_size = NewSize + OldSize;
+ size_t calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size;
- uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment);
- FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
+ size_t smaller_new_size = align_size_down((size_t)(NewSize * shrink_factor), _gen_alignment);
+ FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
_initial_young_size = NewSize;
// OldSize is already aligned because above we aligned MaxHeapSize to
// _heap_alignment, and we just made sure that NewSize is aligned to
// _gen_alignment. In initialize_flags() we verified that _heap_alignment
// is a multiple of _gen_alignment.
- FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize);
+ FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize);
} else {
- FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
+ FLAG_SET_ERGO(size_t, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
_max_heap_byte_size = MaxHeapSize;
}
}
@@ -405,7 +405,7 @@
// Need to compare against the flag value for max since _max_young_size
// might not have been set yet.
if (new_size >= _min_young_size && new_size <= MaxNewSize) {
- FLAG_SET_ERGO(uintx, NewSize, new_size);
+ FLAG_SET_ERGO(size_t, NewSize, new_size);
_initial_young_size = NewSize;
}
}
@@ -561,15 +561,15 @@
// Write back to flags if necessary.
if (NewSize != _initial_young_size) {
- FLAG_SET_ERGO(uintx, NewSize, _initial_young_size);
+ FLAG_SET_ERGO(size_t, NewSize, _initial_young_size);
}
if (MaxNewSize != _max_young_size) {
- FLAG_SET_ERGO(uintx, MaxNewSize, _max_young_size);
+ FLAG_SET_ERGO(size_t, MaxNewSize, _max_young_size);
}
if (OldSize != _initial_old_size) {
- FLAG_SET_ERGO(uintx, OldSize, _initial_old_size);
+ FLAG_SET_ERGO(size_t, OldSize, _initial_old_size);
}
if (PrintGCDetails && Verbose) {
@@ -907,7 +907,7 @@
//
void MarkSweepPolicy::initialize_alignments() {
- _space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
+ _space_alignment = _gen_alignment = (size_t)Generation::GenGrain;
_heap_alignment = compute_heap_alignment();
}
@@ -939,18 +939,18 @@
// for both min and initial young size if less than min heap.
flag_value = 20 * M;
set_basic_flag_values();
- FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+ FLAG_SET_CMDLINE(size_t, NewSize, flag_value);
verify_young_min(flag_value);
set_basic_flag_values();
- FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+ FLAG_SET_CMDLINE(size_t, NewSize, flag_value);
verify_young_initial(flag_value);
// If NewSize is set on command line, but is larger than the min
// heap size, it should only be used for initial young size.
flag_value = 80 * M;
set_basic_flag_values();
- FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+ FLAG_SET_CMDLINE(size_t, NewSize, flag_value);
verify_young_initial(flag_value);
// If NewSize has been ergonomically set, the collector policy
@@ -958,11 +958,11 @@
// using NewRatio.
flag_value = 20 * M;
set_basic_flag_values();
- FLAG_SET_ERGO(uintx, NewSize, flag_value);
+ FLAG_SET_ERGO(size_t, NewSize, flag_value);
verify_young_min(flag_value);
set_basic_flag_values();
- FLAG_SET_ERGO(uintx, NewSize, flag_value);
+ FLAG_SET_ERGO(size_t, NewSize, flag_value);
verify_scaled_young_initial(InitialHeapSize);
restore_flags();
@@ -978,11 +978,11 @@
// for both min and initial old size if less than min heap.
flag_value = 20 * M;
set_basic_flag_values();
- FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
+ FLAG_SET_CMDLINE(size_t, OldSize, flag_value);
verify_old_min(flag_value);
set_basic_flag_values();
- FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
+ FLAG_SET_CMDLINE(size_t, OldSize, flag_value);
// Calculate what we expect the flag to be.
size_t expected_old_initial = align_size_up(InitialHeapSize, heap_alignment) - MaxNewSize;
verify_old_initial(expected_old_initial);
@@ -993,10 +993,10 @@
// We intentionally set MaxNewSize + OldSize > MaxHeapSize (see over_size).
flag_value = 30 * M;
set_basic_flag_values();
- FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
+ FLAG_SET_CMDLINE(size_t, OldSize, flag_value);
size_t over_size = 20*M;
size_t new_size_value = align_size_up(MaxHeapSize, heap_alignment) - flag_value + over_size;
- FLAG_SET_CMDLINE(uintx, MaxNewSize, new_size_value);
+ FLAG_SET_CMDLINE(size_t, MaxNewSize, new_size_value);
// Calculate what we expect the flag to be.
expected_old_initial = align_size_up(MaxHeapSize, heap_alignment) - MaxNewSize;
verify_old_initial(expected_old_initial);
@@ -1057,11 +1057,11 @@
static size_t original_OldSize;
static void set_basic_flag_values() {
- FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
- FLAG_SET_ERGO(uintx, InitialHeapSize, 100 * M);
- FLAG_SET_ERGO(uintx, OldSize, 4 * M);
- FLAG_SET_ERGO(uintx, NewSize, 1 * M);
- FLAG_SET_ERGO(uintx, MaxNewSize, 80 * M);
+ FLAG_SET_ERGO(size_t, MaxHeapSize, 180 * M);
+ FLAG_SET_ERGO(size_t, InitialHeapSize, 100 * M);
+ FLAG_SET_ERGO(size_t, OldSize, 4 * M);
+ FLAG_SET_ERGO(size_t, NewSize, 1 * M);
+ FLAG_SET_ERGO(size_t, MaxNewSize, 80 * M);
Arguments::set_min_heap_size(40 * M);
}
--- a/hotspot/src/share/vm/memory/metaspace.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -3131,7 +3131,7 @@
void Metaspace::initialize_class_space(ReservedSpace rs) {
// The reserved space size may be bigger because of alignment, esp with UseLargePages
assert(rs.size() >= CompressedClassSpaceSize,
- err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
+ err_msg(SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize));
assert(using_class_space(), "Must be using class space");
_class_space_list = new VirtualSpaceList(rs);
_chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -410,7 +410,7 @@
// Split up and initialize the misc code and data spaces
ReservedSpace* shared_rs = MetaspaceShared::shared_rs();
- int metadata_size = SharedReadOnlySize+SharedReadWriteSize;
+ size_t metadata_size = SharedReadOnlySize + SharedReadWriteSize;
ReservedSpace shared_ro_rw = shared_rs->first_part(metadata_size);
ReservedSpace misc_section = shared_rs->last_part(metadata_size);
--- a/hotspot/src/share/vm/prims/jvm.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/prims/jvm.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -402,7 +402,7 @@
PUTPROP(props, "sun.nio.MaxDirectMemorySize", "-1");
} else {
char as_chars[256];
- jio_snprintf(as_chars, sizeof(as_chars), UINTX_FORMAT, MaxDirectMemorySize);
+ jio_snprintf(as_chars, sizeof(as_chars), SIZE_FORMAT, MaxDirectMemorySize);
PUTPROP(props, "sun.nio.MaxDirectMemorySize", as_chars);
}
}
--- a/hotspot/src/share/vm/runtime/arguments.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -67,16 +67,16 @@
} \
} while(0)
-char** Arguments::_jvm_flags_array = NULL;
-int Arguments::_num_jvm_flags = 0;
-char** Arguments::_jvm_args_array = NULL;
-int Arguments::_num_jvm_args = 0;
+char** Arguments::_jvm_flags_array = NULL;
+int Arguments::_num_jvm_flags = 0;
+char** Arguments::_jvm_args_array = NULL;
+int Arguments::_num_jvm_args = 0;
char* Arguments::_java_command = NULL;
SystemProperty* Arguments::_system_properties = NULL;
const char* Arguments::_gc_log_filename = NULL;
bool Arguments::_has_profile = false;
size_t Arguments::_conservative_max_heap_alignment = 0;
-uintx Arguments::_min_heap_size = 0;
+size_t Arguments::_min_heap_size = 0;
uintx Arguments::_min_heap_free_ratio = 0;
uintx Arguments::_max_heap_free_ratio = 0;
Arguments::Mode Arguments::_mode = _mixed;
@@ -1343,9 +1343,9 @@
// NewSize was set on the command line and it is larger than
// preferred_max_new_size.
if (!FLAG_IS_DEFAULT(NewSize)) { // NewSize explicitly set at command-line
- FLAG_SET_ERGO(uintx, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
+ FLAG_SET_ERGO(size_t, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
} else {
- FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size);
+ FLAG_SET_ERGO(size_t, MaxNewSize, preferred_max_new_size);
}
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
@@ -1368,8 +1368,8 @@
// Unless explicitly requested otherwise, make young gen
// at least min_new, and at most preferred_max_new_size.
if (FLAG_IS_DEFAULT(NewSize)) {
- FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new));
- FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
+ FLAG_SET_ERGO(size_t, NewSize, MAX2(NewSize, min_new));
+ FLAG_SET_ERGO(size_t, NewSize, MIN2(preferred_max_new_size, NewSize));
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
@@ -1379,7 +1379,7 @@
// so it's NewRatio x of NewSize.
if (FLAG_IS_DEFAULT(OldSize)) {
if (max_heap > NewSize) {
- FLAG_SET_ERGO(uintx, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
+ FLAG_SET_ERGO(size_t, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
@@ -1410,7 +1410,7 @@
// OldPLAB sizing manually turned off: Use a larger default setting,
// unless it was manually specified. This is because a too-low value
// will slow down scavenges.
- FLAG_SET_ERGO(uintx, OldPLABSize, CFLS_LAB::_default_static_old_plab_size); // default value before 6631166
+ FLAG_SET_ERGO(size_t, OldPLABSize, CFLS_LAB::_default_static_old_plab_size); // default value before 6631166
} else {
FLAG_SET_DEFAULT(OldPLABSize, CFLS_LAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
}
@@ -1790,7 +1790,7 @@
}
// Use static initialization to get the default before parsing
-static const uintx DefaultHeapBaseMinAddress = HeapBaseMinAddress;
+static const size_t DefaultHeapBaseMinAddress = HeapBaseMinAddress;
void Arguments::set_heap_size() {
if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
@@ -1830,14 +1830,14 @@
// matches compressed oops printing flags
if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
jio_fprintf(defaultStream::error_stream(),
- "HeapBaseMinAddress must be at least " UINTX_FORMAT
- " (" UINTX_FORMAT "G) which is greater than value given "
- UINTX_FORMAT "\n",
+ "HeapBaseMinAddress must be at least " SIZE_FORMAT
+ " (" SIZE_FORMAT "G) which is greater than value given "
+ SIZE_FORMAT "\n",
DefaultHeapBaseMinAddress,
DefaultHeapBaseMinAddress/G,
HeapBaseMinAddress);
}
- FLAG_SET_ERGO(uintx, HeapBaseMinAddress, DefaultHeapBaseMinAddress);
+ FLAG_SET_ERGO(size_t, HeapBaseMinAddress, DefaultHeapBaseMinAddress);
}
}
@@ -1862,7 +1862,7 @@
// Cannot use gclog_or_tty yet.
tty->print_cr(" Maximum heap size " SIZE_FORMAT, (size_t) reasonable_max);
}
- FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx)reasonable_max);
+ FLAG_SET_ERGO(size_t, MaxHeapSize, (size_t)reasonable_max);
}
// If the minimum or initial heap_size have not been set or requested to be set
@@ -1884,14 +1884,14 @@
if (PrintGCDetails && Verbose) {
// Cannot use gclog_or_tty yet.
- tty->print_cr(" Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial);
+ tty->print_cr(" Initial heap size " SIZE_FORMAT, (size_t)reasonable_initial);
}
- FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial);
+ FLAG_SET_ERGO(size_t, InitialHeapSize, (size_t)reasonable_initial);
}
// If the minimum heap size has not been set (via -Xms),
// synchronize with InitialHeapSize to avoid errors with the default value.
if (min_heap_size() == 0) {
- set_min_heap_size(MIN2((uintx)reasonable_minimum, InitialHeapSize));
+ set_min_heap_size(MIN2((size_t)reasonable_minimum, InitialHeapSize));
if (PrintGCDetails && Verbose) {
// Cannot use gclog_or_tty yet.
tty->print_cr(" Minimum heap size " SIZE_FORMAT, min_heap_size());
@@ -2037,7 +2037,7 @@
}
if (UseGCLogFileRotation && (GCLogFileSize != 0) && (GCLogFileSize < 8*K)) {
- FLAG_SET_CMDLINE(uintx, GCLogFileSize, 8*K);
+ FLAG_SET_CMDLINE(size_t, GCLogFileSize, 8*K);
jio_fprintf(defaultStream::output_stream(),
"GCLogFileSize changed to minimum 8K\n");
}
@@ -2394,7 +2394,7 @@
status = status && verify_min_value(LogEventsBufferEntries, 1, "LogEventsBufferEntries");
- status = status && verify_min_value(HeapSizePerGCThread, (uintx) os::vm_page_size(), "HeapSizePerGCThread");
+ status = status && verify_min_value(HeapSizePerGCThread, (size_t) os::vm_page_size(), "HeapSizePerGCThread");
status = status && verify_min_value(GCTaskTimeStampEntries, 1, "GCTaskTimeStampEntries");
@@ -2809,8 +2809,8 @@
describe_range_error(errcode);
return JNI_EINVAL;
}
- FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_young_size);
- FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_young_size);
+ FLAG_SET_CMDLINE(size_t, MaxNewSize, (size_t)long_initial_young_size);
+ FLAG_SET_CMDLINE(size_t, NewSize, (size_t)long_initial_young_size);
// -Xms
} else if (match_option(option, "-Xms", &tail)) {
julong long_initial_heap_size = 0;
@@ -2822,10 +2822,10 @@
describe_range_error(errcode);
return JNI_EINVAL;
}
- set_min_heap_size((uintx)long_initial_heap_size);
+ set_min_heap_size((size_t)long_initial_heap_size);
// Currently the minimum size and the initial heap sizes are the same.
// Can be overridden with -XX:InitialHeapSize.
- FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
+ FLAG_SET_CMDLINE(size_t, InitialHeapSize, (size_t)long_initial_heap_size);
// -Xmx
} else if (match_option(option, "-Xmx", &tail) || match_option(option, "-XX:MaxHeapSize=", &tail)) {
julong long_max_heap_size = 0;
@@ -2836,7 +2836,7 @@
describe_range_error(errcode);
return JNI_EINVAL;
}
- FLAG_SET_CMDLINE(uintx, MaxHeapSize, (uintx)long_max_heap_size);
+ FLAG_SET_CMDLINE(size_t, MaxHeapSize, (size_t)long_max_heap_size);
// Xmaxf
} else if (match_option(option, "-Xmaxf", &tail)) {
char* err;
@@ -2977,7 +2977,7 @@
FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
FLAG_SET_CMDLINE(intx, DeferThrSuspendLoopCount, 1);
FLAG_SET_CMDLINE(bool, UseTLAB, false);
- FLAG_SET_CMDLINE(uintx, NewSizeThreadIncrease, 16 * K); // 20Kb per thread added to new generation
+ FLAG_SET_CMDLINE(size_t, NewSizeThreadIncrease, 16 * K); // 20Kb per thread added to new generation
// -Xinternalversion
} else if (match_option(option, "-Xinternalversion")) {
@@ -3138,16 +3138,16 @@
initHeapSize = limit_by_allocatable_memory(initHeapSize);
if (FLAG_IS_DEFAULT(MaxHeapSize)) {
- FLAG_SET_CMDLINE(uintx, MaxHeapSize, initHeapSize);
- FLAG_SET_CMDLINE(uintx, InitialHeapSize, initHeapSize);
+ FLAG_SET_CMDLINE(size_t, MaxHeapSize, initHeapSize);
+ FLAG_SET_CMDLINE(size_t, InitialHeapSize, initHeapSize);
// Currently the minimum size and the initial heap sizes are the same.
set_min_heap_size(initHeapSize);
}
if (FLAG_IS_DEFAULT(NewSize)) {
// Make the young generation 3/8ths of the total heap.
- FLAG_SET_CMDLINE(uintx, NewSize,
+ FLAG_SET_CMDLINE(size_t, NewSize,
((julong)MaxHeapSize / (julong)8) * (julong)3);
- FLAG_SET_CMDLINE(uintx, MaxNewSize, NewSize);
+ FLAG_SET_CMDLINE(size_t, MaxNewSize, NewSize);
}
#ifndef _ALLBSD_SOURCE // UseLargePages is not yet supported on BSD.
@@ -3155,14 +3155,14 @@
#endif
// Increase some data structure sizes for efficiency
- FLAG_SET_CMDLINE(uintx, BaseFootPrintEstimate, MaxHeapSize);
+ FLAG_SET_CMDLINE(size_t, BaseFootPrintEstimate, MaxHeapSize);
FLAG_SET_CMDLINE(bool, ResizeTLAB, false);
- FLAG_SET_CMDLINE(uintx, TLABSize, 256*K);
+ FLAG_SET_CMDLINE(size_t, TLABSize, 256*K);
// See the OldPLABSize comment below, but replace 'after promotion'
// with 'after copying'. YoungPLABSize is the size of the survivor
// space per-gc-thread buffers. The default is 4kw.
- FLAG_SET_CMDLINE(uintx, YoungPLABSize, 256*K); // Note: this is in words
+ FLAG_SET_CMDLINE(size_t, YoungPLABSize, 256*K); // Note: this is in words
// OldPLABSize is the size of the buffers in the old gen that
// UseParallelGC uses to promote live data that doesn't fit in the
@@ -3177,7 +3177,7 @@
// locality. A minor effect may be that larger PLABs reduce the
// number of PLAB allocation events during gc. The value of 8kw
// was arrived at by experimenting with specjbb.
- FLAG_SET_CMDLINE(uintx, OldPLABSize, 8*K); // Note: this is in words
+ FLAG_SET_CMDLINE(size_t, OldPLABSize, 8*K); // Note: this is in words
// Enable parallel GC and adaptive generation sizing
FLAG_SET_CMDLINE(bool, UseParallelGC, true);
@@ -3256,7 +3256,7 @@
jio_fprintf(defaultStream::error_stream(),
"Please use -XX:MarkStackSize in place of "
"-XX:CMSMarkStackSize or -XX:G1MarkStackSize in the future\n");
- FLAG_SET_CMDLINE(uintx, MarkStackSize, stack_size);
+ FLAG_SET_CMDLINE(size_t, MarkStackSize, stack_size);
} else if (match_option(option, "-XX:CMSMarkStackSizeMax=", &tail)) {
julong max_stack_size = 0;
ArgsRange errcode = parse_memory_size(tail, &max_stack_size, 1);
@@ -3270,7 +3270,7 @@
jio_fprintf(defaultStream::error_stream(),
"Please use -XX:MarkStackSizeMax in place of "
"-XX:CMSMarkStackSizeMax in the future\n");
- FLAG_SET_CMDLINE(uintx, MarkStackSizeMax, max_stack_size);
+ FLAG_SET_CMDLINE(size_t, MarkStackSizeMax, max_stack_size);
} else if (match_option(option, "-XX:ParallelMarkingThreads=", &tail) ||
match_option(option, "-XX:ParallelCMSThreads=", &tail)) {
uintx conc_threads = 0;
@@ -3293,7 +3293,7 @@
describe_range_error(errcode);
return JNI_EINVAL;
}
- FLAG_SET_CMDLINE(uintx, MaxDirectMemorySize, max_direct_memory_size);
+ FLAG_SET_CMDLINE(size_t, MaxDirectMemorySize, max_direct_memory_size);
#if !INCLUDE_MANAGEMENT
} else if (match_option(option, "-XX:+ManagementServer")) {
jio_fprintf(defaultStream::error_stream(),
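
The arguments.cpp hunks above all follow one pattern: FLAG_SET_CMDLINE names the flag's declared type, so every call site that sets a memory-size flag moves from uintx to size_t, while the parsed value still arrives as a 64-bit julong from parse_memory_size and is range-checked before it is narrowed. The sketch below is illustrative plain C++ only, not HotSpot code, and shows why the check has to come before the cast: on a 32-bit VM, size_t is only 32 bits wide.

// Illustrative sketch, not HotSpot code: a memory size is parsed into a
// 64-bit value, validated, and only then narrowed to size_t.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t parsed = 4ULL * 1024 * 1024 * 1024;         // e.g. the value behind "-Xmx4g"
  if (parsed > SIZE_MAX) {                             // range check before narrowing
    std::fprintf(stderr, "size exceeds what this platform can address\n");
    return 1;
  }
  size_t max_heap_size = static_cast<size_t>(parsed);  // safe after the check
  std::printf("MaxHeapSize = %zu bytes\n", max_heap_size);
  return 0;
}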
--- a/hotspot/src/share/vm/runtime/arguments.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/runtime/arguments.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -530,8 +530,8 @@
static bool has_profile() { return _has_profile; }
// -Xms
- static uintx min_heap_size() { return _min_heap_size; }
- static void set_min_heap_size(uintx v) { _min_heap_size = v; }
+ static size_t min_heap_size() { return _min_heap_size; }
+ static void set_min_heap_size(size_t v) { _min_heap_size = v; }
// Returns the original values of -XX:MinHeapFreeRatio and -XX:MaxHeapFreeRatio
static uintx min_heap_free_ratio() { return _min_heap_free_ratio; }
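
In arguments.hpp the min_heap_size() getter and set_min_heap_size() setter move to size_t so that they stay at one type with the -Xms values they now carry, and callers passing size_t never narrow implicitly. A minimal sketch of that pattern with hypothetical names, not the real Arguments class:

// Hypothetical sketch: accessor types track the backing field so size_t
// values flow through without implicit narrowing conversions.
#include <cstddef>

class HeapSettings {
  static size_t _min_heap_size;
 public:
  static size_t min_heap_size()             { return _min_heap_size; }
  static void   set_min_heap_size(size_t v) { _min_heap_size = v; }
};

size_t HeapSettings::_min_heap_size = 0;

int main() {
  HeapSettings::set_min_heap_size(64u * 1024 * 1024);   // 64 MB
  return HeapSettings::min_heap_size() == 64u * 1024 * 1024 ? 0 : 1;
}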
--- a/hotspot/src/share/vm/runtime/globals.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/runtime/globals.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -193,19 +193,19 @@
define_pd_global(intx, OnStackReplacePercentage, 0);
define_pd_global(bool, ResizeTLAB, false);
define_pd_global(intx, FreqInlineSize, 0);
-define_pd_global(intx, NewSizeThreadIncrease, 4*K);
+define_pd_global(size_t, NewSizeThreadIncrease, 4*K);
define_pd_global(intx, InlineClassNatives, true);
define_pd_global(intx, InlineUnsafeOps, true);
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 0);
define_pd_global(intx, ProfiledCodeHeapSize, 0);
-define_pd_global(intx, NonNMethodCodeHeapSize, 32*M);
+define_pd_global(intx, NonNMethodCodeHeapSize, 32*M);
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
define_pd_global(intx, CodeCacheMinBlockLength, 1);
define_pd_global(intx, CodeCacheMinimumUseSpace, 200*K);
-define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(4*M));
+define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(4*M));
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
#define CI_COMPILER_COUNT 0
@@ -468,7 +468,7 @@
// notproduct flags are settable / visible only during development and are not declared in the PRODUCT version
// A flag must be declared with one of the following types:
-// bool, intx, uintx, ccstr, double, or uint64_t.
+// bool, intx, uintx, size_t, ccstr, double, or uint64_t.
// The type "ccstr" is an alias for "const char*" and is used
// only in this file, because the macrology requires single-token type names.
@@ -540,7 +540,7 @@
notproduct(bool, CheckCompressedOops, true, \
"Generate checks in encoding/decoding code in debug VM") \
\
- product_pd(uintx, HeapBaseMinAddress, \
+ product_pd(size_t, HeapBaseMinAddress, \
"OS specific low limit for heap base address") \
\
product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \
@@ -606,7 +606,7 @@
product(bool, UseNUMAInterleaving, false, \
"Interleave memory across NUMA nodes if available") \
\
- product(uintx, NUMAInterleaveGranularity, 2*M, \
+ product(size_t, NUMAInterleaveGranularity, 2*M, \
"Granularity to use for NUMA interleaving on Windows OS") \
\
product(bool, ForceNUMA, false, \
@@ -617,7 +617,7 @@
"computing exponentially decaying average for " \
"AdaptiveNUMAChunkSizing") \
\
- product(uintx, NUMASpaceResizeRate, 1*G, \
+ product(size_t, NUMASpaceResizeRate, 1*G, \
"Do not reallocate more than this amount per collection") \
\
product(bool, UseAdaptiveNUMAChunkSizing, true, \
@@ -641,10 +641,10 @@
product(bool, UseSHA, false, \
"Control whether SHA instructions can be used on SPARC") \
\
- product(uintx, LargePageSizeInBytes, 0, \
+ product(size_t, LargePageSizeInBytes, 0, \
"Large page size (0 to let VM choose the page size)") \
\
- product(uintx, LargePageHeapSizeThreshold, 128*M, \
+ product(size_t, LargePageHeapSizeThreshold, 128*M, \
"Use large pages if maximum heap is at least this big") \
\
product(bool, ForceTimeHighResolution, false, \
@@ -963,11 +963,11 @@
"directory) of the dump file (defaults to java_pid<pid>.hprof " \
"in the working directory)") \
\
- develop(uintx, SegmentedHeapDumpThreshold, 2*G, \
+ develop(size_t, SegmentedHeapDumpThreshold, 2*G, \
"Generate a segmented heap dump (JAVA PROFILE 1.0.2 format) " \
"when the heap usage is larger than this") \
\
- develop(uintx, HeapDumpSegmentSize, 1*G, \
+ develop(size_t, HeapDumpSegmentSize, 1*G, \
"Approximate segment size when generating a segmented heap dump") \
\
develop(bool, BreakAtWarning, false, \
@@ -1465,7 +1465,7 @@
"Force dynamic selection of the number of " \
"parallel threads parallel gc will use to aid debugging") \
\
- product(uintx, HeapSizePerGCThread, ScaleForWordSize(64*M), \
+ product(size_t, HeapSizePerGCThread, ScaleForWordSize(64*M), \
"Size of heap (bytes) per GC thread used in calculating the " \
"number of GC threads") \
\
@@ -1482,10 +1482,10 @@
product(uintx, ConcGCThreads, 0, \
"Number of threads concurrent gc will use") \
\
- product(uintx, YoungPLABSize, 4096, \
+ product(size_t, YoungPLABSize, 4096, \
"Size of young gen promotion LAB's (in HeapWords)") \
\
- product(uintx, OldPLABSize, 1024, \
+ product(size_t, OldPLABSize, 1024, \
"Size of old gen promotion LAB's (in HeapWords), or Number \
of blocks to attempt to claim when refilling CMS LAB's") \
\
@@ -1604,11 +1604,11 @@
product(bool, PrintOldPLAB, false, \
"Print (old gen) promotion LAB's sizing decisions") \
\
- product(uintx, CMSOldPLABMin, 16, \
+ product(size_t, CMSOldPLABMin, 16, \
"Minimum size of CMS gen promotion LAB caches per worker " \
"per block size") \
\
- product(uintx, CMSOldPLABMax, 1024, \
+ product(size_t, CMSOldPLABMax, 1024, \
"Maximum size of CMS gen promotion LAB caches per worker " \
"per block size") \
\
@@ -1631,7 +1631,7 @@
product(bool, AlwaysPreTouch, false, \
"Force all freshly committed pages to be pre-touched") \
\
- product_pd(uintx, CMSYoungGenPerWorker, \
+ product_pd(size_t, CMSYoungGenPerWorker, \
"The maximum size of young gen chosen by default per GC worker " \
"thread available") \
\
@@ -1723,10 +1723,10 @@
develop(bool, CMSOverflowEarlyRestoration, false, \
"Restore preserved marks early") \
\
- product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \
+ product(size_t, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \
"Size of marking stack") \
\
- product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
+ product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
"Maximum size of marking stack") \
\
notproduct(bool, CMSMarkStackOverflowALot, false, \
@@ -1749,10 +1749,10 @@
"Time that we sleep between iterations when not given " \
"enough work per iteration") \
\
- product(uintx, CMSRescanMultiple, 32, \
+ product(size_t, CMSRescanMultiple, 32, \
"Size (in cards) of CMS parallel rescan task") \
\
- product(uintx, CMSConcMarkMultiple, 32, \
+ product(size_t, CMSConcMarkMultiple, 32, \
"Size (in cards) of CMS concurrent MT marking task") \
\
product(bool, CMSAbortSemantics, false, \
@@ -1819,7 +1819,7 @@
product(uintx, CMSRemarkVerifyVariant, 1, \
"Choose variant (1,2) of verification following remark") \
\
- product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M, \
+ product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M, \
"If Eden size is below this, do not try to schedule remark") \
\
product(uintx, CMSScheduleRemarkEdenPenetration, 50, \
@@ -1853,7 +1853,7 @@
product(bool, CMSYield, true, \
"Yield between steps of CMS") \
\
- product(uintx, CMSBitMapYieldQuantum, 10*M, \
+ product(size_t, CMSBitMapYieldQuantum, 10*M, \
"Bitmap operations should process at most this many bits " \
"between yields") \
\
@@ -2033,7 +2033,7 @@
product_pd(uint64_t, MaxRAM, \
"Real memory size (in bytes) used to set maximum heap size") \
\
- product(uintx, ErgoHeapSizeLimit, 0, \
+ product(size_t, ErgoHeapSizeLimit, 0, \
"Maximum ergonomically set heap size (in bytes); zero means use " \
"MaxRAM / MaxRAMFraction") \
\
@@ -2176,7 +2176,7 @@
product(uintx, InitialSurvivorRatio, 8, \
"Initial ratio of young generation/survivor space size") \
\
- product(uintx, BaseFootPrintEstimate, 256*M, \
+ product(size_t, BaseFootPrintEstimate, 256*M, \
"Estimate of footprint other than Java Heap") \
\
product(bool, UseGCOverheadLimit, true, \
@@ -2327,7 +2327,7 @@
develop(bool, TraceClassLoaderData, false, \
"Trace class loader loader_data lifetime") \
\
- product(uintx, InitialBootClassLoaderMetaspaceSize, \
+ product(size_t, InitialBootClassLoaderMetaspaceSize, \
NOT_LP64(2200*K) LP64_ONLY(4*M), \
"Initial size of the boot class loader data metaspace") \
\
@@ -2417,7 +2417,7 @@
"Number of gclog files in rotation " \
"(default: 0, no rotation)") \
\
- product(uintx, GCLogFileSize, 8*K, \
+ product(size_t, GCLogFileSize, 8*K, \
"GC log file size, requires UseGCLogFileRotation. " \
"Set to 0 to only trigger rotation via jcmd") \
\
@@ -2955,11 +2955,11 @@
notproduct(ccstrlist, SuppressErrorAt, "", \
"List of assertions (file:line) to muzzle") \
\
- notproduct(uintx, HandleAllocationLimit, 1024, \
+ notproduct(size_t, HandleAllocationLimit, 1024, \
"Threshold for HandleMark allocation when +TraceHandleAllocation "\
"is used") \
\
- develop(uintx, TotalHandleAllocationLimit, 1024, \
+ develop(size_t, TotalHandleAllocationLimit, 1024, \
"Threshold for total handle allocation when " \
"+TraceHandleAllocation is used") \
\
@@ -3103,30 +3103,30 @@
"Number of times to spin wait before inflation") \
\
/* gc parameters */ \
- product(uintx, InitialHeapSize, 0, \
+ product(size_t, InitialHeapSize, 0, \
"Initial heap size (in bytes); zero means use ergonomics") \
\
- product(uintx, MaxHeapSize, ScaleForWordSize(96*M), \
+ product(size_t, MaxHeapSize, ScaleForWordSize(96*M), \
"Maximum heap size (in bytes)") \
\
- product(uintx, OldSize, ScaleForWordSize(4*M), \
+ product(size_t, OldSize, ScaleForWordSize(4*M), \
"Initial tenured generation size (in bytes)") \
\
- product(uintx, NewSize, ScaleForWordSize(1*M), \
+ product(size_t, NewSize, ScaleForWordSize(1*M), \
"Initial new generation size (in bytes)") \
\
- product(uintx, MaxNewSize, max_uintx, \
+ product(size_t, MaxNewSize, max_uintx, \
"Maximum new generation size (in bytes), max_uintx means set " \
"ergonomically") \
\
- product(uintx, PretenureSizeThreshold, 0, \
+ product(size_t, PretenureSizeThreshold, 0, \
"Maximum size in bytes of objects allocated in DefNew " \
"generation; zero means no maximum") \
\
- product(uintx, TLABSize, 0, \
+ product(size_t, TLABSize, 0, \
"Starting TLAB size (in bytes); zero means set ergonomically") \
\
- product(uintx, MinTLABSize, 2*K, \
+ product(size_t, MinTLABSize, 2*K, \
"Minimum allowed TLAB size (in bytes)") \
\
product(uintx, TLABAllocationWeight, 35, \
@@ -3147,17 +3147,17 @@
product(uintx, NewRatio, 2, \
"Ratio of old/new generation sizes") \
\
- product_pd(uintx, NewSizeThreadIncrease, \
+ product_pd(size_t, NewSizeThreadIncrease, \
"Additional size added to desired new generation size per " \
"non-daemon thread (in bytes)") \
\
- product_pd(uintx, MetaspaceSize, \
+ product_pd(size_t, MetaspaceSize, \
"Initial size of Metaspaces (in bytes)") \
\
- product(uintx, MaxMetaspaceSize, max_uintx, \
+ product(size_t, MaxMetaspaceSize, max_uintx, \
"Maximum size of Metaspaces (in bytes)") \
\
- product(uintx, CompressedClassSpaceSize, 1*G, \
+ product(size_t, CompressedClassSpaceSize, 1*G, \
"Maximum size of class area in Metaspace when compressed " \
"class pointers are used") \
\
@@ -3174,10 +3174,10 @@
product(intx, SoftRefLRUPolicyMSPerMB, 1000, \
"Number of milliseconds per MB of free space in the heap") \
\
- product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K), \
+ product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K), \
"The minimum change in heap space due to GC (in bytes)") \
\
- product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
+ product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
"The minimum expansion of Metaspace (in bytes)") \
\
product(uintx, MinMetaspaceFreeRatio, 40, \
@@ -3188,7 +3188,7 @@
"The maximum percentage of Metaspace free after GC to avoid " \
"shrinking") \
\
- product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
+ product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
"The maximum expansion of Metaspace without full GC (in bytes)") \
\
product(uintx, QueuedAllocationWarningCount, 0, \
@@ -3279,10 +3279,10 @@
product_pd(intx, CompilerThreadStackSize, \
"Compiler Thread Stack Size (in Kbytes)") \
\
- develop_pd(uintx, JVMInvokeMethodSlack, \
+ develop_pd(size_t, JVMInvokeMethodSlack, \
"Stack space (bytes) required for JVM_InvokeMethod to complete") \
\
- product(uintx, ThreadSafetyMargin, 50*M, \
+ product(size_t, ThreadSafetyMargin, 50*M, \
"Thread safety margin is used on fixed-stack LinuxThreads (on " \
"Linux/x86 only) to prevent heap-stack collision. Set to 0 to " \
"disable this feature") \
@@ -3670,7 +3670,7 @@
\
/* Properties for Java libraries */ \
\
- product(uintx, MaxDirectMemorySize, 0, \
+ product(size_t, MaxDirectMemorySize, 0, \
"Maximum total size of NIO direct-buffer allocations") \
\
/* Flags used for temporary code during development */ \
@@ -3774,10 +3774,10 @@
"If PrintSharedArchiveAndExit is true, also print the shared " \
"dictionary") \
\
- product(uintx, SharedReadWriteSize, NOT_LP64(12*M) LP64_ONLY(16*M), \
+ product(size_t, SharedReadWriteSize, NOT_LP64(12*M) LP64_ONLY(16*M), \
"Size of read-write space for metadata (in bytes)") \
\
- product(uintx, SharedReadOnlySize, NOT_LP64(12*M) LP64_ONLY(16*M), \
+ product(size_t, SharedReadOnlySize, NOT_LP64(12*M) LP64_ONLY(16*M), \
"Size of read-only space for metadata (in bytes)") \
\
product(uintx, SharedMiscDataSize, NOT_LP64(2*M) LP64_ONLY(4*M), \
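
The globals.hpp hunks change the type token in the flag table itself; the flag macrology expands each entry into a global of exactly that type plus its runtime metadata, which is why size_t also had to be added to the list of legal flag types in the comment near the top of this series of hunks. Note that MaxNewSize keeps max_uintx as its "set ergonomically" sentinel even though the flag is now declared size_t. The sketch below is a deliberately simplified, hypothetical stand-in for the table-expansion idea, not the real globals.hpp macros:

// Hypothetical sketch of the flag-table idea: the type token in the entry is
// pasted in as the C++ type of the backing global, so memory-size entries
// must say size_t once the rest of the VM handles them as size_t.
#include <cstddef>

#define DEMO_PRODUCT_FLAG(type, name, value, doc) type name = value;

DEMO_PRODUCT_FLAG(size_t, DemoMaxHeapSize, 96u * 1024 * 1024,
                  "Maximum heap size (in bytes)")
DEMO_PRODUCT_FLAG(size_t, DemoTLABSize, 0,
                  "Starting TLAB size (in bytes); zero means set ergonomically")

int main() {
  return (DemoMaxHeapSize > DemoTLABSize) ? 0 : 1;
}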
--- a/hotspot/src/share/vm/runtime/handles.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/runtime/handles.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,7 +83,7 @@
}
// The thread local handle areas should not get very large
- if (TraceHandleAllocation && handles_visited > TotalHandleAllocationLimit) {
+ if (TraceHandleAllocation && (size_t)handles_visited > TotalHandleAllocationLimit) {
#ifdef ASSERT
warning("%d: Visited in HandleMark : %d",
_nof_handlemarks, handles_visited);
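
The handles.cpp change is a follow-on from TotalHandleAllocationLimit becoming size_t: handles_visited is a signed count, so the comparison gets an explicit cast to stay clean under sign-compare warnings. An illustrative stand-alone version of that comparison, with assumed values and outside HotSpot:

// Illustrative only: comparing a signed count against a size_t limit takes an
// explicit cast; the count is known to be non-negative at this point.
#include <cstddef>
#include <cstdio>

int main() {
  int handles_visited = 2048;                   // never negative in this context
  size_t total_limit = 1024;                    // analogue of TotalHandleAllocationLimit
  if ((size_t)handles_visited > total_limit) {  // the cast avoids -Wsign-compare
    std::printf("thread-local handle area is larger than expected\n");
  }
  return 0;
}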
--- a/hotspot/src/share/vm/services/heapDumper.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/services/heapDumper.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -1721,7 +1721,7 @@
// Write the file header - use 1.0.2 for large heaps, otherwise 1.0.1
size_t used = ch->used();
const char* header;
- if (used > (size_t)SegmentedHeapDumpThreshold) {
+ if (used > SegmentedHeapDumpThreshold) {
set_segmented_dump();
header = "JAVA PROFILE 1.0.2";
} else {
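
The heapDumper.cpp hunk is the inverse case: used is already a size_t, and once SegmentedHeapDumpThreshold is size_t as well, the old (size_t) cast on the flag is redundant and is dropped, leaving a comparison between like types. A small stand-alone sketch of the same decision, with assumed names:

// Illustrative only: with both operands size_t, no cast is needed to choose
// between the segmented (1.0.2) and plain (1.0.1) heap dump formats.
#include <cstddef>

static const char* pick_header(size_t used, size_t segmented_threshold) {
  return (used > segmented_threshold) ? "JAVA PROFILE 1.0.2"
                                      : "JAVA PROFILE 1.0.1";
}

int main() {
  size_t used = 3221225472u;        // roughly 3 GB of heap in use
  size_t threshold = 2147483648u;   // 2 GB threshold
  return (pick_header(used, threshold) != nullptr) ? 0 : 1;
}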
--- a/hotspot/src/share/vm/utilities/debug.cpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/utilities/debug.cpp Tue Mar 03 18:01:27 2015 +0100
@@ -273,7 +273,7 @@
}
void report_insufficient_metaspace(size_t required_size) {
- warning("\nThe MaxMetaspaceSize of " UINTX_FORMAT " bytes is not large enough.\n"
+ warning("\nThe MaxMetaspaceSize of " SIZE_FORMAT " bytes is not large enough.\n"
"Either don't specify the -XX:MaxMetaspaceSize=<size>\n"
"or increase the size to at least " SIZE_FORMAT ".\n",
MaxMetaspaceSize, required_size);
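
In debug.cpp the format specifier follows the flag's new type: MaxMetaspaceSize is printed with SIZE_FORMAT now that it is a size_t, matching the SIZE_FORMAT already used for required_size in the same message. In plain portable C++ the analogue of that macro is the %zu conversion; an illustrative stand-alone version of the warning, without the HotSpot format macros:

// Illustrative only: both values are size_t, so both are printed with %zu,
// the plain-C++ analogue of what SIZE_FORMAT provides inside HotSpot.
#include <cstddef>
#include <cstdio>

int main() {
  size_t max_metaspace_size = 64u * 1024 * 1024;   // pretend -XX:MaxMetaspaceSize=64m
  size_t required_size      = 96u * 1024 * 1024;   // pretend the VM needs 96 MB
  std::printf("The MaxMetaspaceSize of %zu bytes is not large enough.\n"
              "Increase the size to at least %zu bytes.\n",
              max_metaspace_size, required_size);
  return 0;
}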
--- a/hotspot/src/share/vm/utilities/ostream.hpp Mon Mar 02 11:08:09 2015 +0100
+++ b/hotspot/src/share/vm/utilities/ostream.hpp Tue Mar 03 18:01:27 2015 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -250,7 +250,7 @@
/* If "force" sets true, force log file rotation from outside JVM */
bool should_rotate(bool force) {
return force ||
- ((GCLogFileSize != 0) && ((uintx)_bytes_written >= GCLogFileSize));
+ ((GCLogFileSize != 0) && (_bytes_written >= (jlong)GCLogFileSize));
}
};
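
The ostream.hpp change closes out the series: _bytes_written is a signed 64-bit counter (a jlong), so instead of casting the counter to the flag's old uintx type, the size_t flag is now converted to jlong for the comparison. A stand-alone sketch of the same check, with assumed types in place of the HotSpot ones:

// Illustrative sketch of the rotation check: the signed 64-bit byte counter
// is compared against the size_t flag by converting the flag to the counter's
// type (jlong in HotSpot, int64_t here).
#include <cstddef>
#include <cstdint>

static bool should_rotate(bool force, int64_t bytes_written, size_t gc_log_file_size) {
  return force ||
         ((gc_log_file_size != 0) && (bytes_written >= (int64_t)gc_log_file_size));
}

int main() {
  // 10 KB written against an 8 KB rotation threshold: rotate.
  return should_rotate(false, 10 * 1024, 8 * 1024) ? 0 : 1;
}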