--- a/hotspot/make/Makefile Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/make/Makefile Fri Aug 29 08:07:13 2014 -0700
@@ -721,6 +721,19 @@
($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
fi
+copy_optimized_jdk::
+ $(RM) -r $(JDK_IMAGE_DIR)/optimized
+ $(MKDIR) -p $(JDK_IMAGE_DIR)/optimized
+ if [ -d $(JDK_IMPORT_PATH)/optimized ] ; then \
+ ($(CD) $(JDK_IMPORT_PATH)/optimized && \
+ $(TAR) -cf - $(JDK_DIRS)) | \
+ ($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
+ else \
+ ($(CD) $(JDK_IMPORT_PATH) && \
+ $(TAR) -cf - $(JDK_DIRS)) | \
+ ($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
+ fi
+
#
# Check target
#
--- a/hotspot/make/jprt.gmk Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/make/jprt.gmk Fri Aug 29 08:07:13 2014 -0700
@@ -42,6 +42,9 @@
jprt_build_fastdebugEmb:
$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_fastdebug
+jprt_build_optimizedEmb:
+ $(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_optimized
+
jprt_build_productOpen:
$(MAKE) OPENJDK=true jprt_build_product
@@ -51,6 +54,9 @@
jprt_build_fastdebugOpen:
$(MAKE) OPENJDK=true jprt_build_fastdebug
+jprt_build_optimizedOpen:
+ $(MAKE) OPENJDK=true jprt_build_optimized
+
jprt_build_product: all_product copy_product_jdk export_product_jdk
( $(CD) $(JDK_IMAGE_DIR) && \
$(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
@@ -63,5 +69,9 @@
( $(CD) $(JDK_IMAGE_DIR)/debug && \
$(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
-.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug
+jprt_build_optimized: all_optimized copy_optimized_jdk export_optimized_jdk
+ ( $(CD) $(JDK_IMAGE_DIR)/optimized && \
+ $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
+.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug jprt_build_optimized
+
--- a/hotspot/make/jprt.properties Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/make/jprt.properties Fri Aug 29 08:07:13 2014 -0700
@@ -93,13 +93,13 @@
# Standard list of jprt build targets for this source tree
jprt.build.targets.standard= \
- ${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \
+ ${jprt.my.solaris.sparcv9}-{product|fastdebug}, \
${jprt.my.solaris.x64}-{product|fastdebug}, \
${jprt.my.linux.i586}-{product|fastdebug}, \
- ${jprt.my.linux.x64}-{product|fastdebug|optimized}, \
+ ${jprt.my.linux.x64}-{product|fastdebug}, \
${jprt.my.macosx.x64}-{product|fastdebug}, \
${jprt.my.windows.i586}-{product|fastdebug}, \
- ${jprt.my.windows.x64}-{product|fastdebug|optimized}, \
+ ${jprt.my.windows.x64}-{product|fastdebug}, \
${jprt.my.linux.armvh}-{product|fastdebug}
jprt.build.targets.open= \
--- a/hotspot/make/windows/makefiles/vm.make Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/make/windows/makefiles/vm.make Fri Aug 29 08:07:13 2014 -0700
@@ -34,6 +34,9 @@
CXX_FLAGS=$(CXX_FLAGS) /D "PRODUCT"
!else
CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT"
+!if "$(BUILDARCH)" == "amd64"
+CXX_FLAGS=$(CXX_FLAGS) /homeparams
+!endif
!endif
!if "$(Variant)" == "compiler1"
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -604,6 +604,17 @@
#if INCLUDE_RTM_OPT
if (UseRTMLocking) {
+ if (is_intel_family_core()) {
+ if ((_model == CPU_MODEL_HASWELL_E3) ||
+ (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
+ (_model == CPU_MODEL_BROADWELL && _stepping < 4)) {
+ if (!UnlockExperimentalVMOptions) {
+ vm_exit_during_initialization("UseRTMLocking is only available as an experimental option on this platform. It must be enabled via the -XX:+UnlockExperimentalVMOptions flag.");
+ } else {
+ warning("UseRTMLocking is only available as experimental option on this platform.");
+ }
+ }
+ }
if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
// RTM locking should be used only for applications with
// high lock contention. For now we do not use it by default.
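
The guard above covers the Intel parts affected by the TSX erratum, where RTM was disabled by microcode update: all Haswell E3 (model 0x3c) steppings, Haswell E7 (model 0x3f) steppings below 3, and Broadwell (model 0x3d) steppings below 4. On those parts UseRTMLocking is demoted to an experimental option. A minimal standalone sketch of the same predicate (the helper name is illustrative, not HotSpot API):

    // Sketch only: mirrors the blacklist in the hunk above.
    static bool rtm_demoted_to_experimental(int model, int stepping) {
      return (model == 0x3c) ||                  // CPU_MODEL_HASWELL_E3
             (model == 0x3f && stepping < 3) ||  // CPU_MODEL_HASWELL_E7
             (model == 0x3d && stepping < 4);    // CPU_MODEL_BROADWELL
    }
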
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -276,7 +276,10 @@
CPU_MODEL_WESTMERE_EX = 0x2f,
CPU_MODEL_SANDYBRIDGE = 0x2a,
CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
- CPU_MODEL_IVYBRIDGE_EP = 0x3a
+ CPU_MODEL_IVYBRIDGE_EP = 0x3a,
+ CPU_MODEL_HASWELL_E3 = 0x3c,
+ CPU_MODEL_HASWELL_E7 = 0x3f,
+ CPU_MODEL_BROADWELL = 0x3d
} cpuExtendedFamily;
// cpuid information block. All info derived from executing cpuid with
--- a/hotspot/src/os/windows/vm/os_windows.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -135,11 +135,6 @@
if (ForceTimeHighResolution)
timeEndPeriod(1L);
- // Workaround for issue when a custom launcher doesn't call
- // DestroyJavaVM and NMT is trying to track memory when free is
- // called from a static destructor
- MemTracker::shutdown();
-
break;
default:
break;
@@ -414,6 +409,8 @@
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
+extern jint volatile vm_getting_terminated;
+
// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
// Try to randomize the cache line index of hot stack frames.
@@ -435,9 +432,17 @@
}
}
+ // Diagnostic code to investigate JDK-6573254 (Part I)
+ unsigned res = 90115; // non-java thread
+ if (thread->is_Java_thread()) {
+ JavaThread* java_thread = (JavaThread*)thread;
+ res = java_lang_Thread::is_daemon(java_thread->threadObj())
+ ? 70115 // java daemon thread
+ : 80115; // java non-daemon thread
+ }
// Install a win32 structured exception handler around every thread created
- // by VM, so VM can genrate error dump when an exception occurred in non-
+ // by VM, so VM can generate an error dump when an exception occurs in a non-
// Java thread (e.g. VM thread).
__try {
thread->run();
@@ -453,6 +458,11 @@
Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
}
+ // Diagnostic code to investigate JDK-6573254 (Part II)
+ if (OrderAccess::load_acquire(&vm_getting_terminated)) {
+ return res;
+ }
+
return 0;
}
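
The diagnostic exit code is only meaningful once shutdown has started, so the read above pairs with a release store performed in before_exit() (see the java.cpp hunk later in this patch): the writer release-stores 1 after the shutdown decision, and each exiting thread acquire-loads the flag before returning its code. A self-contained sketch of the same pairing using std::atomic (illustrative; HotSpot uses its OrderAccess wrappers):

    #include <atomic>

    // Sketch of the handshake between before_exit() and java_start().
    static std::atomic<int> vm_getting_terminated{0};

    void before_exit_sketch() {                   // writer side (java.cpp)
      vm_getting_terminated.store(1, std::memory_order_release);
    }

    unsigned java_start_epilogue(unsigned res) {  // reader side (os_windows.cpp)
      // res is 70115/80115/90115 for daemon/non-daemon/non-Java threads
      return vm_getting_terminated.load(std::memory_order_acquire) ? res : 0;
    }
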
--- a/hotspot/src/share/tools/ProjectCreator/BuildConfig.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/tools/ProjectCreator/BuildConfig.java Fri Aug 29 08:07:13 2014 -0700
@@ -504,7 +504,7 @@
super.init(includes, defines);
- getV("CompilerFlags").addAll(getCI().getDebugCompilerFlags(getOptFlag()));
+ getV("CompilerFlags").addAll(getCI().getDebugCompilerFlags(getOptFlag(), get("PlatformName")));
getV("LinkerFlags").addAll(getCI().getDebugLinkerFlags());
}
}
@@ -619,7 +619,7 @@
abstract class CompilerInterface {
abstract Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir);
abstract Vector getBaseLinkerFlags(String outDir, String outDll, String platformName);
- abstract Vector getDebugCompilerFlags(String opt);
+ abstract Vector getDebugCompilerFlags(String opt, String platformName);
abstract Vector getDebugLinkerFlags();
abstract void getAdditionalNonKernelLinkerFlags(Vector rv);
abstract Vector getProductCompilerFlags();
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Fri Aug 29 08:07:13 2014 -0700
@@ -357,7 +357,7 @@
}
@Override
- Vector getDebugCompilerFlags(String opt) {
+ Vector getDebugCompilerFlags(String opt, String platformName) {
Vector rv = new Vector();
// Set /On option
@@ -369,6 +369,10 @@
addAttr(rv, "RuntimeLibrary", "MultiThreadedDLL");
// Set /Oy- option
addAttr(rv, "OmitFramePointers", "false");
+ // Set /homeparams for x64 debug builds
+ if (platformName.equals("x64")) {
+ addAttr(rv, "AdditionalOptions", "/homeparams");
+ }
return rv;
}
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java Fri Aug 29 08:07:13 2014 -0700
@@ -284,7 +284,7 @@
}
- Vector getDebugCompilerFlags(String opt) {
+ Vector getDebugCompilerFlags(String opt, String platformName) {
Vector rv = new Vector();
getDebugCompilerFlags_common(opt, rv);
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC8.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC8.java Fri Aug 29 08:07:13 2014 -0700
@@ -48,7 +48,7 @@
}
- Vector getDebugCompilerFlags(String opt) {
+ Vector getDebugCompilerFlags(String opt, String platformName) {
Vector rv = new Vector();
getDebugCompilerFlags_common(opt,rv);
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -328,9 +328,11 @@
void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
const char* gen_name = "old";
+ GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
// Generation Counters - generation 1, 1 subspace
- _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
+ _gen_counters = new GenerationCounters(gen_name, 1, 1,
+ gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
_space_counters = new GSpaceCounters(gen_name, 0,
_virtual_space.reserved_size(),
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -32,13 +32,6 @@
-void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
- // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
- // retrieve it here since this would cause firing of several asserts. The code
- // executed after commit of a region already needs to do some re-initialization of
- // the HeapRegion, so we combine that.
-}
-
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////
@@ -72,26 +65,16 @@
return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}
-void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
- set_offset_array(index_for(left), index_for(right -1), offset);
-}
-
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray
//////////////////////////////////////////////////////////////////////
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
- MemRegion mr, bool init_to_zero) :
+ MemRegion mr) :
G1BlockOffsetTable(mr.start(), mr.end()),
_unallocated_block(_bottom),
- _array(array), _gsp(NULL),
- _init_to_zero(init_to_zero) {
+ _array(array), _gsp(NULL) {
assert(_bottom <= _end, "arguments out of order");
- if (!_init_to_zero) {
- // initialize cards to point back to mr.start()
- set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
- _array->set_offset_array(0, 0); // set first card to 0
- }
}
void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
@@ -181,93 +164,6 @@
DEBUG_ONLY(check_all_cards(start_card, end_card);)
}
-// The block [blk_start, blk_end) has been allocated;
-// adjust the block offset table to represent this information;
-// right-open interval: [blk_start, blk_end)
-void
-G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
- mark_block(blk_start, blk_end);
- allocated(blk_start, blk_end);
-}
-
-// Adjust BOT to show that a previously whole block has been split
-// into two.
-void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
- size_t left_blk_size) {
- // Verify that the BOT shows [blk, blk + blk_size) to be one block.
- verify_single_block(blk, blk_size);
- // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
- // is one single block.
- mark_block(blk + left_blk_size, blk + blk_size);
-}
-
-
-// Action_mark - update the BOT for the block [blk_start, blk_end).
-// Current typical use is for splitting a block.
-// Action_single - update the BOT for an allocation.
-// Action_verify - BOT verification.
-void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
- HeapWord* blk_end,
- Action action) {
- assert(Universe::heap()->is_in_reserved(blk_start),
- "reference must be into the heap");
- assert(Universe::heap()->is_in_reserved(blk_end-1),
- "limit must be within the heap");
- // This is optimized to make the test fast, assuming we only rarely
- // cross boundaries.
- uintptr_t end_ui = (uintptr_t)(blk_end - 1);
- uintptr_t start_ui = (uintptr_t)blk_start;
- // Calculate the last card boundary preceding end of blk
- intptr_t boundary_before_end = (intptr_t)end_ui;
- clear_bits(boundary_before_end, right_n_bits(LogN));
- if (start_ui <= (uintptr_t)boundary_before_end) {
- // blk starts at or crosses a boundary
- // Calculate index of card on which blk begins
- size_t start_index = _array->index_for(blk_start);
- // Index of card on which blk ends
- size_t end_index = _array->index_for(blk_end - 1);
- // Start address of card on which blk begins
- HeapWord* boundary = _array->address_for_index(start_index);
- assert(boundary <= blk_start, "blk should start at or after boundary");
- if (blk_start != boundary) {
- // blk starts strictly after boundary
- // adjust card boundary and start_index forward to next card
- boundary += N_words;
- start_index++;
- }
- assert(start_index <= end_index, "monotonicity of index_for()");
- assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
- switch (action) {
- case Action_mark: {
- if (init_to_zero()) {
- _array->set_offset_array(start_index, boundary, blk_start);
- break;
- } // Else fall through to the next case
- }
- case Action_single: {
- _array->set_offset_array(start_index, boundary, blk_start);
- // We have finished marking the "offset card". We need to now
- // mark the subsequent cards that this blk spans.
- if (start_index < end_index) {
- HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
- HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
- set_remainder_to_point_to_start(rem_st, rem_end);
- }
- break;
- }
- case Action_check: {
- _array->check_offset_array(start_index, boundary, blk_start);
- // We have finished checking the "offset card". We need to now
- // check the subsequent cards that this blk spans.
- check_all_cards(start_index + 1, end_index);
- break;
- }
- default:
- ShouldNotReachHere();
- }
- }
-}
-
// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
@@ -306,25 +202,6 @@
}
}
-// The range [blk_start, blk_end) represents a single contiguous block
-// of storage; modify the block offset table to represent this
-// information; Right-open interval: [blk_start, blk_end)
-// NOTE: this method does _not_ adjust _unallocated_block.
-void
-G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
- do_block_internal(blk_start, blk_end, Action_single);
-}
-
-// Mark the BOT such that if [blk_start, blk_end) straddles a card
-// boundary, the card following the first such boundary is marked
-// with the appropriate offset.
-// NOTE: this method does _not_ adjust _unallocated_block or
-// any cards subsequent to the first one.
-void
-G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
- do_block_internal(blk_start, blk_end, Action_mark);
-}
-
HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
@@ -397,57 +274,13 @@
return forward_to_block_containing_addr_const(q, n, addr);
}
-HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
- assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
-
- assert(_bottom <= addr && addr < _end,
- "addr must be covered by this Array");
- // Must read this exactly once because it can be modified by parallel
- // allocation.
- HeapWord* ub = _unallocated_block;
- if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
- assert(ub < _end, "tautology (see above)");
- return ub;
- }
-
- // Otherwise, find the block start using the table, but taking
- // care (cf block_start_unsafe() above) not to parse any objects/blocks
- // on the cards themselves.
- size_t index = _array->index_for(addr);
- assert(_array->address_for_index(index) == addr,
- "arg should be start of card");
-
- HeapWord* q = (HeapWord*)addr;
- uint offset;
- do {
- offset = _array->offset_array(index--);
- q -= offset;
- } while (offset == N_words);
- assert(q <= addr, "block start should be to left of arg");
- return q;
-}
-
// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
void G1BlockOffsetArray::resize(size_t new_word_size) {
HeapWord* new_end = _bottom + new_word_size;
- if (_end < new_end && !init_to_zero()) {
- // verify that the old and new boundaries are also card boundaries
- assert(_array->is_card_boundary(_end),
- "_end not a card boundary");
- assert(_array->is_card_boundary(new_end),
- "new _end would not be a card boundary");
- // set all the newly added cards
- _array->set_offset_array(_end, new_end, N_words);
- }
_end = new_end; // update _end
}
-void G1BlockOffsetArray::set_region(MemRegion mr) {
- _bottom = mr.start();
- _end = mr.end();
-}
-
//
// threshold_
// | _index_
@@ -606,7 +439,7 @@
G1BlockOffsetArrayContigSpace::
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
MemRegion mr) :
- G1BlockOffsetArray(array, mr, true)
+ G1BlockOffsetArray(array, mr)
{
_next_offset_threshold = NULL;
_next_offset_index = 0;
@@ -641,15 +474,6 @@
return _next_offset_threshold;
}
-void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
- assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
- "just checking");
- size_t bottom_index = _array->index_for(_bottom);
- assert(_array->address_for_index(bottom_index) == _bottom,
- "Precondition of call");
- _array->set_offset_array(bottom_index, 0);
-}
-
void
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
assert(new_top <= _end, "_end should have already been updated");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -109,7 +109,12 @@
class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
public:
- virtual void on_commit(uint start_idx, size_t num_regions);
+ virtual void on_commit(uint start_idx, size_t num_regions) {
+ // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
+ // retrieve it here since this would cause firing of several asserts. The code
+ // executed after commit of a region already needs to do some re-initialization of
+ // the HeapRegion, so we combine that.
+ }
};
// This implementation of "G1BlockOffsetTable" divides the covered region
@@ -153,8 +158,6 @@
// For performance these have to devolve to array accesses in product builds.
inline u_char offset_array(size_t index) const;
- void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
-
void set_offset_array_raw(size_t index, u_char offset) {
_offset_array[index] = offset;
}
@@ -165,8 +168,6 @@
inline void set_offset_array(size_t left, size_t right, u_char offset);
- inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
-
bool is_card_boundary(HeapWord* p) const;
public:
@@ -193,8 +194,6 @@
// G1BlockOffsetTable(s) to initialize cards.
G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
- void set_bottom(HeapWord* new_bottom);
-
// Return the appropriate index into "_offset_array" for "p".
inline size_t index_for(const void* p) const;
inline size_t index_for_raw(const void* p) const;
@@ -220,14 +219,6 @@
LogN = G1BlockOffsetSharedArray::LogN
};
- // The following enums are used by do_block_helper
- enum Action {
- Action_single, // BOT records a single block (see single_block())
- Action_mark, // BOT marks the start of a block (see mark_block())
- Action_check // Check that BOT records block correctly
- // (see verify_single_block()).
- };
-
// This is the array, which can be shared by several BlockOffsetArray's
// servicing different
G1BlockOffsetSharedArray* _array;
@@ -235,10 +226,6 @@
// The space that owns this subregion.
G1OffsetTableContigSpace* _gsp;
- // If true, array entries are initialized to 0; otherwise, they are
- // initialized to point backwards to the beginning of the covered region.
- bool _init_to_zero;
-
// The portion [_unallocated_block, _sp.end()) of the space that
// is a single block known not to contain any objects.
// NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
@@ -253,9 +240,6 @@
// that is closed: [start_index, end_index]
void set_remainder_to_point_to_start_incl(size_t start, size_t end);
- // A helper function for BOT adjustment/verification work
- void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);
-
protected:
G1OffsetTableContigSpace* gsp() const { return _gsp; }
@@ -303,11 +287,9 @@
public:
// The space may not have its bottom and top set yet, which is why the
- // region is passed as a parameter. If "init_to_zero" is true, the
- // elements of the array are initialized to zero. Otherwise, they are
- // initialized to point backwards to the beginning.
- G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
- bool init_to_zero);
+ // region is passed as a parameter. The elements of the array are
+ // initialized to zero.
+ G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);
// Note: this ought to be part of the constructor, but that would require
// "this" to be passed as a parameter to a member constructor for
@@ -315,114 +297,19 @@
// This would be legal C++, but MS VC++ doesn't allow it.
void set_space(G1OffsetTableContigSpace* sp);
- // Resets the covered region to the given "mr".
- void set_region(MemRegion mr);
-
// Resets the covered region to one with the same _bottom as before but
// the "new_word_size".
void resize(size_t new_word_size);
- // These must be guaranteed to work properly (i.e., do nothing)
- // when "blk_start" ("blk" for second version) is "NULL".
- virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
- virtual void alloc_block(HeapWord* blk, size_t size) {
- alloc_block(blk, blk + size);
- }
-
- // The following methods are useful and optimized for a
- // general, non-contiguous space.
-
- // Given a block [blk_start, blk_start + full_blk_size), and
- // a left_blk_size < full_blk_size, adjust the BOT to show two
- // blocks [blk_start, blk_start + left_blk_size) and
- // [blk_start + left_blk_size, blk_start + full_blk_size).
- // It is assumed (and verified in the non-product VM) that the
- // BOT was correct for the original block.
- void split_block(HeapWord* blk_start, size_t full_blk_size,
- size_t left_blk_size);
-
- // Adjust the BOT to show that it has a single block in the
- // range [blk_start, blk_start + size). All necessary BOT
- // cards are adjusted, but _unallocated_block isn't.
- void single_block(HeapWord* blk_start, HeapWord* blk_end);
- void single_block(HeapWord* blk, size_t size) {
- single_block(blk, blk + size);
- }
-
- // Adjust BOT to show that it has a block in the range
- // [blk_start, blk_start + size). Only the first card
- // of BOT is touched. It is assumed (and verified in the
- // non-product VM) that the remaining cards of the block
- // are correct.
- void mark_block(HeapWord* blk_start, HeapWord* blk_end);
- void mark_block(HeapWord* blk, size_t size) {
- mark_block(blk, blk + size);
- }
-
- // Adjust _unallocated_block to indicate that a particular
- // block has been newly allocated or freed. It is assumed (and
- // verified in the non-product VM) that the BOT is correct for
- // the given block.
- inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
- // Verify that the BOT shows [blk, blk + blk_size) to be one block.
- verify_single_block(blk_start, blk_end);
- if (BlockOffsetArrayUseUnallocatedBlock) {
- _unallocated_block = MAX2(_unallocated_block, blk_end);
- }
- }
-
- inline void allocated(HeapWord* blk, size_t size) {
- allocated(blk, blk + size);
- }
-
- inline void freed(HeapWord* blk_start, HeapWord* blk_end);
-
- inline void freed(HeapWord* blk, size_t size);
-
virtual HeapWord* block_start_unsafe(const void* addr);
virtual HeapWord* block_start_unsafe_const(const void* addr) const;
- // Requires "addr" to be the start of a card and returns the
- // start of the block that contains the given address.
- HeapWord* block_start_careful(const void* addr) const;
-
- // If true, initialize array slots with no allocated blocks to zero.
- // Otherwise, make them point back to the front.
- bool init_to_zero() { return _init_to_zero; }
-
- // Verification & debugging - ensure that the offset table reflects the fact
- // that the block [blk_start, blk_end) or [blk, blk + size) is a
- // single block of storage. NOTE: can;t const this because of
- // call to non-const do_block_internal() below.
- inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
- if (VerifyBlockOffsetArray) {
- do_block_internal(blk_start, blk_end, Action_check);
- }
- }
-
- inline void verify_single_block(HeapWord* blk, size_t size) {
- verify_single_block(blk, blk + size);
- }
-
// Used by region verification. Checks that the contents of the
// BOT reflect that there's a single object that spans the address
// range [obj_start, obj_start + word_size); returns true if this is
// the case, returns false if it's not.
bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
- // Verify that the given block is before _unallocated_block
- inline void verify_not_unallocated(HeapWord* blk_start,
- HeapWord* blk_end) const {
- if (BlockOffsetArrayUseUnallocatedBlock) {
- assert(blk_start < blk_end, "Block inconsistency?");
- assert(blk_end <= _unallocated_block, "_unallocated_block problem");
- }
- }
-
- inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
- verify_not_unallocated(blk, blk + size);
- }
-
void check_all_cards(size_t left_card, size_t right_card) const;
virtual void print_on(outputStream* out) PRODUCT_RETURN;
@@ -445,14 +332,12 @@
blk_start, blk_end);
}
- // Variant of zero_bottom_entry that does not check for availability of the
+ // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
// memory first.
void zero_bottom_entry_raw();
// Variant of initialize_threshold that does not check for availability of the
// memory first.
HeapWord* initialize_threshold_raw();
- // Zero out the entry for _bottom (offset will be zero).
- void zero_bottom_entry();
public:
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -91,13 +91,6 @@
}
}
-void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
- check_index(index, "index out of range");
- assert(high >= low, "addresses out of order");
- check_offset(pointer_delta(high, low), "offset too large");
- assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
-}
-
// Variant of index_for that does not check the index for validity.
inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
@@ -193,28 +186,4 @@
return q;
}
-//////////////////////////////////////////////////////////////////////////
-// BlockOffsetArrayNonContigSpace inlines
-//////////////////////////////////////////////////////////////////////////
-inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
- // Verify that the BOT shows [blk_start, blk_end) to be one block.
- verify_single_block(blk_start, blk_end);
- // adjust _unallocated_block upward or downward
- // as appropriate
- if (BlockOffsetArrayUseUnallocatedBlock) {
- assert(_unallocated_block <= _end,
- "Inconsistent value for _unallocated_block");
- if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
- // CMS-specific note: a block abutting _unallocated_block to
- // its left is being freed, a new block is being added or
- // we are resetting following a compaction
- _unallocated_block = blk_start;
- }
- }
-}
-
-inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
- freed(blk, blk + size);
-}
-
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
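
With the init_to_zero flag and the non-contiguous-space entry points removed, every G1 BOT is zero-initialized and only the contiguous-space update path survives. The card arithmetic itself is unchanged: index_for_raw above shifts the byte offset from the reserved base by LogN. A small sketch of that mapping, assuming G1's usual 512-byte (2^9) cards:

    // Sketch of the BOT card arithmetic; LogN = 9 assumes 512-byte cards,
    // the HotSpot default. Not an exported API.
    static const int    LogN    = 9;
    static const size_t N_bytes = (size_t)1 << LogN;

    static size_t index_for(const char* p, const char* reserved_start) {
      return (size_t)(p - reserved_start) >> LogN;   // card covering p
    }
    static const char* address_for_index(size_t i, const char* reserved_start) {
      return reserved_start + (i << LogN);           // first byte of card i
    }
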
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -322,29 +322,6 @@
return false;
}
-HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
- HeapWord* low = addr;
- HeapWord* high = end();
- while (low < high) {
- size_t diff = pointer_delta(high, low);
- // Must add one below to bias toward the high amount. Otherwise, if
- // "high" were at the desired value, and "low" were one less, we
- // would not converge on "high". This is not symmetric, because
- // we set "high" to a block start, which might be the right one,
- // which we don't do for "low".
- HeapWord* middle = low + (diff+1)/2;
- if (middle == high) return high;
- HeapWord* mid_bs = block_start_careful(middle);
- if (mid_bs < addr) {
- low = middle;
- } else {
- high = mid_bs;
- }
- }
- assert(low == high && low >= addr, "Didn't work.");
- return low;
-}
-
HeapRegion::HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) :
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -206,10 +206,6 @@
_offsets.reset_bot();
}
- void update_bot_for_object(HeapWord* start, size_t word_size) {
- _offsets.alloc_block(start, word_size);
- }
-
void print_bot_on(outputStream* out) {
_offsets.print_on(out);
}
@@ -737,18 +733,6 @@
bool filter_young,
jbyte* card_ptr);
- // A version of block start that is guaranteed to find *some* block
- // boundary at or before "p", but does not object iteration, and may
- // therefore be used safely when the heap is unparseable.
- HeapWord* block_start_careful(const void* p) const {
- return _offsets.block_start_careful(p);
- }
-
- // Requires that "addr" is within the region. Returns the start of the
- // first ("careful") block that starts at or after "addr", or else the
- // "end" of the region if there is no such block.
- HeapWord* next_block_start_careful(HeapWord* addr);
-
size_t recorded_rs_length() const { return _recorded_rs_length; }
double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -30,6 +30,8 @@
PSGenerationCounters::PSGenerationCounters(const char* name,
int ordinal, int spaces,
+ size_t min_capacity,
+ size_t max_capacity,
PSVirtualSpace* v):
_ps_virtual_space(v) {
@@ -52,11 +54,11 @@
cname = PerfDataManager::counter_name(_name_space, "minCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
- _ps_virtual_space->committed_size(), CHECK);
+ min_capacity, CHECK);
cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
- _ps_virtual_space->reserved_size(), CHECK);
+ max_capacity, CHECK);
cname = PerfDataManager::counter_name(_name_space, "capacity");
_current_size = PerfDataManager::create_variable(SUN_GC, cname,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -41,7 +41,7 @@
public:
PSGenerationCounters(const char* name, int ordinal, int spaces,
- PSVirtualSpace* v);
+ size_t min_capacity, size_t max_capacity, PSVirtualSpace* v);
void update_all() {
assert(_virtual_space == NULL, "Only one should be in use");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -149,8 +149,8 @@
void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
// Generation Counters, generation 'level', 1 subspace
- _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
- virtual_space());
+ _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
+ _max_gen_size, virtual_space());
_space_counters = new SpaceCounters(perf_data_name, 0,
virtual_space()->reserved_size(),
_object_space, _gen_counters);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -101,7 +101,8 @@
}
// Generation Counters - generation 0, 3 subspaces
- _gen_counters = new PSGenerationCounters("new", 0, 3, _virtual_space);
+ _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
+ _max_gen_size, _virtual_space);
// Compute maximum space sizes for performance counters
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -62,11 +62,12 @@
GenerationCounters::GenerationCounters(const char* name,
int ordinal, int spaces,
+ size_t min_capacity, size_t max_capacity,
VirtualSpace* v)
: _virtual_space(v) {
assert(v != NULL, "don't call this constructor if v == NULL");
initialize(name, ordinal, spaces,
- v->committed_size(), v->reserved_size(), v->committed_size());
+ min_capacity, max_capacity, v->committed_size());
}
GenerationCounters::GenerationCounters(const char* name,
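
minCapacity and maxCapacity are published as PerfData constants (see the psGenerationCounters.cpp hunk above), so seeding them from committed_size() and reserved_size() froze whatever the virtual space happened to look like at initialization rather than the generation's actual bounds. The callers updated in this patch pass the collector policy's sizes instead; the new call shape, taken from the defNewGeneration.cpp hunk below:

    GenCollectorPolicy* gcp =
        (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
    _gen_counters = new GenerationCounters("new", 0, 3,
        gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
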
--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -66,7 +66,7 @@
public:
GenerationCounters(const char* name, int ordinal, int spaces,
- VirtualSpace* v);
+ size_t min_capacity, size_t max_capacity, VirtualSpace* v);
~GenerationCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -214,9 +214,11 @@
_max_eden_size = size - (2*_max_survivor_size);
// allocate the performance counters
+ GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
// Generation counters -- generation 0, 3 subspaces
- _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
+ _gen_counters = new GenerationCounters("new", 0, 3,
+ gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
_gc_counters = new CollectorCounters(policy, 0);
_eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
--- a/hotspot/src/share/vm/memory/filemap.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/filemap.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -445,7 +445,7 @@
// close and remove the file. See bug 6372906.
close();
remove(_full_path);
- fail_stop("Unable to write to shared archive file.", NULL);
+ fail_stop("Unable to write to shared archive file.");
}
}
_file_offset += nbytes;
@@ -463,7 +463,7 @@
// that the written file is the correct length.
_file_offset -= 1;
if (lseek(_fd, _file_offset, SEEK_SET) < 0) {
- fail_stop("Unable to seek.", NULL);
+ fail_stop("Unable to seek.");
}
char zero = 0;
write_bytes(&zero, 1);
@@ -534,7 +534,7 @@
// other reserved memory (like the code cache).
ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
if (!rs.is_reserved()) {
- fail_continue(err_msg("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr));
+ fail_continue("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr);
return rs;
}
// the reserved virtual memory is for mapping class data sharing archive
@@ -558,7 +558,7 @@
requested_addr, size, si->_read_only,
si->_allow_exec);
if (base == NULL || base != si->_base) {
- fail_continue(err_msg("Unable to map %s shared space at required address.", shared_region_name[i]));
+ fail_continue("Unable to map %s shared space at required address.", shared_region_name[i]);
return NULL;
}
#ifdef _WINDOWS
@@ -584,7 +584,7 @@
void FileMapInfo::assert_mark(bool check) {
if (!check) {
- fail_stop("Mark mismatch while restoring from shared file.", NULL);
+ fail_stop("Mark mismatch while restoring from shared file.");
}
}
@@ -709,7 +709,7 @@
void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
FileMapInfo *map_info = FileMapInfo::current_info();
if (map_info) {
- map_info->fail_continue(msg);
+ map_info->fail_continue("%s", msg);
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
if (map_info->_header->_space[i]._base != NULL) {
map_info->unmap_region(i);
@@ -717,6 +717,6 @@
}
}
} else if (DumpSharedSpaces) {
- fail_stop(msg, NULL);
+ fail_stop("%s", msg);
}
}
--- a/hotspot/src/share/vm/memory/filemap.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/filemap.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -190,8 +190,8 @@
bool remap_shared_readonly_as_readwrite();
// Errors.
- static void fail_stop(const char *msg, ...);
- static void fail_continue(const char *msg, ...);
+ static void fail_stop(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
+ static void fail_continue(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
// Return true if given address is in the mapped shared space.
bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
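
ATTRIBUTE_PRINTF(1, 2) marks parameter 1 as a printf-style format string consumed by the varargs starting at parameter 2; on gcc/clang it expands to the format function attribute, enabling compile-time checking of every call site. That is why the stray trailing NULL arguments disappear above and caller-supplied strings are now funneled through "%s". A minimal sketch of the mechanism (gcc/clang syntax assumed):

    #include <cstdarg>
    #include <cstdio>

    // Sketch: what ATTRIBUTE_PRINTF(1, 2) buys at each call site.
    __attribute__((format(printf, 1, 2)))
    static void fail_continue_sketch(const char* fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      vfprintf(stderr, fmt, ap);
      va_end(ap);
    }
    // fail_continue_sketch(user_msg);        // warns under -Wformat-security
    // fail_continue_sketch("%s", user_msg);  // clean, as in the hunks above
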
--- a/hotspot/src/share/vm/memory/metaspace.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -3126,6 +3126,8 @@
if (DumpSharedSpaces) {
#if INCLUDE_CDS
+ MetaspaceShared::estimate_regions_size();
+
SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -816,6 +816,7 @@
//tty->print_cr("Preload failed: %s", class_name);
}
}
+ fclose(file);
} else {
char errmsg[JVM_MAXPATHLEN];
os::lasterror(errmsg, JVM_MAXPATHLEN);
@@ -1086,3 +1087,49 @@
}
return true;
}
+
+int MetaspaceShared::count_class(const char* classlist_file) {
+ if (classlist_file == NULL) {
+ return 0;
+ }
+ char class_name[256];
+ int class_count = 0;
+ FILE* file = fopen(classlist_file, "r");
+ if (file != NULL) {
+ while ((fgets(class_name, sizeof class_name, file)) != NULL) {
+ if (*class_name == '#') { // comment
+ continue;
+ }
+ class_count++;
+ }
+ fclose(file);
+ } else {
+ char errmsg[JVM_MAXPATHLEN];
+ os::lasterror(errmsg, JVM_MAXPATHLEN);
+ tty->print_cr("Loading classlist failed: %s", errmsg);
+ exit(1);
+ }
+
+ return class_count;
+}
+
+// The sizes are good for typical large applications that have a lot of shared
+// classes.
+void MetaspaceShared::estimate_regions_size() {
+ int class_count = count_class(SharedClassListFile);
+ class_count += count_class(ExtraSharedClassListFile);
+
+ if (class_count > LargeThresholdClassCount) {
+ if (class_count < HugeThresholdClassCount) {
+ SET_ESTIMATED_SIZE(Large, ReadOnly);
+ SET_ESTIMATED_SIZE(Large, ReadWrite);
+ SET_ESTIMATED_SIZE(Large, MiscData);
+ SET_ESTIMATED_SIZE(Large, MiscCode);
+ } else {
+ SET_ESTIMATED_SIZE(Huge, ReadOnly);
+ SET_ESTIMATED_SIZE(Huge, ReadWrite);
+ SET_ESTIMATED_SIZE(Huge, MiscData);
+ SET_ESTIMATED_SIZE(Huge, MiscCode);
+ }
+ }
+}
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -30,6 +30,19 @@
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
+#define LargeSharedArchiveSize (300*M)
+#define HugeSharedArchiveSize (800*M)
+#define ReadOnlyRegionPercentage 0.4
+#define ReadWriteRegionPercentage 0.55
+#define MiscDataRegionPercentage 0.03
+#define MiscCodeRegionPercentage 0.02
+#define LargeThresholdClassCount 5000
+#define HugeThresholdClassCount 40000
+
+#define SET_ESTIMATED_SIZE(type, region) \
+ Shared ##region## Size = FLAG_IS_DEFAULT(Shared ##region## Size) ? \
+ (type ## SharedArchiveSize * region ## RegionPercentage) : Shared ## region ## Size
+
class FileMapInfo;
// Class Data Sharing Support
@@ -112,5 +125,8 @@
static void link_one_shared_class(Klass* obj, TRAPS);
static void check_one_shared_class(Klass* obj);
static void link_and_cleanup_shared_classes(TRAPS);
+
+ static int count_class(const char* classlist_file);
+ static void estimate_regions_size() NOT_CDS_RETURN;
};
#endif // SHARE_VM_MEMORY_METASPACE_SHARED_HPP
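
estimate_regions_size() (metaspaceShared.cpp hunk above) counts the classes listed for dumping and, past the 5000/40000 thresholds, enlarges any region whose size flag the user left at its default. For reference, one expansion of the token-pasting macro:

    // SET_ESTIMATED_SIZE(Large, ReadOnly) expands to:
    SharedReadOnlySize = FLAG_IS_DEFAULT(SharedReadOnlySize)
        ? (LargeSharedArchiveSize * ReadOnlyRegionPercentage) // 300M * 0.4 = 120M
        : SharedReadOnlySize;                                 // explicit -XX value wins
    // The four percentages (0.40 + 0.55 + 0.03 + 0.02) partition the archive.
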
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -53,9 +53,11 @@
// initialize performance counters
const char* gen_name = "old";
+ GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
// Generation Counters -- generation 1, 1 subspace
- _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
+ _gen_counters = new GenerationCounters(gen_name, 1, 1,
+ gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
_gc_counters = new CollectorCounters("MSC", 1);
--- a/hotspot/src/share/vm/opto/type.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/opto/type.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -1708,8 +1708,8 @@
// Make a TypeTuple from the range of a method signature
const TypeTuple *TypeTuple::make_range(ciSignature* sig) {
ciType* return_type = sig->return_type();
- uint total_fields = TypeFunc::Parms + return_type->size();
- const Type **field_array = fields(total_fields);
+ uint arg_cnt = return_type->size();
+ const Type **field_array = fields(arg_cnt);
switch (return_type->basic_type()) {
case T_LONG:
field_array[TypeFunc::Parms] = TypeLong::LONG;
@@ -1734,26 +1734,26 @@
default:
ShouldNotReachHere();
}
- return (TypeTuple*)(new TypeTuple(total_fields,field_array))->hashcons();
+ return (TypeTuple*)(new TypeTuple(TypeFunc::Parms + arg_cnt, field_array))->hashcons();
}
// Make a TypeTuple from the domain of a method signature
const TypeTuple *TypeTuple::make_domain(ciInstanceKlass* recv, ciSignature* sig) {
- uint total_fields = TypeFunc::Parms + sig->size();
+ uint arg_cnt = sig->size();
uint pos = TypeFunc::Parms;
const Type **field_array;
if (recv != NULL) {
- total_fields++;
- field_array = fields(total_fields);
+ arg_cnt++;
+ field_array = fields(arg_cnt);
// Use get_const_type here because it respects UseUniqueSubclasses:
field_array[pos++] = get_const_type(recv)->join_speculative(TypePtr::NOTNULL);
} else {
- field_array = fields(total_fields);
+ field_array = fields(arg_cnt);
}
int i = 0;
- while (pos < total_fields) {
+ while (pos < TypeFunc::Parms + arg_cnt) {
ciType* type = sig->type_at(i);
switch (type->basic_type()) {
@@ -1780,7 +1780,8 @@
}
i++;
}
- return (TypeTuple*)(new TypeTuple(total_fields,field_array))->hashcons();
+
+ return (TypeTuple*)(new TypeTuple(TypeFunc::Parms + arg_cnt, field_array))->hashcons();
}
const TypeTuple *TypeTuple::make( uint cnt, const Type **fields ) {
@@ -1789,6 +1790,7 @@
//------------------------------fields-----------------------------------------
// Subroutine call type with space allocated for argument types
+// Memory for Control, I_O, Memory, FramePtr, and ReturnAdr is allocated implicitly
const Type **TypeTuple::fields( uint arg_cnt ) {
const Type **flds = (const Type **)(Compile::current()->type_arena()->Amalloc_4((TypeFunc::Parms+arg_cnt)*sizeof(Type*) ));
flds[TypeFunc::Control ] = Type::CONTROL;
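
fields(arg_cnt) allocates the TypeFunc::Parms header slots itself, as the Amalloc_4 call above shows, so the old callers, which passed TypeFunc::Parms + sig->size(), reserved the header twice. The fix passes the bare argument count and adds Parms back only when sizing the final TypeTuple. With Parms == 5 (Control, I_O, Memory, FramePtr, ReturnAdr), the saving per cached signature works out as:

    // Arithmetic sketch of the allocation contract of TypeTuple::fields():
    static const unsigned Parms = 5;                                         // TypeFunc::Parms
    static unsigned slots_before(unsigned n) { return Parms + (Parms + n); } // n + 10
    static unsigned slots_after (unsigned n) { return Parms + n; }           // n + 5
    // Five Type* entries no longer wasted per signature tuple.
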
--- a/hotspot/src/share/vm/opto/type.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/opto/type.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -635,6 +635,7 @@
static const TypeTuple *make_domain(ciInstanceKlass* recv, ciSignature *sig);
// Subroutine call type with space allocated for argument types
+ // Memory for Control, I_O, Memory, FramePtr, and ReturnAdr is allocated implicitly
static const Type **fields( uint arg_cnt );
virtual const Type *xmeet( const Type *t ) const;
--- a/hotspot/src/share/vm/runtime/java.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/runtime/java.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -430,6 +430,8 @@
}
}
+jint volatile vm_getting_terminated = 0;
+
// Note: before_exit() can be executed only once, if more than one threads
// are trying to shutdown the VM at the same time, only one thread
// can run before_exit() and all other threads must wait.
@@ -460,6 +462,8 @@
}
}
+ OrderAccess::release_store(&vm_getting_terminated, 1);
+
// The only difference between this and Win32's _onexit procs is that
// this version is invoked before any threads get killed.
ExitProc* current = exit_procs;
--- a/hotspot/src/share/vm/services/mallocTracker.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/services/mallocTracker.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -171,8 +171,9 @@
// Total malloc'd memory used by arenas
size_t total_arena() const;
- inline size_t thread_count() {
- return by_type(mtThreadStack)->malloc_count();
+ inline size_t thread_count() const {
+ MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
+ return s->by_type(mtThreadStack)->malloc_count();
}
void reset();
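
thread_count() is logically const (it only reads the mtThreadStack counter) but by_type() is a non-const helper, hence the const_cast. A compressed sketch of the pattern, with hypothetical names:

    // Sketch: logically-const accessor over a non-const lookup helper.
    class SnapshotSketch {
      long _malloc_count[16];
      long* by_type(int t) { return &_malloc_count[t]; }  // non-const helper
    public:
      long thread_count() const {
        // Safe: the cast only enables a read through the returned pointer.
        return *const_cast<SnapshotSketch*>(this)->by_type(3);
      }
    };
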
--- a/hotspot/src/share/vm/services/memBaseline.cpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/services/memBaseline.cpp Fri Aug 29 08:07:13 2014 -0700
@@ -70,15 +70,13 @@
*/
class MallocAllocationSiteWalker : public MallocSiteWalker {
private:
- SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
- _malloc_sites;
+ SortedLinkedList<MallocSite, compare_malloc_size> _malloc_sites;
size_t _count;
// Entries in MallocSiteTable with size = 0 and count = 0,
// when the malloc site is no longer there.
public:
- MallocAllocationSiteWalker(Arena* arena) : _count(0), _malloc_sites(arena) {
- }
+ MallocAllocationSiteWalker() : _count(0) { }
inline size_t count() const { return _count; }
@@ -109,13 +107,12 @@
// Walk all virtual memory regions for baselining
class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
private:
- SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base, ResourceObj::ARENA>
+ SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base>
_virtual_memory_regions;
size_t _count;
public:
- VirtualMemoryAllocationWalker(Arena* a) : _count(0), _virtual_memory_regions(a) {
- }
+ VirtualMemoryAllocationWalker() : _count(0) { }
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
@@ -136,39 +133,30 @@
bool MemBaseline::baseline_summary() {
- assert(_malloc_memory_snapshot == NULL, "Malloc baseline not yet reset");
- assert(_virtual_memory_snapshot == NULL, "Virtual baseline not yet reset");
-
- _malloc_memory_snapshot = new (arena()) MallocMemorySnapshot();
- _virtual_memory_snapshot = new (arena()) VirtualMemorySnapshot();
- if (_malloc_memory_snapshot == NULL || _virtual_memory_snapshot == NULL) {
- return false;
- }
- MallocMemorySummary::snapshot(_malloc_memory_snapshot);
- VirtualMemorySummary::snapshot(_virtual_memory_snapshot);
+ MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
+ VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
return true;
}
bool MemBaseline::baseline_allocation_sites() {
- assert(arena() != NULL, "Just check");
// Malloc allocation sites
- MallocAllocationSiteWalker malloc_walker(arena());
+ MallocAllocationSiteWalker malloc_walker;
if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
return false;
}
- _malloc_sites.set_head(malloc_walker.malloc_sites()->head());
+ _malloc_sites.move(malloc_walker.malloc_sites());
// The malloc sites are collected in size order
_malloc_sites_order = by_size;
// Virtual memory allocation sites
- VirtualMemoryAllocationWalker virtual_memory_walker(arena());
+ VirtualMemoryAllocationWalker virtual_memory_walker;
if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
return false;
}
// Virtual memory allocations are collected in call stack order
- _virtual_memory_allocations.set_head(virtual_memory_walker.virtual_memory_allocations()->head());
+ _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
if (!aggregate_virtual_memory_allocation_sites()) {
return false;
@@ -180,11 +168,6 @@
}
bool MemBaseline::baseline(bool summaryOnly) {
- if (arena() == NULL) {
- _arena = new (std::nothrow, mtNMT) Arena(mtNMT);
- if (arena() == NULL) return false;
- }
-
reset();
_class_count = InstanceKlass::number_of_instance_classes();
@@ -211,8 +194,7 @@
}
bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
- SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site, ResourceObj::ARENA>
- allocation_sites(arena());
+ SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites;
VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
const ReservedMemoryRegion* rgn;
@@ -230,12 +212,12 @@
site->commit_memory(rgn->committed_size());
}
- _virtual_memory_sites.set_head(allocation_sites.head());
+ _virtual_memory_sites.move(&allocation_sites);
return true;
}
MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
- assert(!_malloc_sites.is_empty(), "Detail baseline?");
+ assert(!_malloc_sites.is_empty(), "Not detail baseline");
switch(order) {
case by_size:
malloc_sites_to_size_order();
@@ -251,7 +233,7 @@
}
VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
- assert(!_virtual_memory_sites.is_empty(), "Detail baseline?");
+ assert(!_virtual_memory_sites.is_empty(), "Not detail baseline");
switch(order) {
case by_size:
virtual_memory_sites_to_size_order();
@@ -270,8 +252,7 @@
// Sorting allocations sites in different orders
void MemBaseline::malloc_sites_to_size_order() {
if (_malloc_sites_order != by_size) {
- SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
- tmp(arena());
+ SortedLinkedList<MallocSite, compare_malloc_size> tmp;
// Add malloc sites to sorted linked list to sort into size order
tmp.move(&_malloc_sites);
@@ -283,8 +264,7 @@
void MemBaseline::malloc_sites_to_allocation_site_order() {
if (_malloc_sites_order != by_site) {
- SortedLinkedList<MallocSite, compare_malloc_site, ResourceObj::ARENA>
- tmp(arena());
+ SortedLinkedList<MallocSite, compare_malloc_site> tmp;
// Add malloc sites to sorted linked list to sort into site (address) order
tmp.move(&_malloc_sites);
_malloc_sites.set_head(tmp.head());
@@ -295,8 +275,7 @@
void MemBaseline::virtual_memory_sites_to_size_order() {
if (_virtual_memory_sites_order != by_size) {
- SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size, ResourceObj::ARENA>
- tmp(arena());
+ SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size> tmp;
tmp.move(&_virtual_memory_sites);
@@ -308,10 +287,9 @@
void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
if (_virtual_memory_sites_order != by_size) {
- SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site, ResourceObj::ARENA>
- tmp(arena());
+ SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site> tmp;
- tmp.add(&_virtual_memory_sites);
+ tmp.move(&_virtual_memory_sites);
_virtual_memory_sites.set_head(tmp.head());
tmp.set_head(NULL);
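
The recurring hand-off in this file is move(): it detaches the source list's nodes and re-links them into the destination (re-sorting them for a SortedLinkedList), leaving the source empty, so nodes are neither copied nor freed twice. That is also the bug fixed just above: the reservation-site reorder called add(), which copies entries rather than transferring them. The sort-in-place idiom, as used by malloc_sites_to_size_order():

    // Re-sort a list by draining it through a differently-ordered temporary:
    SortedLinkedList<MallocSite, compare_malloc_size> tmp;
    tmp.move(&_malloc_sites);            // detach nodes, insert in size order
    _malloc_sites.set_head(tmp.head());  // steal the sorted chain back
    tmp.set_head(NULL);                  // tmp no longer owns the nodes
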
--- a/hotspot/src/share/vm/services/memBaseline.hpp Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/src/share/vm/services/memBaseline.hpp Fri Aug 29 08:07:13 2014 -0700
@@ -61,28 +61,22 @@
};
private:
- // All baseline data is stored in this arena
- Arena* _arena;
-
// Summary information
- MallocMemorySnapshot* _malloc_memory_snapshot;
- VirtualMemorySnapshot* _virtual_memory_snapshot;
+ MallocMemorySnapshot _malloc_memory_snapshot;
+ VirtualMemorySnapshot _virtual_memory_snapshot;
size_t _class_count;
// Allocation sites information
// Malloc allocation sites
- LinkedListImpl<MallocSite, ResourceObj::ARENA>
- _malloc_sites;
+ LinkedListImpl<MallocSite> _malloc_sites;
// All virtual memory allocations
- LinkedListImpl<ReservedMemoryRegion, ResourceObj::ARENA>
- _virtual_memory_allocations;
+ LinkedListImpl<ReservedMemoryRegion> _virtual_memory_allocations;
// Virtual memory allocations by allocation sites, always in by_address
// order
- LinkedListImpl<VirtualMemoryAllocationSite, ResourceObj::ARENA>
- _virtual_memory_sites;
+ LinkedListImpl<VirtualMemoryAllocationSite> _virtual_memory_sites;
SortingOrder _malloc_sites_order;
SortingOrder _virtual_memory_sites_order;
@@ -93,30 +87,23 @@
// create a memory baseline
MemBaseline():
_baseline_type(Not_baselined),
- _class_count(0),
- _arena(NULL),
- _malloc_memory_snapshot(NULL),
- _virtual_memory_snapshot(NULL),
- _malloc_sites(NULL) {
+ _class_count(0) {
}
~MemBaseline() {
reset();
- if (_arena != NULL) {
- delete _arena;
- }
}
bool baseline(bool summaryOnly = true);
BaselineType baseline_type() const { return _baseline_type; }
- MallocMemorySnapshot* malloc_memory_snapshot() const {
- return _malloc_memory_snapshot;
+ MallocMemorySnapshot* malloc_memory_snapshot() {
+ return &_malloc_memory_snapshot;
}
- VirtualMemorySnapshot* virtual_memory_snapshot() const {
- return _virtual_memory_snapshot;
+ VirtualMemorySnapshot* virtual_memory_snapshot() {
+ return &_virtual_memory_snapshot;
}
MallocSiteIterator malloc_sites(SortingOrder order);
@@ -133,10 +120,8 @@
// memory
size_t total_reserved_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- assert(_virtual_memory_snapshot != NULL, "No virtual memory snapshot");
- assert(_malloc_memory_snapshot != NULL, "No malloc memory snapshot");
- size_t amount = _malloc_memory_snapshot->total() +
- _virtual_memory_snapshot->total_reserved();
+ size_t amount = _malloc_memory_snapshot.total() +
+ _virtual_memory_snapshot.total_reserved();
return amount;
}
@@ -144,32 +129,30 @@
// virtual memory
size_t total_committed_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- assert(_virtual_memory_snapshot != NULL,
- "Not a snapshot");
- size_t amount = _malloc_memory_snapshot->total() +
- _virtual_memory_snapshot->total_committed();
+ size_t amount = _malloc_memory_snapshot.total() +
+ _virtual_memory_snapshot.total_committed();
return amount;
}
size_t total_arena_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- assert(_malloc_memory_snapshot != NULL, "Not yet baselined");
- return _malloc_memory_snapshot->total_arena();
+ return _malloc_memory_snapshot.total_arena();
}
size_t malloc_tracking_overhead() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- return _malloc_memory_snapshot->malloc_overhead()->size();
+ MemBaseline* bl = const_cast<MemBaseline*>(this);
+ return bl->_malloc_memory_snapshot.malloc_overhead()->size();
}
- const MallocMemory* malloc_memory(MEMFLAGS flag) const {
- assert(_malloc_memory_snapshot != NULL, "Not a snapshot");
- return _malloc_memory_snapshot->by_type(flag);
+ MallocMemory* malloc_memory(MEMFLAGS flag) {
+ assert(baseline_type() != Not_baselined, "Not yet baselined");
+ return _malloc_memory_snapshot.by_type(flag);
}
- const VirtualMemory* virtual_memory(MEMFLAGS flag) const {
- assert(_virtual_memory_snapshot != NULL, "Not a snapshot");
- return _virtual_memory_snapshot->by_type(flag);
+ VirtualMemory* virtual_memory(MEMFLAGS flag) {
+ assert(baseline_type() != Not_baselined, "Not yet baselined");
+ return _virtual_memory_snapshot.by_type(flag);
}
@@ -180,24 +163,19 @@
size_t thread_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
- assert(_malloc_memory_snapshot != NULL, "Baselined?");
- return _malloc_memory_snapshot->thread_count();
+ return _malloc_memory_snapshot.thread_count();
}
// reset the baseline for reuse
void reset() {
_baseline_type = Not_baselined;
- _malloc_memory_snapshot = NULL;
- _virtual_memory_snapshot = NULL;
+ _malloc_memory_snapshot.reset();
+ _virtual_memory_snapshot.reset();
_class_count = 0;
- _malloc_sites = NULL;
- _virtual_memory_sites = NULL;
- _virtual_memory_allocations = NULL;
-
- if (_arena != NULL) {
- _arena->destruct_contents();
- }
+ _malloc_sites.clear();
+ _virtual_memory_sites.clear();
+ _virtual_memory_allocations.clear();
}
private:
@@ -210,8 +188,6 @@
// Aggregate virtual memory allocation by allocation sites
bool aggregate_virtual_memory_allocation_sites();
- Arena* arena() { return _arena; }
-
// Sorting allocation sites in different orders
// Sort allocation sites in size order
void malloc_sites_to_size_order();
--- a/hotspot/test/compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java Fri Aug 29 08:07:13 2014 -0700
@@ -35,7 +35,9 @@
* @summary "Tests unloading of anonymous classes."
* @library /testlibrary /testlibrary/whitebox
* @compile TestAnonymousClassUnloading.java
- * @run main ClassFileInstaller TestAnonymousClassUnloading sun.hotspot.WhiteBox
+ * @run main ClassFileInstaller TestAnonymousClassUnloading
+ * sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation TestAnonymousClassUnloading
*/
public class TestAnonymousClassUnloading {
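
The extra ClassFileInstaller argument matters because sun.hotspot.WhiteBox checks a WhiteBoxPermission when a security manager is present; without WhiteBox$WhiteBoxPermission on the boot class path, that check fails before the test body runs. A minimal sketch, not part of the patch, of the bootstrap these tags enable, assuming the flags on the @run line above (the class name is hypothetical):

    import sun.hotspot.WhiteBox;

    public class WhiteBoxBootstrapSketch {
        public static void main(String[] args) {
            // getWhiteBox() performs a WhiteBoxPermission check when a
            // security manager is installed, which is why the nested
            // WhiteBox$WhiteBoxPermission class must be installed next to
            // WhiteBox itself on -Xbootclasspath/a:.
            WhiteBox wb = WhiteBox.getWhiteBox();
            System.out.println("WhiteBox ready: " + (wb != null));
        }
    }
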
--- a/hotspot/test/compiler/classUnloading/methodUnloading/TestMethodUnloading.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/compiler/classUnloading/methodUnloading/TestMethodUnloading.java Fri Aug 29 08:07:13 2014 -0700
@@ -36,7 +36,7 @@
* @build WorkerClass
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation -XX:-UseCompressedOops -XX:+UseParallelGC -XX:CompileOnly=TestMethodUnloading::doWork TestMethodUnloading
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation -XX:-UseCompressedOops -XX:CompileOnly=TestMethodUnloading::doWork TestMethodUnloading
*/
public class TestMethodUnloading {
private static final String workerClassName = "WorkerClass";
--- a/hotspot/test/compiler/rtm/cli/TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/compiler/rtm/cli/TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig.java Fri Aug 29 08:07:13 2014 -0700
@@ -54,16 +54,19 @@
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
TestPrintPreciseRTMLockingStatisticsBase.DEFAULT_VALUE,
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
"-XX:+UseRTMLocking");
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
TestPrintPreciseRTMLockingStatisticsBase.DEFAULT_VALUE,
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
"-XX:-UseRTMLocking", prepareOptionValue("true"));
// verify that option could be turned on
CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "true",
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
"-XX:+UseRTMLocking", prepareOptionValue("true"));
}
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java Fri Aug 29 08:07:13 2014 -0700
@@ -63,13 +63,16 @@
// verify default value
CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt",
TestUseRTMDeoptOptionOnSupportedConfig.DEFAULT_VALUE,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
"-XX:+UseRTMLocking");
// verify that option is off when UseRTMLocking is off
- CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt",
- "false", "-XX:-UseRTMLocking", "-XX:+UseRTMDeopt");
+ CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt", "false",
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
+ "-XX:-UseRTMLocking", "-XX:+UseRTMDeopt");
// verify that option could be turned on
- CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt",
- "true", "-XX:+UseRTMLocking", "-XX:+UseRTMDeopt");
+ CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt", "true",
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
+ "-XX:+UseRTMLocking", "-XX:+UseRTMDeopt");
}
public static void main(String args[]) throws Throwable {
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnSupportedConfig.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionOnSupportedConfig.java Fri Aug 29 08:07:13 2014 -0700
@@ -59,24 +59,31 @@
new String[]{
RTMGenericCommandLineOptionTest.RTM_INSTR_ERROR,
unrecongnizedOption
- }, ExitCode.OK, "-XX:+UseRTMLocking"
+ }, ExitCode.OK,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
+ "-XX:+UseRTMLocking"
);
CommandLineOptionTest.verifySameJVMStartup(null,
new String[]{
RTMGenericCommandLineOptionTest.RTM_INSTR_ERROR,
unrecongnizedOption
- }, ExitCode.OK, "-XX:-UseRTMLocking"
+ }, ExitCode.OK,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
+ "-XX:-UseRTMLocking"
);
// verify that UseRTMLocking is off by default
CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking",
- TestUseRTMLockingOptionOnSupportedConfig.DEFAULT_VALUE);
+ TestUseRTMLockingOptionOnSupportedConfig.DEFAULT_VALUE,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
// verify that we can change UseRTMLocking value
CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking",
TestUseRTMLockingOptionOnSupportedConfig.DEFAULT_VALUE,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
"-XX:-UseRTMLocking");
CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking",
- "true", "-XX:+UseRTMLocking");
+ "true", CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
+ "-XX:+UseRTMLocking");
}
public static void main(String args[]) throws Throwable {
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionWithBiasedLocking.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMLockingOptionWithBiasedLocking.java Fri Aug 29 08:07:13 2014 -0700
@@ -54,18 +54,22 @@
// verify that we will not get a warning
CommandLineOptionTest.verifySameJVMStartup(null,
new String[] { warningMessage }, ExitCode.OK,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
"-XX:+UseRTMLocking", "-XX:-UseBiasedLocking");
// verify that we will get a warning
CommandLineOptionTest.verifySameJVMStartup(
new String[] { warningMessage }, null, ExitCode.OK,
+ CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
"-XX:+UseRTMLocking", "-XX:+UseBiasedLocking");
// verify that UseBiasedLocking is false when we use rtm locking
CommandLineOptionTest.verifyOptionValueForSameVM("UseBiasedLocking",
- "false", "-XX:+UseRTMLocking");
+ "false", CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
+ "-XX:+UseRTMLocking");
// verify that we can't turn on biased locking when
// using rtm locking
CommandLineOptionTest.verifyOptionValueForSameVM("UseBiasedLocking",
- "false", "-XX:+UseRTMLocking", "-XX:+UseBiasedLocking");
+ "false", CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
+ "-XX:+UseRTMLocking", "-XX:+UseBiasedLocking");
}
public static void main(String args[]) throws Throwable {
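
Every RTM test launch above now prepends UNLOCK_EXPERIMENTAL_VM_OPTIONS, since UseRTMLocking must be unlocked before it can be toggled on this configuration. A minimal sketch of the shared launch pattern, using the testlibrary helpers these tests already import (the class name is illustrative):

    import com.oracle.java.testlibrary.OutputAnalyzer;
    import com.oracle.java.testlibrary.ProcessTools;

    public class RTMLaunchSketch {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                    "-XX:+UnlockExperimentalVMOptions", // unlock first
                    "-XX:+UseRTMLocking",               // then the RTM flag
                    "-version");
            OutputAnalyzer output = new OutputAnalyzer(pb.start());
            output.shouldHaveExitValue(0);
        }
    }
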
--- a/hotspot/test/runtime/CompressedOops/CompressedClassPointers.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/runtime/CompressedOops/CompressedClassPointers.java Fri Aug 29 08:07:13 2014 -0700
@@ -26,7 +26,6 @@
* @bug 8024927
* @summary Testing address of compressed class pointer space as best as possible.
* @library /testlibrary
- * @ignore 8055164
*/
import com.oracle.java.testlibrary.*;
@@ -89,7 +88,6 @@
"-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("HeapBaseMinAddress must be at least");
- output.shouldContain("HotSpot");
output.shouldHaveExitValue(0);
}
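
Besides un-ignoring the test, the hunk drops the assertion on the "HotSpot" banner line and keeps only the error text. A minimal sketch of the surviving check; the flag value below is illustrative (deliberately below the platform default) and the test's full argument list is not shown in this hunk:

    import com.oracle.java.testlibrary.OutputAnalyzer;
    import com.oracle.java.testlibrary.ProcessTools;

    public class HeapBaseMinAddressSketch {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                    "-XX:HeapBaseMinAddress=1m", // below the default minimum
                    "-version");
            OutputAnalyzer output = new OutputAnalyzer(pb.start());
            // Only the error text is asserted, not the banner line.
            output.shouldContain("HeapBaseMinAddress must be at least");
            output.shouldHaveExitValue(0);
        }
    }
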
--- a/hotspot/test/runtime/NMT/CommandLineEmptyArgument.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/runtime/NMT/CommandLineEmptyArgument.java Fri Aug 29 08:07:13 2014 -0700
@@ -26,7 +26,6 @@
* @key nmt
* @summary Empty argument to NMT should result in an informative error message
* @library /testlibrary
- * @ignore 8055051
*/
import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/JcmdDetailDiff.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/runtime/NMT/JcmdDetailDiff.java Fri Aug 29 08:07:13 2014 -0700
@@ -62,21 +62,18 @@
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
- output.shouldContain("WB_NMTReserveMemory");
wb.NMTCommitMemory(addr, commitSize);
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)");
- output.shouldContain("WB_NMTReserveMemory");
wb.NMTUncommitMemory(addr, commitSize);
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
output = new OutputAnalyzer(pb.start());
output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
- output.shouldContain("WB_NMTReserveMemory");
wb.NMTReleaseMemory(addr, reserveSize);
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
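
The three dropped shouldContain("WB_NMTReserveMemory") checks leave only the aggregate region line, since the native frame name is not guaranteed to appear in detail output. A minimal sketch of the surviving jcmd check, assuming the testlibrary helpers used above (the class name is illustrative):

    import com.oracle.java.testlibrary.JDKToolFinder;
    import com.oracle.java.testlibrary.OutputAnalyzer;
    import com.oracle.java.testlibrary.ProcessTools;

    public class JcmdDiffSketch {
        public static void main(String[] args) throws Exception {
            String pid = Integer.toString(ProcessTools.getProcessId());
            ProcessBuilder pb = new ProcessBuilder(
                    JDKToolFinder.getJDKTool("jcmd"), pid,
                    "VM.native_memory", "detail.diff", "scale=KB");
            OutputAnalyzer output = new OutputAnalyzer(pb.start());
            // Only the aggregate region line is asserted now.
            output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
        }
    }
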
--- a/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java Fri Aug 29 08:07:13 2014 -0700
@@ -22,10 +22,9 @@
*/
/*
- * @key stress
* @test
* @summary Test corner case that overflows malloc site hashtable bucket
- * @key nmt jcmd
+ * @key nmt jcmd stress
* @library /testlibrary /testlibrary/whitebox
* @ignore - This test is disabled since it will stress NMT and time out during normal testing
* @build MallocSiteHashOverflow
--- a/hotspot/test/runtime/NMT/MallocStressTest.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/runtime/NMT/MallocStressTest.java Fri Aug 29 08:07:13 2014 -0700
@@ -22,10 +22,9 @@
*/
/*
- * @key stress
* @test
* @summary Stress test for malloc tracking
- * @key nmt jcmd
+ * @key nmt jcmd stress
* @library /testlibrary /testlibrary/whitebox
* @build MallocStressTest
* @ignore - This test is disabled since it will stress NMT and time out during normal testing
--- a/hotspot/test/runtime/NMT/NMTWithCDS.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/runtime/NMT/NMTWithCDS.java Fri Aug 29 08:07:13 2014 -0700
@@ -34,14 +34,15 @@
public static void main(String[] args) throws Exception {
ProcessBuilder pb;
- pb = ProcessTools.createJavaProcessBuilder("-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
+ pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
try {
output.shouldContain("Loading classes to share");
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder(
- "-XX:NativeMemoryTracking=detail", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
+ "-XX:+UnlockDiagnosticVMOptions", "-XX:NativeMemoryTracking=detail", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("sharing");
output.shouldHaveExitValue(0);
--- a/hotspot/test/runtime/NMT/VirtualAllocCommitUncommitRecommit.java Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/runtime/NMT/VirtualAllocCommitUncommitRecommit.java Fri Aug 29 08:07:13 2014 -0700
@@ -26,7 +26,6 @@
* @summary Test reserve/commit/uncommit/release of virtual memory and that we track it correctly
* @key nmt jcmd
* @library /testlibrary /testlibrary/whitebox
- * @ignore
* @build VirtualAllocCommitUncommitRecommit
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail VirtualAllocCommitUncommitRecommit
@@ -43,8 +42,8 @@
public static void main(String args[]) throws Exception {
OutputAnalyzer output;
- long commitSize = 4 * 1024; // 4KB
- long reserveSize = 1024 * 1024; // 1024KB
+ long commitSize = 128 * 1024; // 128KB
+ long reserveSize = 4 * 1024 * 1024; // 4096KB
long addr;
String pid = Integer.toString(ProcessTools.getProcessId());
@@ -63,11 +62,11 @@
"VM.native_memory", "detail" });
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=0KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
long addrA = addr;
@@ -84,24 +83,24 @@
wb.NMTCommitMemory(addrD, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=512KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// uncommit BC
wb.NMTUncommitMemory(addrB, commitSize);
wb.NMTUncommitMemory(addrC, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=8KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=256KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// commit EF
@@ -109,22 +108,22 @@
wb.NMTCommitMemory(addrF, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=512KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// uncommit A
wb.NMTUncommitMemory(addrA, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=12KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=384KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// commit ABC
@@ -133,11 +132,11 @@
wb.NMTCommitMemory(addrC, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=24KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=768KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// uncommit ABCDEF
@@ -149,11 +148,11 @@
wb.NMTUncommitMemory(addrF, commitSize);
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+ output.shouldContain("Test (reserved=4096KB, committed=0KB)");
if (has_nmt_detail) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize)
- + "\\] reserved 1024KB for Test");
+ + "\\] reserved 4096KB for Test");
}
// release
@@ -161,6 +160,6 @@
output = new OutputAnalyzer(pb.start());
output.shouldNotContain("Test (reserved=");
output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
- + Long.toHexString(addr + reserveSize) + "\\] reserved");
+ + Long.toHexString(addr + reserveSize) + "\\] reserved 4096KB for Test");
}
}
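
The new expected values follow directly from the resized regions: the test commits and uncommits 128KB chunks (A through F) inside a 4096KB reservation, so each committed chunk contributes 128KB. A small arithmetic sketch (class and helper are hypothetical) reproducing the strings asserted above:

    public class CommitArithmeticSketch {
        private static String expect(int committedChunks) {
            // Each chunk is 128KB inside the fixed 4096KB reservation.
            return "Test (reserved=4096KB, committed="
                    + (committedChunks * 128) + "KB)";
        }

        public static void main(String[] args) {
            System.out.println(expect(4)); // commit A,B,C,D -> 512KB
            System.out.println(expect(2)); // uncommit B,C   -> 256KB
            System.out.println(expect(4)); // commit E,F     -> 512KB
            System.out.println(expect(3)); // uncommit A     -> 384KB
            System.out.println(expect(6)); // commit A,B,C   -> 768KB
            System.out.println(expect(0)); // uncommit A-F   -> 0KB
        }
    }
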
--- a/hotspot/test/runtime/jsig/Test8017498.sh Thu Aug 28 17:05:41 2014 +0200
+++ b/hotspot/test/runtime/jsig/Test8017498.sh Fri Aug 29 08:07:13 2014 -0700
@@ -31,15 +31,14 @@
## @bug 8022301
## @bug 8025519
## @summary sigaction(sig) results in process hang/timed-out if sig is much greater than SIGRTMAX
-## @ignore 8041727
## @run shell/timeout=60 Test8017498.sh
##
-if [ "${TESTSRC}" = "" ]
-then
- TESTSRC=${PWD}
+if [ -z "${TESTSRC}" ]; then
+ TESTSRC="${PWD}"
echo "TESTSRC not set. Using "${TESTSRC}" as default"
fi
+
echo "TESTSRC=${TESTSRC}"
## Adding common setup Variables for running shell tests.
. ${TESTSRC}/../../test_env.sh
@@ -52,13 +51,13 @@
Linux)
echo "Testing on Linux"
gcc_cmd=`which gcc`
- if [ "x$gcc_cmd" == "x" ]; then
+ if [ -z "$gcc_cmd" ]; then
echo "WARNING: gcc not found. Cannot execute test." 2>&1
exit 0;
fi
MY_LD_PRELOAD=${TESTJAVA}${FS}jre${FS}lib${FS}${VM_CPU}${FS}libjsig.so
- if [ "$VM_BITS" == "32" ] && [ "$VM_CPU" != "arm" ] && [ "$VM_CPU" != "ppc" ]; then
- EXTRA_CFLAG=-m32
+ if [ "$VM_BITS" = "32" ] && [ "$VM_CPU" != "arm" ] && [ "$VM_CPU" != "ppc" ]; then
+ EXTRA_CFLAG=-m32
fi
echo MY_LD_PRELOAD = ${MY_LD_PRELOAD}
;;
@@ -70,7 +69,7 @@
THIS_DIR=.
-cp ${TESTSRC}${FS}*.java ${THIS_DIR}
+cp "${TESTSRC}${FS}"*.java "${THIS_DIR}"
${COMPILEJAVA}${FS}bin${FS}javac *.java
$gcc_cmd -DLINUX -fPIC -shared \
@@ -80,16 +79,19 @@
-I${COMPILEJAVA}${FS}include${FS}linux \
${TESTSRC}${FS}TestJNI.c
+if [ $? -ne 0 ] ; then
+ echo "Compile failed, Ignoring failed compilation and forcing the test to pass"
+ exit 0
+fi
+
# run the java test in the background
cmd="LD_PRELOAD=$MY_LD_PRELOAD \
${TESTJAVA}${FS}bin${FS}java \
-Djava.library.path=. -server TestJNI 100"
-echo "$cmd > test.out 2>&1"
-eval $cmd > test.out 2>&1
+echo "$cmd > test.out"
+eval $cmd > test.out
-grep "old handler" test.out > ${NULL}
-if [ $? = 0 ]
-then
+if grep "old handler" test.out > ${NULL}; then
echo "Test Passed"
exit 0
fi