--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Feb 07 09:41:36 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Feb 10 13:31:22 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2266,7 +2266,7 @@
// (for efficiency/performance)
false);
// Setting next fields of discovered
- // lists requires a barrier.
+ // lists does not require a barrier.
}
size_t G1CollectedHeap::capacity() const {
@@ -5202,9 +5202,12 @@
bool _process_symbols;
int _symbols_processed;
int _symbols_removed;
+
+ bool _do_in_parallel;
public:
G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
+ _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
_process_strings(process_strings), _strings_processed(0), _strings_removed(0),
_process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
@@ -5219,16 +5222,16 @@
}
~G1StringSymbolTableUnlinkTask() {
- guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
+ guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
StringTable::parallel_claimed_index(), _initial_string_table_size));
- guarantee(!_process_strings || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
+ guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
}
void work(uint worker_id) {
- if (G1CollectedHeap::use_parallel_gc_threads()) {
+ if (_do_in_parallel) {
int strings_processed = 0;
int strings_removed = 0;
int symbols_processed = 0;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Fri Feb 07 09:41:36 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Mon Feb 10 13:31:22 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,13 +86,26 @@
#define G1_PARTIAL_ARRAY_MASK 0x2
-template <class T> inline bool has_partial_array_mask(T* ref) {
+inline bool has_partial_array_mask(oop* ref) {
return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}
-template <class T> inline T* set_partial_array_mask(T obj) {
+// We never encode partial array oops as narrowOop*, so return false immediately.
+// This allows the compiler to generate optimized code when popping references from
+// the work queue.
+inline bool has_partial_array_mask(narrowOop* ref) {
+ assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+ return false;
+}
+
+// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+// We always encode partial arrays as regular oops, to allow the
+// specialization of has_partial_array_mask() for narrowOops above.
+// This means that unintentional use of this method with narrowOops is caught
+// by the compiler.
+inline oop* set_partial_array_mask(oop obj) {
assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
- return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+ return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
}
template <class T> inline oop clear_partial_array_mask(T* ref) {
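
The overload split works because partial-array entries are only ever pushed onto the task queue as oop*, so has_partial_array_mask(narrowOop*) can return the constant false and the compiler can drop the branch entirely on narrow-oop pop paths. A self-contained sketch of the underlying low-bit tagging, assuming objects are at least 4-byte aligned (names are illustrative, not the HotSpot definitions):

    #include <cassert>
    #include <cstdint>

    // Heap objects are aligned, so bit 0x2 of a real object pointer is
    // always clear and can be borrowed to flag "partial array" entries.
    static const uintptr_t MASK = 0x2;

    inline void* set_partial(void* obj) {
      assert(((uintptr_t)obj & MASK) == 0 && "Information loss!");
      return (void*)((uintptr_t)obj | MASK);
    }

    inline bool has_partial(void* ref) {
      return ((uintptr_t)ref & MASK) != 0;
    }

    inline void* clear_partial(void* ref) {
      return (void*)((uintptr_t)ref & ~MASK);
    }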
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp Fri Feb 07 09:41:36 2014 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp Mon Feb 10 13:31:22 2014 +0100
@@ -95,11 +95,11 @@
uint mt_discovery_degree,
bool atomic_discovery,
BoolObjectClosure* is_alive_non_header,
- bool discovered_list_needs_barrier) :
+ bool discovered_list_needs_post_barrier) :
_discovering_refs(false),
_enqueuing_is_done(false),
_is_alive_non_header(is_alive_non_header),
- _discovered_list_needs_barrier(discovered_list_needs_barrier),
+ _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier),
_processing_is_mt(mt_processing),
_next_id(0)
{
@@ -490,13 +490,13 @@
} else {
new_next = _next;
}
-
- if (UseCompressedOops) {
- // Remove Reference object from list.
- oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
- } else {
- // Remove Reference object from list.
- oopDesc::store_heap_oop((oop*)_prev_next, new_next);
+ // Remove Reference object from the discovered list. Note that G1 does not need a
+ // pre-barrier here because we know the Reference has already been found/marked;
+ // that is how it ended up in the discovered list in the first place.
+ oop_store_raw(_prev_next, new_next);
+ if (_discovered_list_needs_post_barrier && _prev_next != _refs_list.adr_head()) {
+ // Needs post-barrier and this is not the list head (which is not on the heap)
+ oopDesc::bs()->write_ref_field(_prev_next, new_next);
}
NOT_PRODUCT(_removed++);
_refs_list.dec_length(1);
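
Here oop_store_raw collapses the old UseCompressedOops branching into one call, and the post barrier is applied only when the updated slot is a real heap field: the list head lives outside the heap, so dirtying a card for it would be meaningless. A conceptual card-table sketch of what a post barrier does (G1's real queue-based barrier also filters same-region writes and enqueues cards, so this shows only the core idea):

    #include <cstdint>

    // Conceptual card-table post barrier (illustrative; the real BarrierSet
    // interface is more involved). The dirty card is derived from the
    // *field* address, which is why an off-heap slot such as the list head
    // must never go through the barrier.
    static const int kCardShift = 9;   // 512-byte cards, as in HotSpot
    extern uint8_t* card_byte_map;     // assumed map, biased by heap base

    inline void post_barrier(void* field_addr) {
      card_byte_map[(uintptr_t)field_addr >> kCardShift] = 0;  // 0 == dirty
    }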
@@ -544,7 +544,7 @@
OopClosure* keep_alive,
VoidClosure* complete_gc) {
assert(policy != NULL, "Must have a non-NULL policy");
- DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
// Decide which softly reachable refs should be kept alive.
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
@@ -584,7 +584,7 @@
BoolObjectClosure* is_alive,
OopClosure* keep_alive) {
assert(discovery_is_atomic(), "Error");
- DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
@@ -621,7 +621,7 @@
OopClosure* keep_alive,
VoidClosure* complete_gc) {
assert(!discovery_is_atomic(), "Error");
- DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
@@ -664,7 +664,7 @@
OopClosure* keep_alive,
VoidClosure* complete_gc) {
ResourceMark rm;
- DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
while (iter.has_next()) {
iter.update_discovered();
iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
@@ -782,8 +782,8 @@
void ReferenceProcessor::set_discovered(oop ref, oop value) {
java_lang_ref_Reference::set_discovered_raw(ref, value);
- if (_discovered_list_needs_barrier) {
- oopDesc::bs()->write_ref_field(ref, value);
+ if (_discovered_list_needs_post_barrier) {
+ oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
}
}
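
The important detail in set_discovered() is that the barrier now receives the address of the discovered field rather than the Reference object itself; a card-table-style barrier keyed on the wrong address can dirty the wrong card. Continuing the post_barrier sketch above, with made-up addresses:

    #include <cstdint>

    // If the discovered field sits on a different 512-byte card than the
    // object header, dirtying the header's card leaves the updated field
    // invisible to the card scan.
    extern void post_barrier(void* field_addr);  // from the sketch above

    void barrier_on_the_right_slot() {
      void* ref             = (void*)0x10000;  // header -> card 0x10000 >> 9
      void* discovered_addr = (void*)0x10208;  // field  -> the next card
      // post_barrier(ref);            // wrong: marks only the header's card
      post_barrier(discovered_addr);   // right: marks the field's card
    }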
@@ -980,7 +980,7 @@
void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
assert(!discovery_is_atomic(), "Else why call this method?");
- DiscoveredListIterator iter(refs_list, NULL, NULL);
+ DiscoveredListIterator iter(refs_list, NULL, NULL, _discovered_list_needs_post_barrier);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
oop next = java_lang_ref_Reference::next(iter.obj());
@@ -1076,7 +1076,7 @@
// elided this out for G1, but left in the test for some future
// collector that might have need for a pre-barrier here, e.g.:-
// oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
- assert(!_discovered_list_needs_barrier || UseG1GC,
+ assert(!_discovered_list_needs_post_barrier || UseG1GC,
"Need to check non-G1 collector: "
"may need a pre-write-barrier for CAS from NULL below");
oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
@@ -1087,7 +1087,7 @@
// is necessary.
refs_list.set_head(obj);
refs_list.inc_length(1);
- if (_discovered_list_needs_barrier) {
+ if (_discovered_list_needs_post_barrier) {
oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
}
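
In the MT case each worker discovers onto its own list, so the only contended update is the CAS on the Reference's discovered field, which guarantees each Reference is claimed by at most one worker. A simplified sketch under those assumptions (illustrative names, no GC barriers, and the same self-loop end-of-list sentinel the surrounding code uses):

    #include <atomic>

    // Each worker appends to its own list, so only the Reference's own
    // 'discovered' field needs a CAS: it ensures two workers cannot both
    // claim the same Reference; head updates never race.
    struct Ref { std::atomic<Ref*> discovered{nullptr}; };
    struct RefList { Ref* head = nullptr; };

    bool try_discover(RefList& my_list, Ref* obj) {
      // A self-loop marks the end of the list: a non-null value
      // distinguishes "discovered, last element" from "not discovered".
      Ref* next = (my_list.head != nullptr) ? my_list.head : obj;
      Ref* expected = nullptr;
      if (!obj->discovered.compare_exchange_strong(expected, next)) {
        return false;  // another worker claimed this Reference first
      }
      my_list.head = obj;  // list is worker-private, a plain store suffices
      return true;
    }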
@@ -1240,7 +1240,7 @@
if (_discovery_is_mt) {
add_to_discovered_list_mt(*list, obj, discovered_addr);
} else {
- // If "_discovered_list_needs_barrier", we do write barriers when
+ // If "_discovered_list_needs_post_barrier", we do write barriers when
// updating the discovered reference list. Otherwise, we do a raw store
// here: the field will be visited later when processing the discovered
// references.
@@ -1252,10 +1252,10 @@
// pre-value, we can safely elide the pre-barrier here for the case of G1.
// e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
assert(discovered == NULL, "control point invariant");
- assert(!_discovered_list_needs_barrier || UseG1GC,
+ assert(!_discovered_list_needs_post_barrier || UseG1GC,
"For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
oop_store_raw(discovered_addr, next_discovered);
- if (_discovered_list_needs_barrier) {
+ if (_discovered_list_needs_post_barrier) {
oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
}
list->set_head(obj);
@@ -1351,7 +1351,7 @@
OopClosure* keep_alive,
VoidClosure* complete_gc,
YieldClosure* yield) {
- DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
oop obj = iter.obj();
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp Fri Feb 07 09:41:36 2014 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp Mon Feb 10 13:31:22 2014 +0100
@@ -99,6 +99,7 @@
oop _referent;
OopClosure* _keep_alive;
BoolObjectClosure* _is_alive;
+ bool _discovered_list_needs_post_barrier;
DEBUG_ONLY(
oop _first_seen; // cyclic linked list check
@@ -112,7 +113,8 @@
public:
inline DiscoveredListIterator(DiscoveredList& refs_list,
OopClosure* keep_alive,
- BoolObjectClosure* is_alive):
+ BoolObjectClosure* is_alive,
+ bool discovered_list_needs_post_barrier = false):
_refs_list(refs_list),
_prev_next(refs_list.adr_head()),
_prev(NULL),
@@ -126,7 +128,8 @@
#endif
_next(NULL),
_keep_alive(keep_alive),
- _is_alive(is_alive)
+ _is_alive(is_alive),
+ _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
{ }
// End Of List.
@@ -228,12 +231,12 @@
bool _discovery_is_mt; // true if reference discovery is MT.
// If true, setting "next" field of a discovered refs list requires
- // write barrier(s). (Must be true if used in a collector in which
+ // a post write barrier. (Must be true if used in a collector in which
// elements of a discovered list may be moved during discovery: for
// example, a collector like Garbage-First that moves objects during a
// long-term concurrent marking phase that does weak reference
// discovery.)
- bool _discovered_list_needs_barrier;
+ bool _discovered_list_needs_post_barrier;
bool _enqueuing_is_done; // true if all weak references enqueued
bool _processing_is_mt; // true during phases when
@@ -380,8 +383,8 @@
protected:
// Set the 'discovered' field of the given reference to
- // the given value - emitting barriers depending upon
- // the value of _discovered_list_needs_barrier.
+ // the given value - emitting post barriers depending upon
+ // the value of _discovered_list_needs_post_barrier.
void set_discovered(oop ref, oop value);
// "Preclean" the given discovered reference list
@@ -425,7 +428,7 @@
bool mt_discovery = false, uint mt_discovery_degree = 1,
bool atomic_discovery = true,
BoolObjectClosure* is_alive_non_header = NULL,
- bool discovered_list_needs_barrier = false);
+ bool discovered_list_needs_post_barrier = false);
// RefDiscoveryPolicy values
enum DiscoveryPolicy {
--- a/hotspot/src/share/vm/prims/whitebox.cpp Fri Feb 07 09:41:36 2014 -0800
+++ b/hotspot/src/share/vm/prims/whitebox.cpp Mon Feb 10 13:31:22 2014 +0100
@@ -105,7 +105,7 @@
WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
CollectorPolicy * p = Universe::heap()->collector_policy();
gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
- SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT,
+ SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Space alignment "SIZE_FORMAT" Heap alignment "SIZE_FORMAT,
p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(),
p->space_alignment(), p->heap_alignment());
}
--- a/hotspot/src/share/vm/runtime/arguments.cpp Fri Feb 07 09:41:36 2014 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Mon Feb 10 13:31:22 2014 +0100
@@ -3818,18 +3818,24 @@
}
jint Arguments::adjust_after_os() {
-#if INCLUDE_ALL_GCS
- if (UseParallelGC || UseParallelOldGC) {
- if (UseNUMA) {
+ if (UseNUMA) {
+ if (UseParallelGC || UseParallelOldGC) {
if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
- FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+ FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
}
- // For those collectors or operating systems (eg, Windows) that do
- // not support full UseNUMA, we will map to UseNUMAInterleaving for now
- UseNUMAInterleaving = true;
+ }
+ // UseNUMAInterleaving is enabled for all collectors and
+ // platforms when UseNUMA is enabled. NUMA-aware collectors
+ // such as the parallel collector on Linux and Solaris will
+ // interleave the old gen and survivor spaces on top of the NUMA
+ // allocation policy for the eden space.
+ // Non-NUMA-aware collectors such as CMS, G1 and the serial
+ // collector on all platforms, and the parallel collector on
+ // Windows, will interleave all of the heap spaces across NUMA nodes.
+ if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
+ FLAG_SET_ERGO(bool, UseNUMAInterleaving, true);
}
}
-#endif // INCLUDE_ALL_GCS
return JNI_OK;
}
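
The FLAG_IS_DEFAULT guard keeps the ergonomic update from clobbering a user decision, so an explicit -XX:-UseNUMAInterleaving on the command line still wins over the UseNUMA-driven default. A minimal sketch of that pattern, with an illustrative type standing in for the real flag macros:

    // The is_default guard mirrors FLAG_IS_DEFAULT; the assignment mirrors
    // FLAG_SET_ERGO. The real FLAG_* macros also record the flag's origin.
    struct BoolFlag {
      bool value;
      bool is_default;  // true until the command line assigns the flag
    };

    inline void set_ergonomically(BoolFlag& f, bool v) {
      if (f.is_default) {  // user-supplied values always win
        f.value = v;
      }
    }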
--- a/hotspot/src/share/vm/utilities/bitMap.cpp Fri Feb 07 09:41:36 2014 -0800
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp Mon Feb 10 13:31:22 2014 +0100
@@ -107,7 +107,7 @@
while (true) {
intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
if (res == w) break;
- w = *pw;
+ w = res;
nw = value ? (w | ~mr) : (w & mr);
}
}
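
The one-line fix uses the value returned by Atomic::cmpxchg_ptr instead of re-reading *pw: on failure the CAS already reports the word's current contents, and a separate load could observe yet another value than the one the comparison failed against. The same retry shape, sketched portably with std::atomic (whose compare_exchange updates the expected value in place on failure):

    #include <atomic>
    #include <cstdint>

    // Portable sketch of the fixed loop. compare_exchange_weak writes the
    // observed value back into 'w' on failure, which is exactly the
    // "w = res" fix above; no extra load of the word is needed.
    void par_put_range(std::atomic<intptr_t>& word, intptr_t mr, bool value) {
      intptr_t w = word.load();
      intptr_t nw = value ? (w | ~mr) : (w & mr);
      while (!word.compare_exchange_weak(w, nw)) {
        nw = value ? (w | ~mr) : (w & mr);  // recompute from returned value
      }
    }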
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/TestVerifySilently.java Mon Feb 10 13:31:22 2014 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestVerifySilently.java
+ * @key gc
+ * @bug 8032771
+ * @summary Test silent verification.
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.util.ArrayList;
+import java.util.Collections;
+
+class RunSystemGC {
+ public static void main(String args[]) throws Exception {
+ System.gc();
+ }
+}
+
+
+public class TestVerifySilently {
+ private static String[] getTestJavaOpts() {
+ String testVmOptsStr = System.getProperty("test.java.opts", "");
+ if (!testVmOptsStr.isEmpty()) {
+ return testVmOptsStr.split(" ");
+ } else {
+ return new String[] {};
+ }
+ }
+
+ private static OutputAnalyzer runTest(boolean verifySilently) throws Exception {
+ ArrayList<String> vmOpts = new ArrayList<String>();
+
+ Collections.addAll(vmOpts, getTestJavaOpts());
+ Collections.addAll(vmOpts, new String[] {"-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+VerifyDuringStartup",
+ "-XX:+VerifyBeforeGC",
+ "-XX:+VerifyAfterGC",
+ "-XX:" + (verifySilently ? "+":"-") + "VerifySilently",
+ RunSystemGC.class.getName()});
+ ProcessBuilder pb =
+ ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+ System.out.println("Output:\n" + output.getOutput());
+ return output;
+ }
+
+
+ public static void main(String args[]) throws Exception {
+
+ OutputAnalyzer output;
+
+ output = runTest(false);
+ output.shouldContain("[Verifying");
+ output.shouldHaveExitValue(0);
+
+ output = runTest(true);
+ output.shouldNotContain("[Verifying");
+ output.shouldHaveExitValue(0);
+ }
+}
--- a/hotspot/test/gc/arguments/TestMaxHeapSizeTools.java Fri Feb 07 09:41:36 2014 -0800
+++ b/hotspot/test/gc/arguments/TestMaxHeapSizeTools.java Mon Feb 10 13:31:22 2014 +0100
@@ -41,8 +41,8 @@
public long initialHeapSize;
public long maxHeapSize;
- public long minAlignment;
- public long maxAlignment;
+ public long spaceAlignment;
+ public long heapAlignment;
}
class TestMaxHeapSizeTools {
@@ -192,7 +192,7 @@
// Unfortunately there is no other way to retrieve the minimum heap size and
// the alignments.
- Matcher m = Pattern.compile("Minimum heap \\d+ Initial heap \\d+ Maximum heap \\d+ Min alignment \\d+ Max alignment \\d+").
+ Matcher m = Pattern.compile("Minimum heap \\d+ Initial heap \\d+ Maximum heap \\d+ Space alignment \\d+ Heap alignment \\d+").
matcher(output.getStdout());
if (!m.find()) {
throw new RuntimeException("Could not find heap size string.");
@@ -204,8 +204,8 @@
val.minHeapSize = valueAfter(match, "Minimum heap ");
val.initialHeapSize = valueAfter(match, "Initial heap ");
val.maxHeapSize = valueAfter(match, "Maximum heap ");
- val.minAlignment = valueAfter(match, "Min alignment ");
- val.maxAlignment = valueAfter(match, "Max alignment ");
+ val.spaceAlignment = valueAfter(match, "Space alignment ");
+ val.heapAlignment = valueAfter(match, "Heap alignment ");
}
/**
@@ -218,12 +218,12 @@
MinInitialMaxValues v = new MinInitialMaxValues();
getMinInitialMaxHeap(args, v);
- if ((expectedMin != -1) && (align_up(expectedMin, v.minAlignment) != v.minHeapSize)) {
+ if ((expectedMin != -1) && (align_up(expectedMin, v.heapAlignment) != v.minHeapSize)) {
throw new RuntimeException("Actual minimum heap size of " + v.minHeapSize +
" differs from expected minimum heap size of " + expectedMin);
}
- if ((expectedInitial != -1) && (align_up(expectedInitial, v.minAlignment) != v.initialHeapSize)) {
+ if ((expectedInitial != -1) && (align_up(expectedInitial, v.heapAlignment) != v.initialHeapSize)) {
throw new RuntimeException("Actual initial heap size of " + v.initialHeapSize +
" differs from expected initial heap size of " + expectedInitial);
}
@@ -247,7 +247,7 @@
MinInitialMaxValues v = new MinInitialMaxValues();
getMinInitialMaxHeap(new String[] { gcflag, "-XX:MaxHeapSize=" + maxHeapsize + "M" }, v);
- long expectedHeapSize = align_up(maxHeapsize * K * K, v.maxAlignment);
+ long expectedHeapSize = align_up(maxHeapsize * K * K, v.heapAlignment);
long actualHeapSize = v.maxHeapSize;
if (actualHeapSize > expectedHeapSize) {
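
The expected sizes above are rounded with the test library's align_up helper before comparison. The usual power-of-two rounding it implements, sketched here in C++ (the Java helper may be written differently):

    #include <cassert>
    #include <cstdint>

    // Round 'size' up to the next multiple of 'alignment', which must be a
    // power of two (heap and space alignments always are).
    inline uint64_t align_up(uint64_t size, uint64_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power of two expected");
      return (size + alignment - 1) & ~(alignment - 1);
    }
    // e.g. with a 2M heap alignment:
    //   align_up(3u << 20, 2u << 20) == 4u << 20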