--- a/make/autoconf/flags-cflags.m4 Mon May 06 09:43:26 2019 +0100
+++ b/make/autoconf/flags-cflags.m4 Mon May 06 09:43:48 2019 +0100
@@ -219,7 +219,7 @@
-Wunused-function -Wundef -Wunused-value -Woverloaded-virtual"
WARNINGS_ENABLE_ALL="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL"
- DISABLED_WARNINGS="unused-parameter unused"
+ DISABLED_WARNINGS="unknown-warning-option unused-parameter unused"
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# missing-method-return-type triggers in JavaNativeFoundation framework
--- a/make/hotspot/lib/CompileGtest.gmk Mon May 06 09:43:26 2019 +0100
+++ b/make/hotspot/lib/CompileGtest.gmk Mon May 06 09:43:48 2019 +0100
@@ -76,7 +76,8 @@
DISABLED_WARNINGS_gcc := $(DISABLED_WARNINGS_gcc) \
undef, \
DISABLED_WARNINGS_clang := $(DISABLED_WARNINGS_clang) \
- undef switch format-nonliteral tautological-undefined-compare, \
+ undef switch format-nonliteral tautological-undefined-compare \
+ self-assign-overloaded, \
DISABLED_WARNINGS_solstudio := $(DISABLED_WARNINGS_solstudio) \
identexpected, \
DISABLED_WARNINGS_microsoft := $(DISABLED_WARNINGS_microsoft) \
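The two Clang-related additions above work together: disabling unknown-warning-option (i.e. passing -Wno-unknown-warning-option) keeps older Clang releases from complaining about -Wno- flags they do not recognize, while self-assign-overloaded (added around Clang 7) fires on deliberate self-assignment through a user-defined operator=, something gtest code does on purpose when exercising assignment operators. A minimal illustration of what the disabled warning catches (not part of the patch):

    struct Value {
      int v;
      Value& operator=(const Value& other) { v = other.v; return *this; }
    };

    int main() {
      Value s{1};
      s = s;  // clang: explicitly assigning value of variable of type 'Value'
              // to itself [-Wself-assign-overloaded]
      return s.v;
    }
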
--- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp Mon May 06 09:43:48 2019 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,9 +65,7 @@
#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory");
#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory");
// Use twi-isync for load_acquire (faster than lwsync).
-// ATTENTION: seems like xlC 10.1 has problems with this inline assembler macro (VerifyMethodHandles found "bad vminfo in AMH.conv"):
-// #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
-#define inlasm_acquire_reg(X) inlasm_lwsync();
+#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
inline void OrderAccess::loadload() { inlasm_lwsync(); }
inline void OrderAccess::storestore() { inlasm_lwsync(); }
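The restored sequence gets acquire semantics from a data dependency rather than a full barrier: twi 0,%0,0 is a trap-word-immediate whose TO mask of zero never traps, but it consumes the just-loaded value, and the following isync keeps later instructions from completing before it. The net effect is that no subsequent load can be satisfied ahead of the acquiring load, at lower cost than lwsync. A simplified sketch of how a load_acquire composes with the macro (an illustration, not the exact OrderAccess code; assumes T is register-sized, as the "r" constraint requires):

    template <typename T>
    inline T load_acquire_sketch(const volatile T* p) {
      T t = *p;               // plain load
      inlasm_acquire_reg(t);  // "twi 0,%0,0; isync" -- never traps, orders later loads
      return t;
    }
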
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon May 06 09:43:48 2019 +0100
@@ -1092,7 +1092,7 @@
// the full GC has compacted objects and updated TAMS but not updated
// the prev bitmap.
if (G1VerifyBitmaps) {
- GCTraceTime(Debug, gc)("Clear Prev Bitmap for Verification");
+ GCTraceTime(Debug, gc) tm("Clear Prev Bitmap for Verification");
_cm->clear_prev_bitmap(workers());
}
// This call implicitly verifies that the next bitmap is clear after Full GC.
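The added tm is the whole fix: GCTraceTime expands to a scoped RAII timer type, so without a variable name the statement constructed a temporary that was destroyed at the end of the full expression, and the phase was logged as taking essentially zero time instead of spanning the bitmap clearing. The same pitfall with a stand-in timer class (illustration only):

    struct ScopedTimer {
      explicit ScopedTimer(const char* name);  // logs phase start
      ~ScopedTimer();                          // logs elapsed time
    };

    void phase() {
      ScopedTimer("Clear Prev Bitmap");     // bug: temporary, destroyed here
      ScopedTimer tm("Clear Prev Bitmap");  // fix: lives until end of scope
      // ... work being timed ...
    }
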
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp Mon May 06 09:43:48 2019 +0100
@@ -289,6 +289,6 @@
// fail. At the end of the GC, the original mark word values
// (including hash values) are restored to the appropriate
// objects.
- GCTraceTime(Info, gc, verify)("Verifying During GC (full)");
+ GCTraceTime(Info, gc, verify) tm("Verifying During GC (full)");
_heap->verify(VerifyOption_G1UseFullMarking);
}
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp Mon May 06 09:43:48 2019 +0100
@@ -65,7 +65,6 @@
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
_gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms):");
_gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms):");
- _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms):");
_gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms):");
if (G1HotCardCache::default_use_cache()) {
@@ -406,7 +405,7 @@
trace_phase(_gc_par_phases[GCWorkerStart], false);
debug_phase(_gc_par_phases[ExtRootScan]);
- for (int i = ExtRootScanSubPhasesStart; i <= ExtRootScanSubPhasesEnd; i++) {
+ for (int i = ExtRootScanSubPhasesFirst; i <= ExtRootScanSubPhasesLast; i++) {
trace_phase(_gc_par_phases[i]);
}
if (G1HotCardCache::default_use_cache()) {
@@ -531,7 +530,6 @@
"CMRefRoots",
"WaitForStrongCLD",
"WeakCLDRoots",
- "SATBFiltering",
"UpdateRS",
"ScanHCC",
"ScanRS",
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp Mon May 06 09:43:48 2019 +0100
@@ -60,7 +60,6 @@
CMRefRoots,
WaitForStrongCLD,
WeakCLDRoots,
- SATBFiltering,
UpdateRS,
ScanHCC,
ScanRS,
@@ -82,8 +81,8 @@
GCParPhasesSentinel
};
- static const GCParPhases ExtRootScanSubPhasesStart = ThreadRoots;
- static const GCParPhases ExtRootScanSubPhasesEnd = SATBFiltering;
+ static const GCParPhases ExtRootScanSubPhasesFirst = ThreadRoots;
+ static const GCParPhases ExtRootScanSubPhasesLast = WeakCLDRoots;
enum GCScanRSWorkItems {
ScanRSScannedCards,
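Renaming the bounds from Start/End to First/Last makes the inclusive semantics explicit: with SATBFiltering removed, the last ext-root sub-phase is now WeakCLDRoots, and the loop in g1GCPhaseTimes.cpp walks the closed range [First, Last]. A reduced sketch of the idiom (stand-in enum values):

    enum Phase { ThreadRoots, UniverseRoots, WeakCLDRoots, UpdateRS };

    void trace_sub_phases() {
      const Phase first = ThreadRoots;
      const Phase last  = WeakCLDRoots;  // inclusive upper bound
      for (int i = first; i <= last; i++) {
        // trace the sub-phase at index i
      }
    }
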
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp Mon May 06 09:43:48 2019 +0100
@@ -122,16 +122,6 @@
assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
}
- // During conc marking we have to filter the per-thread SATB buffers
- // to make sure we remove any oops into the CSet (which will show up
- // as implicitly live).
- {
- G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
- if (_process_strong_tasks.try_claim_task(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
- G1BarrierSet::satb_mark_queue_set().filter_thread_buffers();
- }
- }
-
_process_strong_tasks.all_tasks_completed(n_workers());
}
--- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp Mon May 06 09:43:48 2019 +0100
@@ -65,7 +65,6 @@
G1RP_PS_CodeCache_oops_do,
AOT_ONLY(G1RP_PS_aot_oops_do COMMA)
JVMCI_ONLY(G1RP_PS_JVMCI_oops_do COMMA)
- G1RP_PS_filter_satb_buffers,
G1RP_PS_refProcessor_oops_do,
// Leave this one last.
G1RP_PS_NumElements
--- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp Mon May 06 09:43:48 2019 +0100
@@ -189,18 +189,6 @@
Threads::threads_do(&closure);
}
-void SATBMarkQueueSet::filter_thread_buffers() {
- class FilterThreadBufferClosure : public ThreadClosure {
- SATBMarkQueueSet* _qset;
- public:
- FilterThreadBufferClosure(SATBMarkQueueSet* qset) : _qset(qset) {}
- virtual void do_thread(Thread* t) {
- _qset->satb_queue_for_thread(t).filter();
- }
- } closure(this);
- Threads::threads_do(&closure);
-}
-
bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
BufferNode* nd = get_completed_buffer();
if (nd != NULL) {
--- a/src/hotspot/share/gc/shared/satbMarkQueue.hpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.hpp Mon May 06 09:43:48 2019 +0100
@@ -125,9 +125,6 @@
size_t buffer_enqueue_threshold() const { return _buffer_enqueue_threshold; }
virtual void filter(SATBMarkQueue* queue) = 0;
- // Filter all the currently-active SATB buffers.
- void filter_thread_buffers();
-
// If there exists some completed buffer, pop and process it, and
// return true. Otherwise return false. Processing a buffer
// consists of applying the closure to the active range of the
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Mon May 06 09:43:48 2019 +0100
@@ -557,7 +557,10 @@
Klass* klass = cp->klass_at(index, CHECK_NULL);
JVMCIKlassHandle resolved_klass(THREAD, klass);
if (resolved_klass->is_instance_klass()) {
- InstanceKlass::cast(resolved_klass())->link_class_or_fail(THREAD);
+ bool linked = InstanceKlass::cast(resolved_klass())->link_class_or_fail(CHECK_NULL);
+ if (!linked) {
+ return NULL;
+ }
}
JVMCIObject klassObject = JVMCIENV->get_jvmci_type(resolved_klass, JVMCI_CHECK_NULL);
return JVMCIENV->get_jobject(klassObject);
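The fix covers both failure modes of link_class_or_fail(TRAPS): the call may leave a pending exception (now propagated by CHECK_NULL, which makes this function return NULL), or it may report failure by returning false without raising anything (now handled by the explicit return). A reduced sketch of the convention (stand-in caller; the expansion comment is schematic, not the literal macro text):

    // Roughly, f(CHECK_NULL) expands to:
    //   f(THREAD); if (HAS_PENDING_EXCEPTION) return NULL;
    jobject resolve_sketch(InstanceKlass* ik, TRAPS) {
      bool linked = ik->link_class_or_fail(CHECK_NULL);  // exception path
      if (!linked) {
        return NULL;  // quiet-failure path: no exception, but not linked
      }
      // ... proceed with a linked klass ...
      return NULL;  // placeholder
    }
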
--- a/src/hotspot/share/opto/loopTransform.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/opto/loopTransform.cpp Mon May 06 09:43:48 2019 +0100
@@ -45,13 +45,13 @@
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
- if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests
+ if (iff->outcnt() != 2) return NULL; // Ignore partially dead tests
PhaseIdealLoop *phase = _phase;
// Test is an IfNode, has 2 projections. If BOTH are in the loop
// we need loop unswitching instead of peeling.
- if( !is_member(phase->get_loop( iff->raw_out(0) )) )
+ if (!is_member(phase->get_loop(iff->raw_out(0))))
return iff->raw_out(0);
- if( !is_member(phase->get_loop( iff->raw_out(1) )) )
+ if (!is_member(phase->get_loop(iff->raw_out(1))))
return iff->raw_out(1);
return NULL;
}
@@ -63,7 +63,7 @@
//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
- for( uint i = 0; i < _body.size(); i++ ) {
+ for (uint i = 0; i < _body.size(); i++) {
Node *n = _body.at(i);
_phase->_igvn._worklist.push(n);
}
@@ -145,7 +145,9 @@
Node *exit = is_loop_exit(iff);
if (exit) {
float exit_prob = iff->_prob;
- if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
+ if (exit->Opcode() == Op_IfFalse) {
+ exit_prob = 1.0 - exit_prob;
+ }
if (exit_prob > PROB_MIN) {
float exit_cnt = iff->_fcnt * exit_prob;
return exit_cnt;
@@ -202,7 +204,7 @@
// Now compute a loop exit count
float loop_exit_cnt = 0.0f;
if (_child == NULL) {
- for( uint i = 0; i < _body.size(); i++ ) {
+ for (uint i = 0; i < _body.size(); i++) {
Node *n = _body[i];
loop_exit_cnt += compute_profile_trip_cnt_helper(n);
}
@@ -342,33 +344,48 @@
//------------------------------policy_peeling---------------------------------
// Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
-bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
- Node *test = ((IdealLoopTree*)this)->tail();
- int body_size = ((IdealLoopTree*)this)->_body.size();
+bool IdealLoopTree::policy_peeling(PhaseIdealLoop *phase) const {
+ IdealLoopTree *loop = (IdealLoopTree*)this;
+
+ // If nodes are depleted, some transform has miscalculated its needs.
+ assert(!phase->exceeding_node_budget(), "sanity");
+
+ uint body_size = loop->_body.size();
// Peeling does loop cloning which can result in O(N^2) node construction
- if( body_size > 255 /* Prevent overflow for large body_size */
- || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) {
- return false; // too large to safely clone
+ if (body_size > 255) {
+ return false; // Prevent overflow for large body size
+ }
+ uint estimate = body_size * body_size;
+ if (phase->exceeding_node_budget(estimate)) {
+ return false; // Too large to safely clone
}
// check for vectorized loops, any peeling done was already applied
- if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false;
-
- if (_head->is_CountedLoop() && _head->as_CountedLoop()->trip_count() == 1) {
- return false;
+ if (_head->is_CountedLoop()) {
+ CountedLoopNode* cl = _head->as_CountedLoop();
+ if (cl->is_unroll_only() || cl->trip_count() == 1) {
+ return false;
+ }
}
- while( test != _head ) { // Scan till run off top of loop
- if( test->is_If() ) { // Test?
+ Node* test = loop->tail();
+
+ while (test != _head) { // Scan till run off top of loop
+ if (test->is_If()) { // Test?
Node *ctrl = phase->get_ctrl(test->in(1));
- if (ctrl->is_top())
+ if (ctrl->is_top()) {
return false; // Found dead test on live IF? No peeling!
+ }
// Standard IF only has one input value to check for loop invariance
- assert(test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd || test->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
+ assert(test->Opcode() == Op_If ||
+ test->Opcode() == Op_CountedLoopEnd ||
+ test->Opcode() == Op_RangeCheck,
+ "Check this code when new subtype is added");
// Condition is not a member of this loop?
- if( !is_member(phase->get_loop(ctrl)) &&
- is_loop_exit(test) )
- return true; // Found reason to peel!
+ if (!is_member(phase->get_loop(ctrl)) && is_loop_exit(test)) {
+ // Found reason to peel!
+ return phase->may_require_nodes(estimate);
+ }
}
// Walk up dominators to loop _head looking for test which is
// executed on every path thru loop.
@@ -381,27 +398,27 @@
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
-void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
+void PhaseIdealLoop::peeled_dom_test_elim(IdealLoopTree *loop, Node_List &old_new) {
bool progress = true;
- while( progress ) {
+ while (progress) {
progress = false; // Reset for next iteration
Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
Node *test = prev->in(0);
- while( test != loop->_head ) { // Scan till run off top of loop
+ while (test != loop->_head) { // Scan till run off top of loop
int p_op = prev->Opcode();
- if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
+ if ((p_op == Op_IfFalse || p_op == Op_IfTrue) &&
test->is_If() && // Test?
!test->in(1)->is_Con() && // And not already obvious?
// Condition is not a member of this loop?
!loop->is_member(get_loop(get_ctrl(test->in(1))))){
// Walk loop body looking for instances of this test
- for( uint i = 0; i < loop->_body.size(); i++ ) {
+ for (uint i = 0; i < loop->_body.size(); i++) {
Node *n = loop->_body.at(i);
- if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
+ if (n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/) {
// IfNode was dominated by version in peeled loop body
progress = true;
- dominated_by( old_new[prev->_idx], n );
+ dominated_by(old_new[prev->_idx], n);
}
}
}
@@ -409,7 +426,7 @@
test = idom(test);
} // End of scan tests in loop
- } // End of while( progress )
+ } // End of while (progress)
}
//------------------------------do_peeling-------------------------------------
@@ -550,7 +567,7 @@
// v
// exit
//
-void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
+void PhaseIdealLoop::do_peeling(IdealLoopTree *loop, Node_List &old_new) {
C->set_major_progress();
// Peeling a 'main' loop in a pre/main/post situation obfuscates the
@@ -599,7 +616,7 @@
Node* old = head->fast_out(j);
if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
- if (!new_exit_value ) // Backedge value is ALSO loop invariant?
+ if (!new_exit_value) // Backedge value is ALSO loop invariant?
// Then loop body backedge value remains the same.
new_exit_value = old->in(LoopNode::LoopBackControl);
_igvn.hash_delete(old);
@@ -628,8 +645,9 @@
for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
Node *old = loop->_body.at(j3);
Node *nnn = old_new[old->_idx];
- if (!has_ctrl(nnn))
+ if (!has_ctrl(nnn)) {
set_idom(nnn, idom(nnn), dd-1);
+ }
}
// Now force out all loop-invariant dominating tests. The optimizer
@@ -644,12 +662,12 @@
//------------------------------policy_maximally_unroll------------------------
// Calculate exact loop trip count and return true if loop can be maximally
// unrolled.
-bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
+bool IdealLoopTree::policy_maximally_unroll(PhaseIdealLoop *phase) const {
CountedLoopNode *cl = _head->as_CountedLoop();
assert(cl->is_normal_loop(), "");
- if (!cl->is_valid_counted_loop())
+ if (!cl->is_valid_counted_loop()) {
return false; // Malformed counted loop
-
+ }
if (!cl->has_exact_trip_count()) {
// Trip count is not exact.
return false;
@@ -660,31 +678,36 @@
assert(trip_count > 1, "one iteration loop should be optimized out already");
assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");
+ // If nodes are depleted, some transform has miscalculated its needs.
+ assert(!phase->exceeding_node_budget(), "sanity");
+
// Real policy: if we maximally unroll, does it get too big?
// Allow the unrolled mess to get larger than standard loop
// size. After all, it will no longer be a loop.
uint body_size = _body.size();
uint unroll_limit = (uint)LoopUnrollLimit * 4;
- assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
+ assert((intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
if (trip_count > unroll_limit || body_size > unroll_limit) {
return false;
}
- // Fully unroll a loop with few iterations regardless next
- // conditions since following loop optimizations will split
- // such loop anyway (pre-main-post).
- if (trip_count <= 3)
- return true;
-
// Take into account that after unroll conjoined heads and tails will fold,
// otherwise policy_unroll() may allow more unrolling than max unrolling.
- uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
- uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
- if (body_size != tst_body_size) // Check for int overflow
+ uint new_body_size = est_loop_clone_sz(trip_count, body_size - EMPTY_LOOP_SIZE);
+
+ if (new_body_size == UINT_MAX) { // Check for bad estimate (overflow).
return false;
+ }
+
+ // Fully unroll a loop with few iterations, regardless of the conditions
+ // below, since following loop optimizations will split such a loop anyway
+ // (pre-main-post).
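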
+ if (trip_count <= 3) {
+ return phase->may_require_nodes(new_body_size);
+ }
+
if (new_body_size > unroll_limit ||
// Unrolling can result in a large amount of node construction
- new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) {
+ phase->exceeding_node_budget(new_body_size)) {
return false;
}
@@ -714,38 +737,43 @@
} // switch
}
- return true; // Do maximally unroll
+ return phase->may_require_nodes(new_body_size);
}
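est_loop_clone_sz(t, b) replaces the removed hand-rolled computation EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count, returning UINT_MAX as an overflow sentinel instead of relying on the old round-trip division check. A worked instance (EMPTY_LOOP_SIZE is 7 in HotSpot at the time of this patch; the other numbers are made up for illustration): a 27-node body with trip_count = 8 estimates to roughly 7 + (27 - 7) * 8 = 167 nodes once the conjoined heads and tails fold, and that estimate is then charged against the budget by may_require_nodes rather than unconditionally returning true.
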
//------------------------------policy_unroll----------------------------------
-// Return TRUE or FALSE if the loop should be unrolled or not. Unroll if
-// the loop is a CountedLoop and the body is small enough.
+// Return TRUE or FALSE if the loop should be unrolled or not. Unroll if the
+// loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) {
CountedLoopNode *cl = _head->as_CountedLoop();
assert(cl->is_normal_loop() || cl->is_main_loop(), "");
- if (!cl->is_valid_counted_loop())
+ if (!cl->is_valid_counted_loop()) {
return false; // Malformed counted loop
+ }
+
+ // If nodes are depleted, some transform has miscalculated its needs.
+ assert(!phase->exceeding_node_budget(), "sanity");
// Protect against over-unrolling.
// After split at least one iteration will be executed in pre-loop.
- if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
-
- _local_loop_unroll_limit = LoopUnrollLimit;
+ if (cl->trip_count() <= (cl->is_normal_loop() ? 2u : 1u)) {
+ return false;
+ }
+ _local_loop_unroll_limit = LoopUnrollLimit;
_local_loop_unroll_factor = 4;
- int future_unroll_ct = cl->unrolled_count() * 2;
+ int future_unroll_cnt = cl->unrolled_count() * 2;
if (!cl->is_vectorized_loop()) {
- if (future_unroll_ct > LoopMaxUnroll) return false;
+ if (future_unroll_cnt > LoopMaxUnroll) return false;
} else {
// obey user constraints on vector mapped loops with additional unrolling applied
int unroll_constraint = (cl->slp_max_unroll()) ? cl->slp_max_unroll() : 1;
- if ((future_unroll_ct / unroll_constraint) > LoopMaxUnroll) return false;
+ if ((future_unroll_cnt / unroll_constraint) > LoopMaxUnroll) return false;
}
// Check for initial stride being a small enough constant
- if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
+ if (abs(cl->stride_con()) > (1<<2)*future_unroll_cnt) return false;
// Don't unroll if the next round of unrolling would push us
// over the expected trip count of the loop. One is subtracted
@@ -753,8 +781,8 @@
// executes 1 iteration.
if (UnrollLimitForProfileCheck > 0 &&
cl->profile_trip_cnt() != COUNT_UNKNOWN &&
- future_unroll_ct > UnrollLimitForProfileCheck &&
- (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
+ future_unroll_cnt > UnrollLimitForProfileCheck &&
+ (float)future_unroll_cnt > cl->profile_trip_cnt() - 1.0) {
return false;
}
@@ -763,8 +791,8 @@
// and rounds of "unroll,optimize" are not making significant progress
// Progress defined as current size less than 20% larger than previous size.
if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
- future_unroll_ct > LoopUnrollMin &&
- (future_unroll_ct - 1) * (100 / LoopPercentProfileLimit) > cl->profile_trip_cnt() &&
+ future_unroll_cnt > LoopUnrollMin &&
+ (future_unroll_cnt - 1) * (100 / LoopPercentProfileLimit) > cl->profile_trip_cnt() &&
1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
return false;
}
@@ -848,8 +876,8 @@
if (LoopMaxUnroll > _local_loop_unroll_factor) {
// Once policy_slp_analysis succeeds, mark the loop with the
// maximal unroll factor so that we minimize analysis passes
- if (future_unroll_ct >= _local_loop_unroll_factor) {
- policy_unroll_slp_analysis(cl, phase, future_unroll_ct);
+ if (future_unroll_cnt >= _local_loop_unroll_factor) {
+ policy_unroll_slp_analysis(cl, phase, future_unroll_cnt);
}
}
}
@@ -858,32 +886,40 @@
if ((LoopMaxUnroll < slp_max_unroll_factor) && FLAG_IS_DEFAULT(LoopMaxUnroll) && UseSubwordForMaxVector) {
LoopMaxUnroll = slp_max_unroll_factor;
}
+
+ uint estimate = est_loop_clone_sz(2, body_size);
+
if (cl->has_passed_slp()) {
- if (slp_max_unroll_factor >= future_unroll_ct) return true;
- // Normal case: loop too big
- return false;
+ if (slp_max_unroll_factor >= future_unroll_cnt) {
+ return phase->may_require_nodes(estimate);
+ }
+ return false; // Loop too big.
}
// Check for being too big
if (body_size > (uint)_local_loop_unroll_limit) {
- if ((cl->is_subword_loop() || xors_in_loop >= 4) && body_size < (uint)LoopUnrollLimit * 4) {
- return true;
+ if ((cl->is_subword_loop() || xors_in_loop >= 4) && body_size < 4u * LoopUnrollLimit) {
+ return phase->may_require_nodes(estimate);
}
- // Normal case: loop too big
- return false;
+ return false; // Loop too big.
}
- if (cl->do_unroll_only()) {
+ if (cl->is_unroll_only()) {
if (TraceSuperWordLoopUnrollAnalysis) {
- tty->print_cr("policy_unroll passed vector loop(vlen=%d,factor = %d)\n", slp_max_unroll_factor, future_unroll_ct);
+ tty->print_cr("policy_unroll passed vector loop(vlen=%d, factor=%d)\n",
+ slp_max_unroll_factor, future_unroll_cnt);
}
}
// Unroll once! (Each trip will soon do double iterations)
- return true;
+ return phase->may_require_nodes(estimate);
}
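Two of the guards above are easiest to see with numbers. future_unroll_cnt doubles the current unroll factor, so a loop already unrolled 4x is tested as 8x against LoopMaxUnroll. The stride guard abs(stride_con) > (1 << 2) * future_unroll_cnt then bounds how far the induction variable may advance per unrolled trip: with future_unroll_cnt = 8 the bound is 32, so a constant stride of 40 stops further unrolling while a stride of 4 passes easily.
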
-void IdealLoopTree::policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct) {
+void IdealLoopTree::policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_cnt) {
+
+ // If nodes are depleted, some transform has miscalculated its needs.
+ assert(!phase->exceeding_node_budget(), "sanity");
+
// Enable this functionality target by target as needed
if (SuperWordLoopUnrollAnalysis) {
if (!cl->was_slp_analyzed()) {
@@ -898,7 +934,7 @@
if (cl->has_passed_slp()) {
int slp_max_unroll_factor = cl->slp_max_unroll();
- if (slp_max_unroll_factor >= future_unroll_ct) {
+ if (slp_max_unroll_factor >= future_unroll_cnt) {
int new_limit = cl->node_count_before_unroll() * slp_max_unroll_factor;
if (new_limit > LoopUnrollLimit) {
if (TraceSuperWordLoopUnrollAnalysis) {
@@ -917,16 +953,19 @@
// aligned in a loop (unless the VM guarantees mutual alignment). Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
-bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
+bool IdealLoopTree::policy_align(PhaseIdealLoop *phase) const {
return false;
}
//------------------------------policy_range_check-----------------------------
// Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE.
-bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
+bool IdealLoopTree::policy_range_check(PhaseIdealLoop *phase) const {
if (!RangeCheckElimination) return false;
+ // If nodes are depleted, some transform has miscalculated its needs.
+ assert(!phase->exceeding_node_budget(), "sanity");
+
CountedLoopNode *cl = _head->as_CountedLoop();
// If we unrolled with no intention of doing RCE and we later
// changed our minds, we got no pre-loop. Either we need to
@@ -935,7 +974,7 @@
Node *trip_counter = cl->phi();
// check for vectorized loops, some opts are no longer needed
- if (cl->do_unroll_only()) return false;
+ if (cl->is_unroll_only()) return false;
// Check loop body for tests of trip-counter plus loop-invariant vs
// loop-invariant.
@@ -946,38 +985,45 @@
// Comparing trip+off vs limit
Node *bol = iff->in(1);
- if (bol->req() != 2) continue; // dead constant test
+ if (bol->req() != 2) {
+ continue; // dead constant test
+ }
if (!bol->is_Bool()) {
assert(bol->Opcode() == Op_Conv2B, "predicate check only");
continue;
}
- if (bol->as_Bool()->_test._test == BoolTest::ne)
+ if (bol->as_Bool()->_test._test == BoolTest::ne) {
continue; // not RC
-
+ }
Node *cmp = bol->in(1);
Node *rc_exp = cmp->in(1);
Node *limit = cmp->in(2);
Node *limit_c = phase->get_ctrl(limit);
- if( limit_c == phase->C->top() )
+ if (limit_c == phase->C->top()) {
return false; // Found dead test on live IF? No RCE!
- if( is_member(phase->get_loop(limit_c) ) ) {
+ }
+ if (is_member(phase->get_loop(limit_c))) {
// Compare might have operands swapped; commute them
rc_exp = cmp->in(2);
limit = cmp->in(1);
limit_c = phase->get_ctrl(limit);
- if( is_member(phase->get_loop(limit_c) ) )
+ if (is_member(phase->get_loop(limit_c))) {
continue; // Both inputs are loop varying; cannot RCE
+ }
}
if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
continue;
}
- // Yeah! Found a test like 'trip+off vs limit'
- // Test is an IfNode, has 2 projections. If BOTH are in the loop
- // we need loop unswitching instead of iteration splitting.
- if( is_loop_exit(iff) )
- return true; // Found reason to split iterations
+ // Found a test like 'trip+off vs limit'. Test is an IfNode, has two
+ // (2) projections. If BOTH are in the loop we need loop unswitching
+ // instead of iteration splitting.
+ if (is_loop_exit(iff)) {
+ // Found valid reason to split iterations (if there is room).
+ // NOTE: Usually a gross overestimate.
+ return phase->may_require_nodes(est_loop_clone_sz(2, _body.size()));
+ }
} // End of is IF
}
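The shape this policy accepts is a scaled trip counter plus a loop-invariant offset compared against a loop-invariant limit, as recognized by is_scaled_iv_plus_offset. An illustrative source-level loop that matches (not from the patch; scale = 1):

    int sum_in_window(const int* a, int a_len, int n, int off) {
      int sum = 0;
      for (int i = 0; i < n; i++) {
        if (i + off < a_len) {  // 'trip + off vs limit'; off, a_len invariant
          sum += a[i + off];    // iteration splitting can absorb this test
        }
      }
      return sum;
    }
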
@@ -987,14 +1033,21 @@
//------------------------------policy_peel_only-------------------------------
// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned. Useful
// for unrolling loops with NO array accesses.
-bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {
+bool IdealLoopTree::policy_peel_only(PhaseIdealLoop *phase) const {
+
+ // If nodes are depleted, some transform has miscalculated its needs.
+ assert(!phase->exceeding_node_budget(), "sanity");
+
// check for vectorized loops, any peeling done was already applied
- if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false;
-
- for( uint i = 0; i < _body.size(); i++ )
- if( _body[i]->is_Mem() )
+ if (_head->is_CountedLoop() && _head->as_CountedLoop()->is_unroll_only()) {
+ return false;
+ }
+
+ for (uint i = 0; i < _body.size(); i++) {
+ if (_body[i]->is_Mem()) {
return false;
-
+ }
+ }
// No memory accesses at all!
return true;
}
@@ -1002,33 +1055,31 @@
//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in preheader_ctrl block and return that, otherwise return n.
-Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
- if( get_ctrl(n) != back_ctrl ) return n;
+Node *PhaseIdealLoop::clone_up_backedge_goo(Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones) {
+ if (get_ctrl(n) != back_ctrl) return n;
// Only visit once
if (visited.test_set(n->_idx)) {
Node *x = clones.find(n->_idx);
- if (x != NULL)
- return x;
- return n;
+ return (x != NULL) ? x : n;
}
Node *x = NULL; // If required, a clone of 'n'
// Check for 'n' being pinned in the backedge.
- if( n->in(0) && n->in(0) == back_ctrl ) {
+ if (n->in(0) && n->in(0) == back_ctrl) {
assert(clones.find(n->_idx) == NULL, "dead loop");
x = n->clone(); // Clone a copy of 'n' to preheader
clones.push(x, n->_idx);
- x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
+ x->set_req(0, preheader_ctrl); // Fix x's control input to preheader
}
// Recursive fixup any other input edges into x.
// If there are no changes we can just return 'n', otherwise
// we need to clone a private copy and change it.
- for( uint i = 1; i < n->req(); i++ ) {
- Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
- if( g != n->in(i) ) {
- if( !x ) {
+ for (uint i = 1; i < n->req(); i++) {
+ Node *g = clone_up_backedge_goo(back_ctrl, preheader_ctrl, n->in(i), visited, clones);
+ if (g != n->in(i)) {
+ if (!x) {
assert(clones.find(n->_idx) == NULL, "dead loop");
x = n->clone();
clones.push(x, n->_idx);
@@ -1036,11 +1087,11 @@
x->set_req(i, g);
}
}
- if( x ) { // x can legally float to pre-header location
- register_new_node( x, preheader_ctrl );
+ if (x) { // x can legally float to pre-header location
+ register_new_node(x, preheader_ctrl);
return x;
} else { // raise n to cover LCA of uses
- set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
+ set_ctrl(n, find_non_split_ctrl(back_ctrl->in(0)));
}
return n;
}
@@ -1244,7 +1295,7 @@
// Insert pre and post loops. If peel_only is set, the pre-loop can not have
// more iterations added. It acts as a 'peel' only, no lower-bound RCE, no
// alignment. Useful to unroll loops that do no array accesses.
-void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {
+void PhaseIdealLoop::insert_pre_post_loops(IdealLoopTree *loop, Node_List &old_new, bool peel_only) {
#ifndef PRODUCT
if (TraceLoopOpts) {
@@ -1259,9 +1310,9 @@
// Find common pieces of the loop being guarded with pre & post loops
CountedLoopNode *main_head = loop->_head->as_CountedLoop();
- assert( main_head->is_normal_loop(), "" );
+ assert(main_head->is_normal_loop(), "");
CountedLoopEndNode *main_end = main_head->loopexit();
- assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
+ assert(main_end->outcnt() == 2, "1 true, 1 false path only");
Node *pre_header= main_head->in(LoopNode::EntryControl);
Node *init = main_head->init_trip();
@@ -1273,13 +1324,13 @@
// Need only 1 user of 'bol' because I will be hacking the loop bounds.
Node *bol = main_end->in(CountedLoopEndNode::TestValue);
- if( bol->outcnt() != 1 ) {
+ if (bol->outcnt() != 1) {
bol = bol->clone();
register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
_igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, bol);
}
// Need only 1 user of 'cmp' because I will be hacking the loop bounds.
- if( cmp->outcnt() != 1 ) {
+ if (cmp->outcnt() != 1) {
cmp = cmp->clone();
register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
_igvn.replace_input_of(bol, 1, cmp);
@@ -1314,9 +1365,9 @@
// Find the pre-loop normal exit.
Node* pre_exit = pre_end->proj_out(false);
- assert( pre_exit->Opcode() == Op_IfFalse, "" );
+ assert(pre_exit->Opcode() == Op_IfFalse, "");
IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
- _igvn.register_new_node_with_optimizer( new_pre_exit );
+ _igvn.register_new_node_with_optimizer(new_pre_exit);
set_idom(new_pre_exit, pre_end, dd_main_head);
set_loop(new_pre_exit, outer_loop->_parent);
@@ -1325,26 +1376,26 @@
// zero-trip guard will become the minimum-trip guard when we unroll
// the main-loop.
Node *min_opaq = new Opaque1Node(C, limit);
- Node *min_cmp = new CmpINode( pre_incr, min_opaq );
- Node *min_bol = new BoolNode( min_cmp, b_test );
- register_new_node( min_opaq, new_pre_exit );
- register_new_node( min_cmp , new_pre_exit );
- register_new_node( min_bol , new_pre_exit );
+ Node *min_cmp = new CmpINode(pre_incr, min_opaq);
+ Node *min_bol = new BoolNode(min_cmp, b_test);
+ register_new_node(min_opaq, new_pre_exit);
+ register_new_node(min_cmp , new_pre_exit);
+ register_new_node(min_bol , new_pre_exit);
// Build the IfNode (assume the main-loop is executed always).
- IfNode *min_iff = new IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
- _igvn.register_new_node_with_optimizer( min_iff );
+ IfNode *min_iff = new IfNode(new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN);
+ _igvn.register_new_node_with_optimizer(min_iff);
set_idom(min_iff, new_pre_exit, dd_main_head);
set_loop(min_iff, outer_loop->_parent);
// Plug in the false-path, taken if we need to skip main-loop
- _igvn.hash_delete( pre_exit );
+ _igvn.hash_delete(pre_exit);
pre_exit->set_req(0, min_iff);
set_idom(pre_exit, min_iff, dd_main_head);
set_idom(pre_exit->unique_ctrl_out(), min_iff, dd_main_head);
// Make the true-path, must enter the main loop
- Node *min_taken = new IfTrueNode( min_iff );
- _igvn.register_new_node_with_optimizer( min_taken );
+ Node *min_taken = new IfTrueNode(min_iff);
+ _igvn.register_new_node_with_optimizer(min_taken);
set_idom(min_taken, min_iff, dd_main_head);
set_loop(min_taken, outer_loop->_parent);
// Plug in the true path
@@ -1359,14 +1410,14 @@
// fall-out values of the pre-loop.
for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
Node* main_phi = main_head->fast_out(i2);
- if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
+ if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0) {
Node *pre_phi = old_new[main_phi->_idx];
Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
main_head->skip_strip_mined()->in(LoopNode::EntryControl),
pre_phi->in(LoopNode::LoopBackControl),
visited, clones);
_igvn.hash_delete(main_phi);
- main_phi->set_req( LoopNode::EntryControl, fallpre );
+ main_phi->set_req(LoopNode::EntryControl, fallpre);
}
}
@@ -1381,7 +1432,7 @@
// dependencies.
// CastII for the main loop:
- Node* castii = cast_incr_before_loop( pre_incr, min_taken, main_head );
+ Node* castii = cast_incr_before_loop(pre_incr, min_taken, main_head);
assert(castii != NULL, "no castII inserted");
Node* opaque_castii = new Opaque1Node(C, castii);
register_new_node(opaque_castii, outer_main_head->in(LoopNode::EntryControl));
@@ -1390,18 +1441,18 @@
// Step B4: Shorten the pre-loop to run only 1 iteration (for now).
// RCE and alignment may change this later.
Node *cmp_end = pre_end->cmp_node();
- assert( cmp_end->in(2) == limit, "" );
- Node *pre_limit = new AddINode( init, stride );
+ assert(cmp_end->in(2) == limit, "");
+ Node *pre_limit = new AddINode(init, stride);
// Save the original loop limit in this Opaque1 node for
// use by range check elimination.
Node *pre_opaq = new Opaque1Node(C, pre_limit, limit);
- register_new_node( pre_limit, pre_head->in(0) );
- register_new_node( pre_opaq , pre_head->in(0) );
+ register_new_node(pre_limit, pre_head->in(0));
+ register_new_node(pre_opaq , pre_head->in(0));
// Since no other users of pre-loop compare, I can hack limit directly
- assert( cmp_end->outcnt() == 1, "no other users" );
+ assert(cmp_end->outcnt() == 1, "no other users");
_igvn.hash_delete(cmp_end);
cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);
@@ -1421,24 +1472,26 @@
// Modify pre loop end condition
Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test);
- register_new_node( new_bol0, pre_head->in(0) );
+ register_new_node(new_bol0, pre_head->in(0));
_igvn.replace_input_of(pre_end, CountedLoopEndNode::TestValue, new_bol0);
// Modify main loop guard condition
assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test);
- register_new_node( new_bol1, new_pre_exit );
+ register_new_node(new_bol1, new_pre_exit);
_igvn.hash_delete(min_iff);
min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
// Modify main loop end condition
BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test);
- register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
+ register_new_node(new_bol2, main_end->in(CountedLoopEndNode::TestControl));
_igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, new_bol2);
}
// Flag main loop
main_head->set_main_loop();
- if( peel_only ) main_head->set_main_no_pre_loop();
+ if (peel_only) {
+ main_head->set_main_no_pre_loop();
+ }
// Subtract a trip count for the pre-loop.
main_head->set_trip_count(main_head->trip_count() - 1);
@@ -1457,8 +1510,9 @@
//------------------------------insert_vector_post_loop------------------------
// Insert a copy of the atomic unrolled vectorized main loop as a post loop,
-// unroll_policy has already informed us that more unrolling is about to happen to
-// the main loop. The resultant post loop will serve as a vectorized drain loop.
+// unroll_policy has already informed us that more unrolling is about to
+// happen to the main loop. The resultant post loop will serve as a
+// vectorized drain loop.
void PhaseIdealLoop::insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new) {
if (!loop->_head->is_CountedLoop()) return;
@@ -1467,6 +1521,9 @@
// only process vectorized main loops
if (!cl->is_vectorized_loop() || !cl->is_main_loop()) return;
+ if (!may_require_nodes(est_loop_clone_sz(2, loop->_body.size()))) {
+ return;
+ }
int slp_max_unroll_factor = cl->slp_max_unroll();
int cur_unroll = cl->unrolled_count();
@@ -1638,7 +1695,7 @@
// fall-out values of the main-loop.
for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
Node* main_phi = main_head->fast_out(i);
- if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0) {
+ if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0) {
Node *cur_phi = old_new[main_phi->_idx];
Node *fallnew = clone_up_backedge_goo(main_head->back_control(),
post_head->init_control(),
@@ -1710,7 +1767,7 @@
//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
-void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
+void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip) {
assert(LoopUnrollLimit, "");
CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
CountedLoopEndNode *loop_end = loop_head->loopexit();
@@ -1733,8 +1790,8 @@
Node_List rpo_list;
VectorSet visited(arena);
visited.set(loop_head->_idx);
- rpo( loop_head, stack, visited, rpo_list );
- dump(loop, rpo_list.size(), rpo_list );
+ rpo(loop_head, stack, visited, rpo_list);
+ dump(loop, rpo_list.size(), rpo_list);
}
#endif
@@ -1811,7 +1868,8 @@
// Verify that policy_unroll result is still valid.
const TypeInt* limit_type = _igvn.type(limit)->is_int();
assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
- stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");
+ stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo),
+ "sanity");
if (limit->is_Con()) {
// The check in policy_unroll and the assert above guarantee
@@ -1829,8 +1887,8 @@
// zero trip guard limit will be different from loop limit.
assert(has_ctrl(opaq), "should have it");
Node* opaq_ctrl = get_ctrl(opaq);
- limit = new Opaque2Node( C, limit );
- register_new_node( limit, opaq_ctrl );
+ limit = new Opaque2Node(C, limit);
+ register_new_node(limit, opaq_ctrl);
}
if ((stride_con > 0 && (java_subtract(limit_type->_lo, stride_con) < limit_type->_lo)) ||
(stride_con < 0 && (java_subtract(limit_type->_hi, stride_con) > limit_type->_hi))) {
@@ -1871,15 +1929,16 @@
adj_limit = new SubINode(limit, stride);
}
assert(old_limit != NULL && adj_limit != NULL, "");
- register_new_node( adj_limit, ctrl ); // adjust amount
+ register_new_node(adj_limit, ctrl); // adjust amount
Node* adj_cmp = new CmpINode(old_limit, adj_limit);
- register_new_node( adj_cmp, ctrl );
+ register_new_node(adj_cmp, ctrl);
Node* adj_bool = new BoolNode(adj_cmp, bt);
- register_new_node( adj_bool, ctrl );
+ register_new_node(adj_bool, ctrl);
new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
}
register_new_node(new_limit, ctrl);
}
+
assert(new_limit != NULL, "");
// Replace in loop test.
assert(loop_end->in(1)->in(1) == cmp, "sanity");
@@ -1929,10 +1988,10 @@
// Make the fall-in from the original come from the fall-out of the clone.
for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
Node* phi = loop_head->fast_out(j);
- if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
+ if (phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0) {
Node *newphi = old_new[phi->_idx];
- _igvn.hash_delete( phi );
- _igvn.hash_delete( newphi );
+ _igvn.hash_delete(phi);
+ _igvn.hash_delete(newphi);
phi ->set_req(LoopNode:: EntryControl, newphi->in(LoopNode::LoopBackControl));
newphi->set_req(LoopNode::LoopBackControl, phi ->in(LoopNode::LoopBackControl));
@@ -1940,7 +1999,7 @@
}
}
Node *clone_head = old_new[loop_head->_idx];
- _igvn.hash_delete( clone_head );
+ _igvn.hash_delete(clone_head);
loop_head ->set_req(LoopNode:: EntryControl, clone_head->in(LoopNode::LoopBackControl));
clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
loop_head ->set_req(LoopNode::LoopBackControl, C->top());
@@ -1951,18 +2010,19 @@
// Kill the clone's backedge
Node *newcle = old_new[loop_end->_idx];
- _igvn.hash_delete( newcle );
+ _igvn.hash_delete(newcle);
Node *one = _igvn.intcon(1);
set_ctrl(one, C->root());
newcle->set_req(1, one);
// Force clone into same loop body
uint max = loop->_body.size();
- for( uint k = 0; k < max; k++ ) {
+ for (uint k = 0; k < max; k++) {
Node *old = loop->_body.at(k);
Node *nnn = old_new[old->_idx];
loop->_body.push(nnn);
- if (!has_ctrl(old))
+ if (!has_ctrl(old)) {
set_loop(nnn, loop);
+ }
}
loop->record_for_igvn();
@@ -1974,7 +2034,7 @@
for (uint i = 0; i < loop->_body.size(); i++) {
loop->_body.at(i)->dump();
}
- if(C->clone_map().is_debug()) {
+ if (C->clone_map().is_debug()) {
tty->print("\nCloneMap\n");
Dict* dict = C->clone_map().dict();
DictI i(dict);
@@ -1990,12 +2050,11 @@
}
}
#endif
-
}
//------------------------------do_maximally_unroll----------------------------
-void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
+void PhaseIdealLoop::do_maximally_unroll(IdealLoopTree *loop, Node_List &old_new) {
CountedLoopNode *cl = loop->_head->as_CountedLoop();
assert(cl->has_exact_trip_count(), "trip count is not exact");
assert(cl->trip_count() > 0, "");
@@ -2113,7 +2172,7 @@
// the pre-loop or the post-loop until the condition holds true in the main
// loop. Stride, scale, offset and limit are all loop invariant. Further,
// stride and scale are constants (offset and limit often are).
-void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
+void PhaseIdealLoop::add_constraint(int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit) {
// For positive stride, the pre-loop limit always uses a MAX function
// and the main loop a MIN function. For negative stride these are
// reversed.
@@ -2198,7 +2257,7 @@
set_ctrl(one, C->root());
Node *plus_one = new AddINode(offset, one);
- register_new_node( plus_one, pre_ctrl );
+ register_new_node(plus_one, pre_ctrl);
// Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
*pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl,
scale_con < -1 && stride_con > 0);
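The MIN/MAX split in add_constraint is easiest to see on a concrete check. Assume scale = 1 and a positive stride, with the in-loop test 0 <= I + off < a_len: the test holds exactly for -off <= I < a_len - off, so the pre-loop limit is raised toward MAX(pre_limit, -off) (guarding the underflow side) and the main-loop limit is lowered toward MIN(main_limit, a_len - off) (guarding the overflow side). Iterations outside that window still run their range check, but only in the pre- and post-loops; the main loop runs check-free.
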
@@ -2367,7 +2426,7 @@
//------------------------------do_range_check---------------------------------
// Eliminate range-checks and other trip-counter vs loop-invariant tests.
-int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
+int PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
#ifndef PRODUCT
if (PrintOpto && VerifyLoopOptimizations) {
tty->print("Range Check Elimination ");
@@ -2377,15 +2436,16 @@
loop->dump_head();
}
#endif
+
assert(RangeCheckElimination, "");
CountedLoopNode *cl = loop->_head->as_CountedLoop();
// If we fail before trying to eliminate range checks, set multiversion state
int closed_range_checks = 1;
// protect against stride not being a constant
- if (!cl->stride_is_con())
+ if (!cl->stride_is_con()) {
return closed_range_checks;
-
+ }
// Find the trip counter; we are iteration splitting based on it
Node *trip_counter = cl->phi();
// Find the main loop limit; we will trim its iterations
@@ -2400,7 +2460,7 @@
}
// Need to find the main-loop zero-trip guard
- Node *ctrl = cl->skip_predicates();
+ Node *ctrl = cl->skip_predicates();
Node *iffm = ctrl->in(0);
Node *opqzm = iffm->in(1)->in(1)->in(2);
assert(opqzm->in(1) == main_limit, "do not understand situation");
@@ -2418,8 +2478,9 @@
// Occasionally it's possible for a pre-loop Opaque1 node to be
// optimized away and then another round of loop opts attempted.
// We can not optimize this particular loop in that case.
- if (pre_opaq1->Opcode() != Op_Opaque1)
+ if (pre_opaq1->Opcode() != Op_Opaque1) {
return closed_range_checks;
+ }
Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
Node *pre_limit = pre_opaq->in(1);
@@ -2429,9 +2490,9 @@
// Ensure the original loop limit is available from the
// pre-loop Opaque1 node.
Node *orig_limit = pre_opaq->original_loop_limit();
- if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
+ if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) {
return closed_range_checks;
-
+ }
// Must know if its a count-up or count-down loop
int stride_con = cl->stride_con();
@@ -2457,8 +2518,9 @@
Node* predicate_proj = cl->skip_strip_mined()->in(LoopNode::EntryControl);
assert(predicate_proj->is_Proj() && predicate_proj->in(0)->is_If(), "if projection only");
+
// Check loop body for tests of trip-counter plus loop-invariant vs loop-variant.
- for( uint i = 0; i < loop->_body.size(); i++ ) {
+ for (uint i = 0; i < loop->_body.size(); i++) {
Node *iff = loop->_body[i];
if (iff->Opcode() == Op_If ||
iff->Opcode() == Op_RangeCheck) { // Test?
@@ -2466,18 +2528,18 @@
// we need loop unswitching instead of iteration splitting.
closed_range_checks++;
Node *exit = loop->is_loop_exit(iff);
- if( !exit ) continue;
+ if (!exit) continue;
int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;
// Get boolean condition to test
Node *i1 = iff->in(1);
- if( !i1->is_Bool() ) continue;
+ if (!i1->is_Bool()) continue;
BoolNode *bol = i1->as_Bool();
BoolTest b_test = bol->_test;
// Flip sense of test if exit condition is flipped
- if( flip )
+ if (flip) {
b_test = b_test.negate();
-
+ }
// Get compare
Node *cmp = bol->in(1);
@@ -2487,14 +2549,15 @@
int scale_con= 1; // Assume trip counter not scaled
Node *limit_c = get_ctrl(limit);
- if( loop->is_member(get_loop(limit_c) ) ) {
+ if (loop->is_member(get_loop(limit_c))) {
// Compare might have operands swapped; commute them
b_test = b_test.commute();
rc_exp = cmp->in(2);
limit = cmp->in(1);
limit_c = get_ctrl(limit);
- if( loop->is_member(get_loop(limit_c) ) )
+ if (loop->is_member(get_loop(limit_c))) {
continue; // Both inputs are loop varying; cannot RCE
+ }
}
// Here we know 'limit' is loop invariant
@@ -2513,8 +2576,9 @@
}
Node *offset_c = get_ctrl(offset);
- if( loop->is_member( get_loop(offset_c) ) )
+ if (loop->is_member(get_loop(offset_c))) {
continue; // Offset is not really loop invariant
+ }
// Here we know 'offset' is loop invariant.
// As above for the 'limit', the 'offset' maybe pinned below the
@@ -2536,10 +2600,10 @@
// sense of the test.
// Adjust pre and main loop limits to guard the correct iteration set
- if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
- if( b_test._test == BoolTest::lt ) { // Range checks always use lt
+ if (cmp->Opcode() == Op_CmpU) { // Unsigned compare is really 2 tests
+ if (b_test._test == BoolTest::lt) { // Range checks always use lt
// The underflow and overflow limits: 0 <= scale*I+offset < limit
- add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
+ add_constraint(stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit);
// (0-offset)/scale could be outside of loop iterations range.
conditional_rc = true;
Node* init = cl->init_trip();
@@ -2566,29 +2630,29 @@
continue; // In release mode, ignore it
}
} else { // Otherwise work on normal compares
- switch( b_test._test ) {
+ switch (b_test._test) {
case BoolTest::gt:
// Fall into GE case
case BoolTest::ge:
// Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit
scale_con = -scale_con;
- offset = new SubINode( zero, offset );
- register_new_node( offset, pre_ctrl );
- limit = new SubINode( zero, limit );
- register_new_node( limit, pre_ctrl );
+ offset = new SubINode(zero, offset);
+ register_new_node(offset, pre_ctrl);
+ limit = new SubINode(zero, limit);
+ register_new_node(limit, pre_ctrl);
// Fall into LE case
case BoolTest::le:
if (b_test._test != BoolTest::gt) {
// Convert X <= Y to X < Y+1
- limit = new AddINode( limit, one );
- register_new_node( limit, pre_ctrl );
+ limit = new AddINode(limit, one);
+ register_new_node(limit, pre_ctrl);
}
// Fall into LT case
case BoolTest::lt:
// The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
// Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here
// to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT.
- add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
+ add_constraint(stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit);
// ((MIN_INT+1)-offset)/scale could be outside of loop iterations range.
// Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could
// still be outside of loop range.
@@ -2604,7 +2668,7 @@
// Kill the eliminated test
C->set_major_progress();
- Node *kill_con = _igvn.intcon( 1-flip );
+ Node *kill_con = _igvn.intcon(1-flip);
set_ctrl(kill_con, C->root());
_igvn.replace_input_of(iff, 1, kill_con);
// Find surviving projection
@@ -2624,9 +2688,7 @@
if (limit->Opcode() == Op_LoadRange) {
closed_range_checks--;
}
-
} // End of is IF
-
}
if (predicate_proj != cl->skip_strip_mined()->in(LoopNode::EntryControl)) {
_igvn.replace_input_of(cl->skip_strip_mined(), LoopNode::EntryControl, predicate_proj);
@@ -2647,21 +2709,21 @@
Node *main_cle = cl->loopexit();
Node *main_bol = main_cle->in(1);
// Hacking loop bounds; need private copies of exit test
- if( main_bol->outcnt() > 1 ) {// BoolNode shared?
- main_bol = main_bol->clone();// Clone a private BoolNode
- register_new_node( main_bol, main_cle->in(0) );
+ if (main_bol->outcnt() > 1) { // BoolNode shared?
+ main_bol = main_bol->clone(); // Clone a private BoolNode
+ register_new_node(main_bol, main_cle->in(0));
_igvn.replace_input_of(main_cle, 1, main_bol);
}
Node *main_cmp = main_bol->in(1);
- if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
- main_cmp = main_cmp->clone();// Clone a private CmpNode
- register_new_node( main_cmp, main_cle->in(0) );
+ if (main_cmp->outcnt() > 1) { // CmpNode shared?
+ main_cmp = main_cmp->clone(); // Clone a private CmpNode
+ register_new_node(main_cmp, main_cle->in(0));
_igvn.replace_input_of(main_bol, 1, main_cmp);
}
// Hack the now-private loop bounds
_igvn.replace_input_of(main_cmp, 2, main_limit);
// The OpaqueNode is unshared by design
- assert( opqzm->outcnt() == 1, "cannot hack shared node" );
+ assert(opqzm->outcnt() == 1, "cannot hack shared node");
_igvn.replace_input_of(opqzm, 1, main_limit);
return closed_range_checks;
@@ -2830,64 +2892,67 @@
//------------------------------DCE_loop_body----------------------------------
// Remove simplistic dead code from loop body
void IdealLoopTree::DCE_loop_body() {
- for( uint i = 0; i < _body.size(); i++ )
- if( _body.at(i)->outcnt() == 0 )
- _body.map( i--, _body.pop() );
+ for (uint i = 0; i < _body.size(); i++) {
+ if (_body.at(i)->outcnt() == 0) {
+ _body.map(i, _body.pop());
+ i--; // Ensure we revisit the updated index.
+ }
+ }
}
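The compaction above removes element i by overwriting it with the popped last element and then revisiting slot i, which now holds a not-yet-examined node. The same idiom on a standard container (stand-in for Node_List; zero marks a "dead" entry):

    #include <vector>

    void remove_dead(std::vector<int>& body) {
      for (size_t i = 0; i < body.size(); ) {
        if (body[i] == 0) {        // stand-in for outcnt() == 0
          body[i] = body.back();   // like _body.map(i, _body.pop())
          body.pop_back();         // shrink; re-examine slot i next pass
        } else {
          i++;                     // advance only past live entries
        }
      }
    }
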
//------------------------------adjust_loop_exit_prob--------------------------
// Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
// Replace with a 1-in-10 exit guess.
-void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
+void IdealLoopTree::adjust_loop_exit_prob(PhaseIdealLoop *phase) {
Node *test = tail();
- while( test != _head ) {
+ while (test != _head) {
uint top = test->Opcode();
- if( top == Op_IfTrue || top == Op_IfFalse ) {
+ if (top == Op_IfTrue || top == Op_IfFalse) {
int test_con = ((ProjNode*)test)->_con;
assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
IfNode *iff = test->in(0)->as_If();
- if( iff->outcnt() == 2 ) { // Ignore dead tests
+ if (iff->outcnt() == 2) { // Ignore dead tests
Node *bol = iff->in(1);
- if( bol && bol->req() > 1 && bol->in(1) &&
- ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
- (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
- (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeB ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeS ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeI ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeL ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeP ) ||
- (bol->in(1)->Opcode() == Op_CompareAndExchangeN ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapB ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapS ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapI ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapL ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapP ) ||
- (bol->in(1)->Opcode() == Op_WeakCompareAndSwapN ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapB ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapS ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapN ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeP ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeN ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapP ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapN ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapP ) ||
- (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapN )))
+ if (bol && bol->req() > 1 && bol->in(1) &&
+ ((bol->in(1)->Opcode() == Op_StorePConditional) ||
+ (bol->in(1)->Opcode() == Op_StoreIConditional) ||
+ (bol->in(1)->Opcode() == Op_StoreLConditional) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeB) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeS) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeI) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeL) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeP) ||
+ (bol->in(1)->Opcode() == Op_CompareAndExchangeN) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapB) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapS) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapI) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapL) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapP) ||
+ (bol->in(1)->Opcode() == Op_WeakCompareAndSwapN) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapB) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapS) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapI) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapL) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapP) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapN) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeP) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahCompareAndExchangeN) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapP) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahWeakCompareAndSwapN) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapP) ||
+ (bol->in(1)->Opcode() == Op_ShenandoahCompareAndSwapN)))
return; // Allocation loops RARELY take backedge
// Find the OTHER exit path from the IF
Node* ex = iff->proj_out(1-test_con);
float p = iff->_prob;
- if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
- if( top == Op_IfTrue ) {
- if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
+ if (!phase->is_member(this, ex) && iff->_fcnt == COUNT_UNKNOWN) {
+ if (top == Op_IfTrue) {
+ if (p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
iff->_prob = PROB_STATIC_FREQUENT;
}
} else {
- if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
+ if (p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
iff->_prob = PROB_STATIC_INFREQUENT;
}
}
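
For context on the thresholds used above (the constant values are not part of this change, so treat them as assumptions): in C2, PROB_FAIR is 0.5, PROB_UNLIKELY_MAG(3) is 1e-3, and the static guesses encode roughly a 1-in-10 loop exit. A minimal standalone sketch of the adjustment, under those assumptions:

#include <cstdio>

// Assumed constants, mirroring C2's conventions (not taken from this patch).
static const float PROB_FAIR              = 0.5f;
static const float PROB_UNLIKELY_MAG_3    = 1e-3f;
static const float PROB_STATIC_FREQUENT   = 0.9f;  // ~1-in-10 exit guess
static const float PROB_STATIC_INFREQUENT = 0.1f;

// 'p' is the probability of the IfTrue projection. If the projection that
// stays in the loop carries a near-50/50 parser guess, bias it so the
// backedge looks hot.
static float adjust_exit_prob(bool backedge_is_true_proj, float p) {
  if (backedge_is_true_proj) {
    return (p < PROB_FAIR + PROB_UNLIKELY_MAG_3) ? PROB_STATIC_FREQUENT : p;
  } else {
    return (p > PROB_FAIR - PROB_UNLIKELY_MAG_3) ? PROB_STATIC_INFREQUENT : p;
  }
}

int main() {
  // A 50/50 parser guess on an IfTrue backedge becomes a 9-in-10 guess.
  std::printf("%.2f\n", adjust_exit_prob(true, 0.5f));   // 0.90
  // An already-biased test is left alone.
  std::printf("%.2f\n", adjust_exit_prob(true, 0.95f));  // 0.95
  return 0;
}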
@@ -2949,26 +3014,28 @@
phase->_igvn.replace_input_of(main_cmp, 2, main_cmp->in(2)->in(1));
}
-//------------------------------policy_do_remove_empty_loop--------------------
-// Micro-benchmark spamming. Policy is to always remove empty loops.
-// The 'DO' part is to replace the trip counter with the value it will
-// have on the last iteration. This will break the loop.
-bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
+//------------------------------do_remove_empty_loop---------------------------
+// We always attempt to remove empty loops. The approach is to replace the trip
+// counter with the value it will have on the last iteration. This will break
+// the loop.
+bool IdealLoopTree::do_remove_empty_loop(PhaseIdealLoop *phase) {
// Minimum size must be empty loop
- if (_body.size() > EMPTY_LOOP_SIZE)
+ if (_body.size() > EMPTY_LOOP_SIZE) {
return false;
-
- if (!_head->is_CountedLoop())
- return false; // Dead loop
+ }
+ if (!_head->is_CountedLoop()) {
+ return false; // Dead loop
+ }
CountedLoopNode *cl = _head->as_CountedLoop();
- if (!cl->is_valid_counted_loop())
- return false; // Malformed loop
- if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
- return false; // Infinite loop
-
+ if (!cl->is_valid_counted_loop()) {
+ return false; // Malformed loop
+ }
+ if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) {
+ return false; // Infinite loop
+ }
if (cl->is_pre_loop()) {
- // If the loop we are removing is a pre-loop then the main and
- // post loop can be removed as well
+ // If the loop we are removing is a pre-loop, then the main and post loops
+ // can be removed as well.
remove_main_post_loops(cl, phase);
}
@@ -2978,11 +3045,11 @@
for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
Node* n = cl->fast_out(i);
if (n->Opcode() == Op_Phi) {
- assert(iv == NULL, "Too many phis" );
+ assert(iv == NULL, "Too many phis");
iv = n;
}
}
- assert(iv == cl->phi(), "Wrong phi" );
+ assert(iv == cl->phi(), "Wrong phi");
#endif
// main and post loops have explicitly created zero trip guard
@@ -3056,26 +3123,26 @@
}
// Note: the final value after increment should not overflow since
// counted loop has limit check predicate.
- Node *final = new SubINode( exact_limit, cl->stride() );
+ Node *final = new SubINode(exact_limit, cl->stride());
phase->register_new_node(final,cl->in(LoopNode::EntryControl));
phase->_igvn.replace_node(phi,final);
phase->C->set_major_progress();
return true;
}
-//------------------------------policy_do_one_iteration_loop-------------------
+//------------------------------do_one_iteration_loop--------------------------
// Convert one iteration loop into normal code.
-bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) {
- if (!_head->as_Loop()->is_valid_counted_loop())
+bool IdealLoopTree::do_one_iteration_loop(PhaseIdealLoop *phase) {
+ if (!_head->as_Loop()->is_valid_counted_loop()) {
return false; // Only for counted loop
-
+ }
CountedLoopNode *cl = _head->as_CountedLoop();
if (!cl->has_exact_trip_count() || cl->trip_count() != 1) {
return false;
}
#ifndef PRODUCT
- if(TraceLoopOpts) {
+ if (TraceLoopOpts) {
tty->print("OneIteration ");
this->dump_head();
}
@@ -3096,20 +3163,22 @@
//=============================================================================
//------------------------------iteration_split_impl---------------------------
-bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
+bool IdealLoopTree::iteration_split_impl(PhaseIdealLoop *phase, Node_List &old_new) {
// Compute loop trip count if possible.
compute_trip_count(phase);
// Convert one iteration loop into normal code.
- if (policy_do_one_iteration_loop(phase))
+ if (do_one_iteration_loop(phase)) {
return true;
-
+ }
// Check and remove empty loops (spam micro-benchmarks)
- if (policy_do_remove_empty_loop(phase))
+ if (do_remove_empty_loop(phase)) {
return true; // Here we removed an empty loop
-
- bool should_peel = policy_peeling(phase); // Should we peel?
-
+ }
+
+ AutoNodeBudget node_budget(phase);
+
+ bool should_peel = policy_peeling(phase);
bool should_unswitch = policy_unswitching(phase);
// Non-counted loops may be peeled; exactly 1 iteration is peeled.
@@ -3144,22 +3213,15 @@
phase->do_unswitching(this, old_new);
return true;
}
- bool should_maximally_unroll = policy_maximally_unroll(phase);
+ bool should_maximally_unroll = policy_maximally_unroll(phase);
if (should_maximally_unroll) {
// Here we did some unrolling and peeling. Eventually we will
// completely unroll this loop and it will no longer be a loop.
- phase->do_maximally_unroll(this,old_new);
+ phase->do_maximally_unroll(this, old_new);
return true;
}
}
- // Skip next optimizations if running low on nodes. Note that
- // policy_unswitching and policy_maximally_unroll have this check.
- int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes();
- if ((int)(2 * _body.size()) > nodes_left) {
- return true;
- }
-
// Counted loops may be peeled, may need some iterations run up
// front for RCE, and may want to align loop refs to a cache
// line. Thus we clone a full loop up front whose trip count is
@@ -3173,26 +3235,28 @@
// unrolling), plus any needed for RCE purposes.
bool should_unroll = policy_unroll(phase);
-
- bool should_rce = policy_range_check(phase);
-
- bool should_align = policy_align(phase);
-
- // If not RCE'ing (iteration splitting) or Aligning, then we do not
- // need a pre-loop. We may still need to peel an initial iteration but
- // we will not be needing an unknown number of pre-iterations.
+ bool should_rce = policy_range_check(phase);
+ // TODO: Remove align -- not used.
+ bool should_align = policy_align(phase);
+
+ // If not RCE'ing (iteration splitting) or Aligning, then we do not need a
+ // pre-loop. We may still need to peel an initial iteration but we will not
+ // be needing an unknown number of pre-iterations.
//
- // Basically, if may_rce_align reports FALSE first time through,
- // we will not be able to later do RCE or Aligning on this loop.
+ // Basically, if may_rce_align reports FALSE first time through, we will not
+ // be able to later do RCE or Aligning on this loop.
bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
// If we have any of these conditions (RCE, alignment, unrolling) met, then
// we switch to the pre-/main-/post-loop model. This model also covers
// peeling.
if (should_rce || should_align || should_unroll) {
- if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops
+ if (cl->is_normal_loop()) { // Convert to 'pre/main/post' loops
+ if (!phase->may_require_nodes(est_loop_clone_sz(3, _body.size()))) {
+ return false;
+ }
phase->insert_pre_post_loops(this,old_new, !may_rce_align);
-
+ }
// Adjust the pre- and main-loop limits to let the pre and post loops run
// with full checks, but the main-loop with no checks. Remove said
// checks from the main body.
@@ -3223,14 +3287,14 @@
phase->do_unroll(this, old_new, true);
}
- // Adjust the pre-loop limits to align the main body
- // iterations.
- if (should_align)
+ // Adjust the pre-loop limits to align the main body iterations.
+ if (should_align) {
Unimplemented();
-
+ }
} else { // Else we have an unchanged counted loop
- if (should_peel) // Might want to peel but do nothing else
+ if (should_peel) { // Might want to peel but do nothing else
phase->do_peeling(this,old_new);
+ }
}
return true;
}
@@ -3238,35 +3302,32 @@
//=============================================================================
//------------------------------iteration_split--------------------------------
-bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
+bool IdealLoopTree::iteration_split(PhaseIdealLoop* phase, Node_List &old_new) {
// Recursively iteration split nested loops
- if (_child && !_child->iteration_split(phase, old_new))
+ if (_child && !_child->iteration_split(phase, old_new)) {
return false;
+ }
// Clean out prior deadwood
DCE_loop_body();
-
// Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
// Replace with a 1-in-10 exit guess.
- if (_parent /*not the root loop*/ &&
- !_irreducible &&
- // Also ignore the occasional dead backedge
- !tail()->is_top()) {
+ if (!is_root() && is_loop()) {
adjust_loop_exit_prob(phase);
}
- // Gate unrolling, RCE and peeling efforts.
- if (!_child && // If not an inner loop, do not split
- !_irreducible &&
- _allow_optimizations &&
- !tail()->is_top()) { // Also ignore the occasional dead backedge
+ // Unrolling, RCE and peeling efforts, iff innermost loop.
+ if (_allow_optimizations && is_innermost()) {
if (!_has_call) {
- if (!iteration_split_impl(phase, old_new)) {
- return false;
- }
- } else if (policy_unswitching(phase)) {
- phase->do_unswitching(this, old_new);
+ if (!iteration_split_impl(phase, old_new)) {
+ return false;
+ }
+ } else {
+ AutoNodeBudget node_budget(phase);
+ if (policy_unswitching(phase)) {
+ phase->do_unswitching(this, old_new);
+ }
}
}
@@ -3274,8 +3335,9 @@
// trip counter when there was no major reshaping.
phase->reorg_offsets(this);
- if (_next && !_next->iteration_split(phase, old_new))
+ if (_next && !_next->iteration_split(phase, old_new)) {
return false;
+ }
return true;
}
@@ -3537,7 +3599,7 @@
bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
// Only for counted inner loops
- if (!lpt->is_counted() || !lpt->is_inner()) {
+ if (!lpt->is_counted() || !lpt->is_innermost()) {
return false;
}
--- a/src/hotspot/share/opto/loopUnswitch.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/opto/loopUnswitch.cpp Mon May 06 09:43:48 2019 +0100
@@ -55,27 +55,31 @@
// Return TRUE or FALSE if the loop should be unswitched
// (ie. clone loop with an invariant test that does not exit the loop)
bool IdealLoopTree::policy_unswitching( PhaseIdealLoop *phase ) const {
- if( !LoopUnswitching ) {
+ if (!LoopUnswitching) {
return false;
}
if (!_head->is_Loop()) {
return false;
}
+ // If nodes are depleted, some transform has miscalculated its needs.
+ assert(!phase->exceeding_node_budget(), "sanity");
+
// check for vectorized loops, any unswitching was already applied
- if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) {
+ if (_head->is_CountedLoop() && _head->as_CountedLoop()->is_unroll_only()) {
return false;
}
- int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes();
- if ((int)(2 * _body.size()) > nodes_left) {
- return false; // Too speculative if running low on nodes.
- }
LoopNode* head = _head->as_Loop();
if (head->unswitch_count() + 1 > head->unswitch_max()) {
return false;
}
- return phase->find_unswitching_candidate(this) != NULL;
+ if (phase->find_unswitching_candidate(this) == NULL) {
+ return false;
+ }
+
+ // Too speculative if running low on nodes.
+ return phase->may_require_nodes(est_loop_clone_sz(3, _body.size()));
}
//------------------------------find_unswitching_candidate-----------------------------
--- a/src/hotspot/share/opto/loopnode.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/opto/loopnode.cpp Mon May 06 09:43:48 2019 +0100
@@ -42,17 +42,13 @@
#include "opto/superword.hpp"
//=============================================================================
-//------------------------------is_loop_iv-------------------------------------
-// Determine if a node is Counted loop induction variable.
-// The method is declared in node.hpp.
-const Node* Node::is_loop_iv() const {
- if (this->is_Phi() && !this->as_Phi()->is_copy() &&
- this->as_Phi()->region()->is_CountedLoop() &&
- this->as_Phi()->region()->as_CountedLoop()->phi() == this) {
- return this;
- } else {
- return NULL;
- }
+//--------------------------is_cloop_ind_var-----------------------------------
+// Determine if a node is a counted loop induction variable.
+// NOTE: The method is declared in "node.hpp".
+bool Node::is_cloop_ind_var() const {
+ return (is_Phi() && !as_Phi()->is_copy() &&
+ as_Phi()->region()->is_CountedLoop() &&
+ as_Phi()->region()->as_CountedLoop()->phi() == this);
}
//=============================================================================
@@ -2942,14 +2938,15 @@
}
if (ReassociateInvariants) {
+ AutoNodeBudget node_budget(this, AutoNodeBudget::NO_BUDGET_CHECK);
// Reassociate invariants and prep for split_thru_phi
for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
IdealLoopTree* lpt = iter.current();
bool is_counted = lpt->is_counted();
- if (!is_counted || !lpt->is_inner()) continue;
+ if (!is_counted || !lpt->is_innermost()) continue;
// check for vectorized loops, any reassociation of invariants was already done
- if (is_counted && lpt->_head->as_CountedLoop()->do_unroll_only()) continue;
+ if (is_counted && lpt->_head->as_CountedLoop()->is_unroll_only()) continue;
lpt->reassociate_invariants(this);
--- a/src/hotspot/share/opto/loopnode.hpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/opto/loopnode.hpp Mon May 06 09:43:48 2019 +0100
@@ -264,7 +264,7 @@
bool is_reduction_loop() const { return (_loop_flags&HasReductions) == HasReductions; }
bool was_slp_analyzed () const { return (_loop_flags&WasSlpAnalyzed) == WasSlpAnalyzed; }
bool has_passed_slp () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
- bool do_unroll_only () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; }
+ bool is_unroll_only () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; }
bool is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
bool has_atomic_post_loop () const { return (_loop_flags & HasAtomicPostLoop) == HasAtomicPostLoop; }
void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }
@@ -370,26 +370,49 @@
};
-inline CountedLoopEndNode *CountedLoopNode::loopexit_or_null() const {
- Node *bc = back_control();
- if( bc == NULL ) return NULL;
- Node *le = bc->in(0);
- if( le->Opcode() != Op_CountedLoopEnd )
- return NULL;
- return (CountedLoopEndNode*)le;
+inline CountedLoopEndNode* CountedLoopNode::loopexit_or_null() const {
+ Node* bctrl = back_control();
+ if (bctrl == NULL) return NULL;
+
+ Node* lexit = bctrl->in(0);
+ return (CountedLoopEndNode*)
+ (lexit->Opcode() == Op_CountedLoopEnd ? lexit : NULL);
}
-inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
+
+inline CountedLoopEndNode* CountedLoopNode::loopexit() const {
CountedLoopEndNode* cle = loopexit_or_null();
assert(cle != NULL, "loopexit is NULL");
return cle;
}
-inline Node *CountedLoopNode::init_trip() const { return loopexit_or_null() ? loopexit()->init_trip() : NULL; }
-inline Node *CountedLoopNode::stride() const { return loopexit_or_null() ? loopexit()->stride() : NULL; }
-inline int CountedLoopNode::stride_con() const { return loopexit_or_null() ? loopexit()->stride_con() : 0; }
-inline bool CountedLoopNode::stride_is_con() const { return loopexit_or_null() && loopexit()->stride_is_con(); }
-inline Node *CountedLoopNode::limit() const { return loopexit_or_null() ? loopexit()->limit() : NULL; }
-inline Node *CountedLoopNode::incr() const { return loopexit_or_null() ? loopexit()->incr() : NULL; }
-inline Node *CountedLoopNode::phi() const { return loopexit_or_null() ? loopexit()->phi() : NULL; }
+
+inline Node* CountedLoopNode::init_trip() const {
+ CountedLoopEndNode* cle = loopexit_or_null();
+ return cle != NULL ? cle->init_trip() : NULL;
+}
+inline Node* CountedLoopNode::stride() const {
+ CountedLoopEndNode* cle = loopexit_or_null();
+ return cle != NULL ? cle->stride() : NULL;
+}
+inline int CountedLoopNode::stride_con() const {
+ CountedLoopEndNode* cle = loopexit_or_null();
+ return cle != NULL ? cle->stride_con() : 0;
+}
+inline bool CountedLoopNode::stride_is_con() const {
+ CountedLoopEndNode* cle = loopexit_or_null();
+ return cle != NULL && cle->stride_is_con();
+}
+inline Node* CountedLoopNode::limit() const {
+ CountedLoopEndNode* cle = loopexit_or_null();
+ return cle != NULL ? cle->limit() : NULL;
+}
+inline Node* CountedLoopNode::incr() const {
+ CountedLoopEndNode* cle = loopexit_or_null();
+ return cle != NULL ? cle->incr() : NULL;
+}
+inline Node* CountedLoopNode::phi() const {
+ CountedLoopEndNode* cle = loopexit_or_null();
+ return cle != NULL ? cle->phi() : NULL;
+}
//------------------------------LoopLimitNode-----------------------------
// Counted Loop limit node which represents exact final iterator value:
@@ -456,9 +479,9 @@
IdealLoopTree *_child; // First child in loop tree
// The head-tail backedge defines the loop.
- // If tail is NULL then this loop has multiple backedges as part of the
- // same loop. During cleanup I'll peel off the multiple backedges; merge
- // them at the loop bottom and flow 1 real backedge into the loop.
+ // If a loop has multiple backedges, this is addressed during cleanup where
+ // we peel off the multiple backedges, merging all edges at the bottom and
+ // ensuring that one proper backedge flows into the loop.
Node *_head; // Head of loop
Node *_tail; // Tail of loop
inline Node *tail(); // Handle lazy update of _tail field
@@ -487,7 +510,10 @@
_safepts(NULL),
_required_safept(NULL),
_allow_optimizations(true)
- { }
+ {
+ precond(_head != NULL);
+ precond(_tail != NULL);
+ }
// Is 'l' a member of 'this'?
bool is_member(const IdealLoopTree *l) const; // Test for nested membership
@@ -558,10 +584,10 @@
bool policy_unswitching( PhaseIdealLoop *phase ) const;
// Micro-benchmark spamming. Remove empty loops.
- bool policy_do_remove_empty_loop( PhaseIdealLoop *phase );
+ bool do_remove_empty_loop( PhaseIdealLoop *phase );
// Convert one iteration loop into normal code.
- bool policy_do_one_iteration_loop( PhaseIdealLoop *phase );
+ bool do_one_iteration_loop( PhaseIdealLoop *phase );
// Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
// make some loop-invariant test (usually a null-check) happen before the
@@ -615,9 +641,11 @@
// Put loop body on igvn work list
void record_for_igvn();
- bool is_loop() { return !_irreducible && _tail && !_tail->is_top(); }
- bool is_inner() { return is_loop() && _child == NULL; }
- bool is_counted() { return is_loop() && _head != NULL && _head->is_CountedLoop(); }
+ bool is_root() { return _parent == NULL; }
+ // A proper/reducible loop w/o any (occasional) dead back-edge.
+ bool is_loop() { return !_irreducible && !tail()->is_top(); }
+ bool is_counted() { return is_loop() && _head->is_CountedLoop(); }
+ bool is_innermost() { return is_loop() && _child == NULL; }
void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);
@@ -630,13 +658,14 @@
};
// -----------------------------PhaseIdealLoop---------------------------------
-// Computes the mapping from Nodes to IdealLoopTrees. Organizes IdealLoopTrees into a
-// loop tree. Drives the loop-based transformations on the ideal graph.
+// Computes the mapping from Nodes to IdealLoopTrees. Organizes IdealLoopTrees
+// into a loop tree. Drives the loop-based transformations on the ideal graph.
class PhaseIdealLoop : public PhaseTransform {
friend class IdealLoopTree;
friend class SuperWord;
friend class CountedLoopReserveKit;
friend class ShenandoahBarrierC2Support;
+ friend class AutoNodeBudget;
// Pre-computed def-use info
PhaseIterGVN &_igvn;
@@ -731,8 +760,7 @@
}
Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );
- // Helper function for directing control inputs away from CFG split
- // points.
+ // Helper function for directing control inputs away from CFG split points.
Node *find_non_split_ctrl( Node *ctrl ) const {
if (ctrl != NULL) {
if (ctrl->is_MultiBranch()) {
@@ -883,7 +911,8 @@
_igvn(igvn),
_verify_me(NULL),
_verify_only(true),
- _dom_lca_tags(arena()) { // Thread::resource_area
+ _dom_lca_tags(arena()), // Thread::resource_area
+ _nodes_required(UINT_MAX) {
build_and_optimize(LoopOptsVerify);
}
@@ -899,7 +928,8 @@
_igvn(igvn),
_verify_me(NULL),
_verify_only(false),
- _dom_lca_tags(arena()) { // Thread::resource_area
+ _dom_lca_tags(arena()), // Thread::resource_area
+ _nodes_required(UINT_MAX) {
build_and_optimize(mode);
}
@@ -909,7 +939,8 @@
_igvn(igvn),
_verify_me(verify_me),
_verify_only(false),
- _dom_lca_tags(arena()) { // Thread::resource_area
+ _dom_lca_tags(arena()), // Thread::resource_area
+ _nodes_required(UINT_MAX) {
build_and_optimize(LoopOptsVerify);
}
@@ -1320,8 +1351,54 @@
return C->live_nodes() > threshold;
}
+ // A simplistic node request tracking mechanism, where
+ // = UINT_MAX Request not valid or made final.
+ // < UINT_MAX Nodes currently requested (estimate).
+ uint _nodes_required;
+
+ bool exceeding_node_budget(uint required = 0) {
+ assert(C->live_nodes() < C->max_node_limit(), "sanity");
+ uint available = C->max_node_limit() - C->live_nodes();
+ return available < required + _nodes_required;
+ }
+
+ uint require_nodes(uint require) {
+ precond(require > 0);
+ _nodes_required += MAX2(100u, require); // Keep requests at minimum 100.
+ return _nodes_required;
+ }
+
+ bool may_require_nodes(uint require) {
+ return !exceeding_node_budget(require) && require_nodes(require) > 0;
+ }
+
+ void require_nodes_begin() {
+ assert(_nodes_required == UINT_MAX, "Bad state (begin).");
+ _nodes_required = 0;
+ }
+
+ // Final check that the requested nodes did not exceed the limit and that
+ // the request was reasonably correct with respect to the number of new
+ // nodes introduced by any transform since the last 'begin'.
+ void require_nodes_final_check(uint live_at_begin) {
+ uint required = _nodes_required;
+ require_nodes_final();
+ uint delta = C->live_nodes() - live_at_begin;
+ assert(delta <= 2 * required, "Bad node estimate (actual: %d, request: %d)",
+ delta, required);
+ }
+
+ void require_nodes_final() {
+ assert(_nodes_required < UINT_MAX, "Bad state (final).");
+ assert(!exceeding_node_budget(), "Too many NODES required!");
+ _nodes_required = UINT_MAX;
+ }
+
bool _created_loop_node;
+
public:
+ uint nodes_required() const { return _nodes_required; }
+
void set_created_loop_node() { _created_loop_node = true; }
bool created_loop_node() { return _created_loop_node; }
void register_new_node( Node *n, Node *blk );
@@ -1347,6 +1424,62 @@
void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const;
};
+
+class AutoNodeBudget : public StackObj
+{
+public:
+ enum budget_check_t { BUDGET_CHECK, NO_BUDGET_CHECK };
+
+ AutoNodeBudget(PhaseIdealLoop* phase, budget_check_t chk = BUDGET_CHECK)
+ : _phase(phase),
+ _check_at_final(chk == BUDGET_CHECK),
+ _nodes_at_begin(0)
+ {
+ precond(_phase != NULL);
+
+ _nodes_at_begin = _phase->C->live_nodes();
+ _phase->require_nodes_begin();
+ }
+
+ ~AutoNodeBudget() {
+ if (_check_at_final) {
+#ifndef PRODUCT
+ if (TraceLoopOpts) {
+ uint request = _phase->nodes_required();
+
+ if (request > 0) {
+ uint delta = _phase->C->live_nodes() - _nodes_at_begin;
+
+ if (request < delta) {
+ tty->print_cr("Exceeding node budget: %d < %d", request, delta);
+ }
+ }
+ }
+#endif
+ _phase->require_nodes_final_check(_nodes_at_begin);
+ } else {
+ _phase->require_nodes_final();
+ }
+ }
+
+private:
+ PhaseIdealLoop* _phase;
+ bool _check_at_final;
+ uint _nodes_at_begin;
+};
+
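
To make the begin/request/final protocol concrete, here is a self-contained model of the pattern (plain C++, not HotSpot code; all names are illustrative stand-ins). The RAII guard opens a request window, transforms reserve an estimate via may_require_nodes() before cloning, and the destructor verifies the estimate was honest:

#include <cassert>
#include <climits>

struct BudgetedPhase {                      // stand-in for PhaseIdealLoop
  unsigned live_nodes = 10000;              // stand-in for C->live_nodes()
  unsigned node_limit = 80000;              // stand-in for C->max_node_limit()
  unsigned required   = UINT_MAX;           // UINT_MAX == no open request window

  bool exceeding_budget(unsigned need = 0) {
    return node_limit - live_nodes < need + required;
  }
  // Reserve 'need' nodes if the budget allows; transforms call this first.
  bool may_require_nodes(unsigned need) {
    if (exceeding_budget(need)) return false;
    required += need > 100 ? need : 100;    // keep requests at minimum 100
    return true;
  }
};

struct AutoBudget {                         // stand-in for AutoNodeBudget
  BudgetedPhase& p;
  unsigned live_at_begin;
  explicit AutoBudget(BudgetedPhase& ph) : p(ph), live_at_begin(ph.live_nodes) {
    assert(p.required == UINT_MAX);         // no nested window
    p.required = 0;                         // require_nodes_begin()
  }
  ~AutoBudget() {                           // require_nodes_final_check()
    unsigned delta = p.live_nodes - live_at_begin;
    assert(delta <= 2 * p.required && "bad node estimate");
    p.required = UINT_MAX;
  }
};

int main() {
  BudgetedPhase phase;
  {
    AutoBudget guard(phase);
    if (phase.may_require_nodes(3 * (200 + 31) + 41)) { // est_loop_clone_sz(3, 200)
      phase.live_nodes += 700;              // the transform actually clones nodes
    }
  }                                         // destructor checks the estimate
  return 0;
}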
+// The Estimated Loop Clone Size: CloneFactor * (BodySize + BC) + CC, where BC
+// and CC are totally ad-hoc/magic "body" and "clone" constants, respectively,
+// used to ensure that node usage estimates made are on the safe side, for the
+// most part.
+static inline uint est_loop_clone_sz(uint fact, uint size) {
+ uint const bc = 31;
+ uint const cc = 41;
+ uint estimate = fact * (size + bc) + cc;
+ return (estimate - cc) / fact == size + bc ? estimate : UINT_MAX;
+}
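
A quick sanity check of the estimate arithmetic and its overflow guard (the helper body is copied from the patch, modulo spelling 'uint' as 'unsigned'; the numbers in main are just worked examples, assuming 32-bit unsigned as for HotSpot's uint):

#include <cassert>
#include <climits>

static inline unsigned est_loop_clone_sz(unsigned fact, unsigned size) {
  unsigned const bc = 31;
  unsigned const cc = 41;
  unsigned estimate = fact * (size + bc) + cc;
  // Division undoes the multiply only if it did not wrap around.
  return (estimate - cc) / fact == size + bc ? estimate : UINT_MAX;
}

int main() {
  // Pre/main/post cloning (factor 3) of a 200-node body:
  // 3 * (200 + 31) + 41 == 734.
  assert(est_loop_clone_sz(3, 200) == 734);
  // A huge body makes the multiply wrap; the guard reports UINT_MAX,
  // so may_require_nodes() will always refuse the request.
  assert(est_loop_clone_sz(3, 2000000000u) == UINT_MAX);
  return 0;
}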
+
+
// This kit may be used for making of a reserved copy of a loop before this loop
// goes under non-reversible changes.
//
@@ -1410,14 +1543,11 @@
};// class CountedLoopReserveKit
inline Node* IdealLoopTree::tail() {
-// Handle lazy update of _tail field
- Node *n = _tail;
- //while( !n->in(0) ) // Skip dead CFG nodes
- //n = n->in(1);
- if (n->in(0) == NULL)
- n = _phase->get_ctrl(n);
- _tail = n;
- return n;
+ // Handle lazy update of _tail field.
+ if (_tail->in(0) == NULL) {
+ _tail = _phase->get_ctrl(_tail);
+ }
+ return _tail;
}
--- a/src/hotspot/share/opto/loopopts.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/opto/loopopts.cpp Mon May 06 09:43:48 2019 +0100
@@ -2662,7 +2662,7 @@
assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
get_loop(use_c)->_body.push(n_clone);
_igvn.register_new_node_with_optimizer(n_clone);
-#if !defined(PRODUCT)
+#ifndef PRODUCT
if (TracePartialPeeling) {
tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
}
@@ -2700,7 +2700,7 @@
set_ctrl(n_clone, get_ctrl(n));
sink_list.push(n_clone);
not_peel <<= n_clone->_idx; // add n_clone to not_peel set.
-#if !defined(PRODUCT)
+#ifndef PRODUCT
if (TracePartialPeeling) {
tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
}
@@ -3046,7 +3046,7 @@
opc == Op_CatchProj ||
opc == Op_Jump ||
opc == Op_JumpProj) {
-#if !defined(PRODUCT)
+#ifndef PRODUCT
if (TracePartialPeeling) {
tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
}
@@ -3102,7 +3102,7 @@
return false;
}
-#if !defined(PRODUCT)
+#ifndef PRODUCT
if (TraceLoopOpts) {
tty->print("PartialPeel ");
loop->dump_head();
@@ -3131,6 +3131,10 @@
Node_List worklist(area);
Node_List sink_list(area);
+ if (!may_require_nodes(est_loop_clone_sz(2, loop->_body.size()))) {
+ return false;
+ }
+
// Set of cfg nodes to peel are those that are executable from
// the head through last_peel.
assert(worklist.size() == 0, "should be empty");
@@ -3179,7 +3183,7 @@
if (use->is_Phi()) old_phi_cnt++;
}
-#if !defined(PRODUCT)
+#ifndef PRODUCT
if (TracePartialPeeling) {
tty->print_cr("\npeeled list");
}
@@ -3190,7 +3194,7 @@
uint cloned_for_outside_use = 0;
for (i = 0; i < peel_list.size();) {
Node* n = peel_list.at(i);
-#if !defined(PRODUCT)
+#ifndef PRODUCT
if (TracePartialPeeling) n->dump();
#endif
bool incr = true;
@@ -3212,7 +3216,7 @@
not_peel <<= n->_idx; // add n to not_peel set.
peel_list.remove(i);
incr = false;
-#if !defined(PRODUCT)
+#ifndef PRODUCT
if (TracePartialPeeling) {
tty->print_cr("sink to not_peeled region: %d newbb: %d",
n->_idx, get_ctrl(n)->_idx);
@@ -3231,7 +3235,7 @@
}
if (new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta) {
-#if !defined(PRODUCT)
+#ifndef PRODUCT
if (TracePartialPeeling) {
tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c",
new_phi_cnt, old_phi_cnt, new_peel_if != NULL?'T':'F');
@@ -3389,7 +3393,7 @@
C->set_major_progress();
loop->record_for_igvn();
-#if !defined(PRODUCT)
+#ifndef PRODUCT
if (TracePartialPeeling) {
tty->print_cr("\nafter partial peel one iteration");
Node_List wl(area);
@@ -3429,10 +3433,10 @@
Node *exit = cle->proj_out(false);
Node *phi = cl->phi();
- // Check for the special case of folks using the pre-incremented
- // trip-counter on the fall-out path (forces the pre-incremented
- // and post-incremented trip counter to be live at the same time).
- // Fix this by adjusting to use the post-increment trip counter.
+ // Check for the special case when using the pre-incremented trip-counter on
+ // the fall-out path (forces the pre-incremented and post-incremented trip
+ // counter to be live at the same time). Fix this by adjusting to use the
+ // post-increment trip counter.
bool progress = true;
while (progress) {
--- a/src/hotspot/share/opto/node.hpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/opto/node.hpp Mon May 06 09:43:48 2019 +0100
@@ -1007,9 +1007,9 @@
// value, if it appears (by local graph inspection) to be computed by a simple conditional.
bool is_iteratively_computed();
- // Determine if a node is Counted loop induction variable.
- // The method is defined in loopnode.cpp.
- const Node* is_loop_iv() const;
+ // Determine if a node is a counted loop induction variable.
+ // NOTE: The method is defined in "loopnode.cpp".
+ bool is_cloop_ind_var() const;
// Return a node with opcode "opc" and same inputs as "this" if one can
// be found; Otherwise return NULL;
--- a/src/hotspot/share/opto/subnode.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/opto/subnode.cpp Mon May 06 09:43:48 2019 +0100
@@ -114,31 +114,37 @@
}
//=============================================================================
+//------------------------------Helper function--------------------------------
-//------------------------------Helper function--------------------------------
-static bool ok_to_convert(Node* inc, Node* iv) {
- // Do not collapse (x+c0)-y if "+" is a loop increment, because the
- // "-" is loop invariant and collapsing extends the live-range of "x"
- // to overlap with the "+", forcing another register to be used in
- // the loop.
- // This test will be clearer with '&&' (apply DeMorgan's rule)
- // but I like the early cutouts that happen here.
- const PhiNode *phi;
- if( ( !inc->in(1)->is_Phi() ||
- !(phi=inc->in(1)->as_Phi()) ||
- phi->is_copy() ||
- !phi->region()->is_CountedLoop() ||
- inc != phi->region()->as_CountedLoop()->incr() )
- &&
- // Do not collapse (x+c0)-iv if "iv" is a loop induction variable,
- // because "x" maybe invariant.
- ( !iv->is_loop_iv() )
- ) {
- return true;
- } else {
- return false;
- }
+static bool is_cloop_increment(Node* inc) {
+ precond(inc->Opcode() == Op_AddI || inc->Opcode() == Op_AddL);
+
+ if (!inc->in(1)->is_Phi()) {
+ return false;
+ }
+ const PhiNode* phi = inc->in(1)->as_Phi();
+
+ if (phi->is_copy() || !phi->region()->is_CountedLoop()) {
+ return false;
+ }
+
+ return inc == phi->region()->as_CountedLoop()->incr();
}
+
+// Given the expression '(x + C) - v', or
+// 'v - (x + C)', we examine nodes '+' and 'v':
+//
+// 1. Do not convert if '+' is a counted-loop increment, because the '-' is
+// loop invariant and converting extends the live-range of 'x' to overlap
+// with the '+', forcing another register to be used in the loop.
+//
+// 2. Do not convert if 'v' is a counted-loop induction variable, because
+// 'x' might be invariant.
+//
+static bool ok_to_convert(Node* inc, Node* var) {
+ return !(is_cloop_increment(inc) || var->is_cloop_ind_var());
+}
+
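
To illustrate the two rules above (a source-level analogy only; C2 works on ideal nodes, and the loop below is merely shaped to produce the patterns in question):

// Sketch: 'i' models the counted-loop induction variable (the iv Phi),
// 'i + 1' the loop increment (the AddI feeding the Phi), and 'inv' a
// loop-invariant value.
static int sub_shapes(int n, int inv) {
  int acc = 0;
  for (int i = 0; i < n; ++i) {
    int inc = i + 1;
    // Rule 1: '(i + 1) - inv' stays as-is. Rewriting it as '(i - inv) + 1'
    // keeps 'i' live across the subtraction as well as the increment,
    // costing an extra register in the loop.
    acc += inc - inv;
    // Rule 2: 'v - i' where 'i' is the induction variable is also left
    // alone: '(inv + 7)' is loop invariant and hoistable, and collapsing
    // to '(inv - i) + 7' would destroy that invariant subexpression.
    acc += (inv + 7) - i;
  }
  return acc;
}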
//------------------------------Ideal------------------------------------------
Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){
Node *in1 = in(1);
--- a/src/hotspot/share/opto/superword.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/opto/superword.cpp Mon May 06 09:43:48 2019 +0100
@@ -145,7 +145,7 @@
// Skip any loops already optimized by slp
if (cl->is_vectorized_loop()) return;
- if (cl->do_unroll_only()) return;
+ if (cl->is_unroll_only()) return;
if (cl->is_main_loop()) {
// Check for pre-loop ending with CountedLoopEnd(Bool(Cmp(x,Opaque1(limit))))
--- a/src/hotspot/share/runtime/biasedLocking.cpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/runtime/biasedLocking.cpp Mon May 06 09:43:48 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -499,6 +499,7 @@
JavaThread* _requesting_thread;
BiasedLocking::Condition _status_code;
traceid _biased_locker_id;
+ uint64_t _safepoint_id;
public:
VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
@@ -506,14 +507,16 @@
, _objs(NULL)
, _requesting_thread(requesting_thread)
, _status_code(BiasedLocking::NOT_BIASED)
- , _biased_locker_id(0) {}
+ , _biased_locker_id(0)
+ , _safepoint_id(0) {}
VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
: _obj(NULL)
, _objs(objs)
, _requesting_thread(requesting_thread)
, _status_code(BiasedLocking::NOT_BIASED)
- , _biased_locker_id(0) {}
+ , _biased_locker_id(0)
+ , _safepoint_id(0) {}
virtual VMOp_Type type() const { return VMOp_RevokeBias; }
@@ -545,6 +548,7 @@
if (biased_locker != NULL) {
_biased_locker_id = JFR_THREAD_ID(biased_locker);
}
+ _safepoint_id = SafepointSynchronize::safepoint_counter();
clean_up_cached_monitor_info();
return;
} else {
@@ -560,6 +564,10 @@
traceid biased_locker() const {
return _biased_locker_id;
}
+
+ uint64_t safepoint_id() const {
+ return _safepoint_id;
+ }
};
@@ -581,17 +589,15 @@
virtual void doit() {
_status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
+ _safepoint_id = SafepointSynchronize::safepoint_counter();
clean_up_cached_monitor_info();
}
+
+ bool is_bulk_rebias() const {
+ return _bulk_rebias;
+ }
};
-template <typename E>
-static void set_safepoint_id(E* event) {
- assert(event != NULL, "invariant");
- // Subtract 1 to match the id of events committed inside the safepoint
- event->set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
-}
-
static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) {
assert(event != NULL, "invariant");
assert(k != NULL, "invariant");
@@ -600,24 +606,25 @@
event->commit();
}
-static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, VM_RevokeBias* revoke) {
+static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, VM_RevokeBias* op) {
assert(event != NULL, "invariant");
assert(k != NULL, "invariant");
- assert(revoke != NULL, "invariant");
+ assert(op != NULL, "invariant");
assert(event->should_commit(), "invariant");
event->set_lockClass(k);
- set_safepoint_id(event);
- event->set_previousOwner(revoke->biased_locker());
+ event->set_safepointId(op->safepoint_id());
+ event->set_previousOwner(op->biased_locker());
event->commit();
}
-static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, bool disabled_bias) {
+static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, VM_BulkRevokeBias* op) {
assert(event != NULL, "invariant");
assert(k != NULL, "invariant");
+ assert(op != NULL, "invariant");
assert(event->should_commit(), "invariant");
event->set_revokedClass(k);
- event->set_disableBiasing(disabled_bias);
- set_safepoint_id(event);
+ event->set_disableBiasing(!op->is_bulk_rebias());
+ event->set_safepointId(op->safepoint_id());
event->commit();
}
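
The shape of this fix, reduced to its essence (illustrative stand-ins, not the real VM types): the operation records the safepoint id while it executes, and event posting reads it back instead of reconstructing it from the global counter.

#include <cstdint>
#include <cstdio>

static uint64_t g_safepoint_counter = 41;   // stand-in for SafepointSynchronize

struct RevokeOp {                           // stand-in for VM_RevokeBias
  uint64_t _safepoint_id = 0;
  void doit() {
    ++g_safepoint_counter;                  // the op runs inside a safepoint
    _safepoint_id = g_safepoint_counter;    // capture while it is current
  }
  uint64_t safepoint_id() const { return _safepoint_id; }
};

int main() {
  RevokeOp op;
  op.doit();
  // Event posting now uses the captured id; no more 'counter - 1' guessing,
  // which could drift when other safepoints ran in between.
  std::printf("safepointId=%llu\n", (unsigned long long) op.safepoint_id());
  return 0;
}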
@@ -729,7 +736,7 @@
attempt_rebias);
VMThread::execute(&bulk_revoke);
if (event.should_commit()) {
- post_class_revocation_event(&event, obj->klass(), heuristics != HR_BULK_REBIAS);
+ post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
}
return bulk_revoke.status_code();
}
--- a/src/hotspot/share/utilities/debug.hpp Mon May 06 09:43:26 2019 +0100
+++ b/src/hotspot/share/utilities/debug.hpp Mon May 06 09:43:48 2019 +0100
@@ -63,6 +63,9 @@
// For backward compatibility.
#define assert(p, ...) vmassert(p, __VA_ARGS__)
+#define precond(p) assert(p, "precond")
+#define postcond(p) assert(p, "postcond")
+
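
A small usage sketch of the new macros (standalone: here they are mapped to plain assert, whereas in HotSpot they expand to vmassert and compile away in product builds; the function is hypothetical):

#include <cassert>

#define precond(p)  assert((p) && "precond")
#define postcond(p) assert((p) && "postcond")

// Contract style: preconditions document what the caller must guarantee,
// postconditions what the function itself guarantees.
static unsigned budget_left(unsigned limit, unsigned used) {
  precond(used <= limit);        // caller's obligation
  unsigned left = limit - used;
  postcond(left <= limit);       // our guarantee: no wrap-around happened
  return left;
}

int main() {
  return budget_left(100, 40) == 60 ? 0 : 1;
}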
#ifndef ASSERT
#define vmassert_status(p, status, msg)
#else
--- a/src/java.base/share/classes/java/nio/file/FileSystems.java Mon May 06 09:43:26 2019 +0100
+++ b/src/java.base/share/classes/java/nio/file/FileSystems.java Mon May 06 09:43:48 2019 +0100
@@ -252,10 +252,8 @@
* Suppose there is a provider identified by the scheme {@code "memory"}
* installed:
* <pre>
- * Map<String,String> env = new HashMap<>();
- * env.put("capacity", "16G");
- * env.put("blockSize", "4k");
- * FileSystem fs = FileSystems.newFileSystem(URI.create("memory:///?name=logfs"), env);
+ * FileSystem fs = FileSystems.newFileSystem(URI.create("memory:///?name=logfs"),
+ * Map.of("capacity", "16G", "blockSize", "4k"));
* </pre>
*
* @param uri
--- a/src/java.base/share/classes/java/util/Objects.java Mon May 06 09:43:26 2019 +0100
+++ b/src/java.base/share/classes/java/util/Objects.java Mon May 06 09:43:48 2019 +0100
@@ -62,10 +62,11 @@
* Returns {@code true} if the arguments are equal to each other
* and {@code false} otherwise.
* Consequently, if both arguments are {@code null}, {@code true}
- * is returned and if exactly one argument is {@code null}, {@code
- * false} is returned. Otherwise, equality is determined by using
- * the {@link Object#equals equals} method of the first
- * argument.
+ * is returned. Otherwise, if the first argument is not {@code
+ * null}, equality is determined by calling the {@link
+ * Object#equals equals} method of the first argument with the
+ * second argument of this method. Otherwise, {@code false} is
+ * returned.
*
* @param a an object
* @param b an object to be compared with {@code a} for equality
--- a/src/java.base/share/classes/java/util/regex/Pattern.java Mon May 06 09:43:26 2019 +0100
+++ b/src/java.base/share/classes/java/util/regex/Pattern.java Mon May 06 09:43:48 2019 +0100
@@ -1678,7 +1678,13 @@
return;
int j = i;
i += 2;
- int[] newtemp = new int[j + 3*(pLen-i) + 2];
+ int newTempLen;
+ try {
+ newTempLen = Math.addExact(j + 2, Math.multiplyExact(3, pLen - i));
+ } catch (ArithmeticException ae) {
+ throw new OutOfMemoryError();
+ }
+ int[] newtemp = new int[newTempLen];
System.arraycopy(temp, 0, newtemp, 0, j);
boolean inQuote = true;
--- a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java Mon May 06 09:43:26 2019 +0100
+++ b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java Mon May 06 09:43:48 2019 +0100
@@ -119,7 +119,6 @@
new LogMessageWithLevel("SystemDictionary Roots", Level.TRACE),
new LogMessageWithLevel("CLDG Roots", Level.TRACE),
new LogMessageWithLevel("JVMTI Roots", Level.TRACE),
- new LogMessageWithLevel("SATB Filtering", Level.TRACE),
new LogMessageWithLevel("CM RefProcessor Roots", Level.TRACE),
new LogMessageWithLevel("Wait For Strong CLD", Level.TRACE),
new LogMessageWithLevel("Weak CLD Roots", Level.TRACE),
--- a/test/hotspot/jtreg/runtime/8176717/TestInheritFD.java Mon May 06 09:43:26 2019 +0100
+++ b/test/hotspot/jtreg/runtime/8176717/TestInheritFD.java Mon May 06 09:43:48 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,7 +21,6 @@
* questions.
*/
-import static java.io.File.createTempFile;
import static java.lang.Long.parseLong;
import static java.lang.System.getProperty;
import static java.nio.file.Files.readAllBytes;
@@ -30,6 +29,7 @@
import static java.util.stream.Collectors.toList;
import static jdk.test.lib.process.ProcessTools.createJavaProcessBuilder;
import static jdk.test.lib.Platform.isWindows;
+import jdk.test.lib.Utils;
import java.io.BufferedReader;
import java.io.File;
@@ -43,7 +43,7 @@
/*
* @test TestInheritFD
- * @bug 8176717 8176809
+ * @bug 8176717 8176809 8222500
* @summary a new process should not inherit open file descriptors
* @comment On Aix lsof requires root privileges.
* @requires os.family != "aix"
@@ -79,8 +79,8 @@
// first VM
public static void main(String[] args) throws Exception {
- String logPath = createTempFile("logging", LOG_SUFFIX).getName();
- File commFile = createTempFile("communication", ".txt");
+ String logPath = Utils.createTempFile("logging", LOG_SUFFIX).toFile().getName();
+ File commFile = Utils.createTempFile("communication", ".txt").toFile();
if (!isWindows() && !lsofCommand().isPresent()) {
System.out.println("Could not find lsof like command");
--- a/test/jdk/ProblemList.txt Mon May 06 09:43:26 2019 +0100
+++ b/test/jdk/ProblemList.txt Mon May 06 09:43:48 2019 +0100
@@ -668,65 +668,7 @@
sun/security/provider/KeyStore/DKSTest.sh 8180266 windows-all
-sun/security/pkcs11/Cipher/ReinitCipher.java 8204203 windows-all
-sun/security/pkcs11/Cipher/TestPKCS5PaddingError.java 8204203 windows-all
-sun/security/pkcs11/Cipher/TestRSACipher.java 8204203 windows-all
-sun/security/pkcs11/Cipher/TestRSACipherWrap.java 8204203 windows-all
-sun/security/pkcs11/Cipher/TestRawRSACipher.java 8204203 windows-all
-sun/security/pkcs11/Cipher/TestSymmCiphers.java 8204203 windows-all
-sun/security/pkcs11/Cipher/TestSymmCiphersNoPad.java 8204203 windows-all
-sun/security/pkcs11/KeyAgreement/SupportedDHKeys.java 8204203 windows-all
-sun/security/pkcs11/KeyAgreement/TestDH.java 8204203 windows-all
-sun/security/pkcs11/KeyAgreement/TestInterop.java 8204203 windows-all
-sun/security/pkcs11/KeyAgreement/TestShort.java 8204203 windows-all
-sun/security/pkcs11/KeyAgreement/UnsupportedDHKeys.java 8204203 windows-all
-sun/security/pkcs11/KeyGenerator/DESParity.java 8204203 windows-all
-sun/security/pkcs11/KeyGenerator/TestKeyGenerator.java 8204203 windows-all
-sun/security/pkcs11/KeyPairGenerator/TestDH2048.java 8204203 windows-all
-sun/security/pkcs11/KeyStore/SecretKeysBasic.sh 8204203,8209398 generic-all
-sun/security/pkcs11/Mac/MacKAT.java 8204203 windows-all
-sun/security/pkcs11/Mac/MacSameTest.java 8204203 windows-all
-sun/security/pkcs11/Mac/ReinitMac.java 8204203 windows-all
-sun/security/pkcs11/MessageDigest/ByteBuffers.java 8204203 windows-all
-sun/security/pkcs11/MessageDigest/DigestKAT.java 8204203 windows-all
-sun/security/pkcs11/MessageDigest/ReinitDigest.java 8204203 windows-all
-sun/security/pkcs11/MessageDigest/TestCloning.java 8204203 windows-all
-sun/security/pkcs11/Provider/ConfigQuotedString.sh 8204203 windows-all
-sun/security/pkcs11/Provider/Login.sh 8204203 windows-all
-sun/security/pkcs11/SampleTest.java 8204203 windows-all
-sun/security/pkcs11/Secmod/AddPrivateKey.java 8204203 windows-all
-sun/security/pkcs11/Secmod/Crypto.java 8204203 windows-all
-sun/security/pkcs11/Secmod/GetPrivateKey.java 8204203 windows-all
-sun/security/pkcs11/Secmod/JksSetPrivateKey.java 8204203 windows-all
-sun/security/pkcs11/Secmod/LoadKeystore.java 8204203 windows-all
-sun/security/pkcs11/Secmod/TestNssDbSqlite.java 8204203 windows-all
-sun/security/pkcs11/SecureRandom/Basic.java 8204203 windows-all
-sun/security/pkcs11/SecureRandom/TestDeserialization.java 8204203 windows-all
-sun/security/pkcs11/Serialize/SerializeProvider.java 8204203 windows-all
-sun/security/pkcs11/Signature/ByteBuffers.java 8204203 windows-all
-sun/security/pkcs11/Signature/ReinitSignature.java 8204203 windows-all
-sun/security/pkcs11/Signature/TestDSA.java 8204203 windows-all
-sun/security/pkcs11/Signature/TestDSAKeyLength.java 8204203 windows-all
-sun/security/pkcs11/Signature/TestRSAKeyLength.java 8204203 windows-all
-sun/security/pkcs11/ec/ReadCertificates.java 8204203 windows-all
-sun/security/pkcs11/ec/ReadPKCS12.java 8204203 windows-all
-sun/security/pkcs11/ec/TestCurves.java 8204203 windows-all
-sun/security/pkcs11/ec/TestECDH.java 8204203 windows-all
-sun/security/pkcs11/ec/TestECDH2.java 8204203 windows-all
-sun/security/pkcs11/ec/TestECDSA.java 8204203 windows-all
-sun/security/pkcs11/ec/TestECDSA2.java 8204203 windows-all
-sun/security/pkcs11/ec/TestECGenSpec.java 8204203 windows-all
-sun/security/pkcs11/rsa/KeyWrap.java 8204203 windows-all
-sun/security/pkcs11/rsa/TestCACerts.java 8204203 windows-all
-sun/security/pkcs11/rsa/TestKeyFactory.java 8204203 windows-all
-sun/security/pkcs11/rsa/TestKeyPairGenerator.java 8204203 windows-all
-sun/security/pkcs11/rsa/TestSignatures.java 8204203 windows-all
-sun/security/pkcs11/sslecc/ClientJSSEServerJSSE.java 8204203 windows-all
-sun/security/pkcs11/tls/TestLeadingZeroesP11.java 8204203 windows-all
-sun/security/pkcs11/tls/TestMasterSecret.java 8204203 windows-all
-sun/security/pkcs11/tls/TestPRF.java 8204203 windows-all
-sun/security/pkcs11/tls/TestPremaster.java 8204203 windows-all
-sun/security/tools/keytool/NssTest.java 8204203 windows-all
+sun/security/pkcs11/KeyStore/SecretKeysBasic.sh 8209398 generic-all
############################################################################
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/util/regex/NegativeArraySize.java Mon May 06 09:43:48 2019 +0100
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8223174
+ * @summary Pattern.compile() can throw confusing NegativeArraySizeException
+ * @requires os.maxMemory >= 5g
+ * @run main/othervm -Xms5G -Xmx5G NegativeArraySize
+ */
+
+import java.util.regex.Pattern;
+
+public class NegativeArraySize {
+ public static void main(String[] args) {
+ try {
+ Pattern.compile("\\Q" + "a".repeat(42 + Integer.MAX_VALUE / 3));
+ throw new AssertionError("expected to throw");
+ } catch (OutOfMemoryError expected) {
+ }
+ }
+}
--- a/test/jdk/jdk/jfr/event/gc/collection/TestG1ParallelPhases.java Mon May 06 09:43:26 2019 +0100
+++ b/test/jdk/jdk/jfr/event/gc/collection/TestG1ParallelPhases.java Mon May 06 09:43:48 2019 +0100
@@ -100,7 +100,6 @@
"CMRefRoots",
"WaitForStrongCLD",
"WeakCLDRoots",
- "SATBFiltering",
"UpdateRS",
"ScanHCC",
"ScanRS",
--- a/test/jdk/sun/security/pkcs11/PKCS11Test.java Mon May 06 09:43:26 2019 +0100
+++ b/test/jdk/sun/security/pkcs11/PKCS11Test.java Mon May 06 09:43:48 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -384,11 +384,14 @@
static boolean loadNSPR(String libdir) throws Exception {
// load NSS softoken dependencies in advance to avoid resolver issues
- safeReload(libdir + System.mapLibraryName("nspr4"));
- safeReload(libdir + System.mapLibraryName("plc4"));
- safeReload(libdir + System.mapLibraryName("plds4"));
- safeReload(libdir + System.mapLibraryName("sqlite3"));
- safeReload(libdir + System.mapLibraryName("nssutil3"));
+ String dir = libdir.endsWith(File.separator)
+ ? libdir
+ : libdir + File.separator;
+ safeReload(dir + System.mapLibraryName("nspr4"));
+ safeReload(dir + System.mapLibraryName("plc4"));
+ safeReload(dir + System.mapLibraryName("plds4"));
+ safeReload(dir + System.mapLibraryName("sqlite3"));
+ safeReload(dir + System.mapLibraryName("nssutil3"));
return true;
}
@@ -903,21 +906,21 @@
@Artifact(
organization = "jpg.tests.jdk.nsslib",
name = "nsslib-windows_x64",
- revision = "3.35",
+ revision = "3.41-VS2017",
extension = "zip")
private static class WINDOWS_X64 { }
@Artifact(
organization = "jpg.tests.jdk.nsslib",
name = "nsslib-windows_x86",
- revision = "3.35",
+ revision = "3.41-VS2017",
extension = "zip")
private static class WINDOWS_X86 { }
@Artifact(
organization = "jpg.tests.jdk.nsslib",
name = "nsslib-macosx_x64",
- revision = "3.35",
+ revision = "3.41",
extension = "zip")
private static class MACOSX_X64 { }
}
--- a/test/jdk/sun/security/tools/keytool/NssTest.java Mon May 06 09:43:26 2019 +0100
+++ b/test/jdk/sun/security/tools/keytool/NssTest.java Mon May 06 09:43:48 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@
copyFiles();
System.setProperty("nss", "");
System.setProperty("nss.lib", String.valueOf(libPath));
+
+ PKCS11Test.loadNSPR(libPath.getParent().toString());
KeyToolTest.main(args);
}