7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
Summary: replace MemBarAcquire/MemBarRelease nodes on the monitor enter/exit code paths with new MemBarAcquireLock/MemBarReleaseLock nodes
Reviewed-by: kvn, twisti
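The change splits the lock-related barriers into their own ideal-graph node types, so backends and optimization passes can tell a monitor enter/exit barrier apart from a volatile-access barrier by opcode alone, rather than inspecting the surrounding graph shape the way Matcher::prior_fast_lock()/post_fast_unlock() used to. A minimal sketch of the kind of check this enables (the helper name is hypothetical and not part of this change):

static bool is_lock_related_membar(const Node* n) {
  // With the new node types the distinction is carried by the opcode itself.
  int opc = n->Opcode();
  return opc == Op_MemBarAcquireLock || opc == Op_MemBarReleaseLock;
}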
--- a/hotspot/src/cpu/sparc/vm/sparc.ad Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad Tue Aug 02 18:36:40 2011 +0200
@@ -6605,8 +6605,7 @@
%}
instruct membar_acquire_lock() %{
- match(MemBarAcquire);
- predicate(Matcher::prior_fast_lock(n));
+ match(MemBarAcquireLock);
ins_cost(0);
size(0);
@@ -6626,8 +6625,7 @@
%}
instruct membar_release_lock() %{
- match(MemBarRelease);
- predicate(Matcher::post_fast_unlock(n));
+ match(MemBarReleaseLock);
ins_cost(0);
size(0);
--- a/hotspot/src/cpu/x86/vm/x86_32.ad Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad Tue Aug 02 18:36:40 2011 +0200
@@ -7805,8 +7805,7 @@
%}
instruct membar_acquire_lock() %{
- match(MemBarAcquire);
- predicate(Matcher::prior_fast_lock(n));
+ match(MemBarAcquireLock);
ins_cost(0);
size(0);
@@ -7826,8 +7825,7 @@
%}
instruct membar_release_lock() %{
- match(MemBarRelease);
- predicate(Matcher::post_fast_unlock(n));
+ match(MemBarReleaseLock);
ins_cost(0);
size(0);
--- a/hotspot/src/cpu/x86/vm/x86_64.ad Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad Tue Aug 02 18:36:40 2011 +0200
@@ -7375,8 +7375,7 @@
instruct membar_acquire_lock()
%{
- match(MemBarAcquire);
- predicate(Matcher::prior_fast_lock(n));
+ match(MemBarAcquireLock);
ins_cost(0);
size(0);
@@ -7398,8 +7397,7 @@
instruct membar_release_lock()
%{
- match(MemBarRelease);
- predicate(Matcher::post_fast_unlock(n));
+ match(MemBarReleaseLock);
ins_cost(0);
size(0);
--- a/hotspot/src/share/vm/adlc/formssel.cpp Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.cpp Tue Aug 02 18:36:40 2011 +0200
@@ -624,6 +624,8 @@
if( strcmp(_matrule->_opType,"MemBarRelease") == 0 ) return true;
if( strcmp(_matrule->_opType,"MemBarAcquire") == 0 ) return true;
+ if( strcmp(_matrule->_opType,"MemBarReleaseLock") == 0 ) return true;
+ if( strcmp(_matrule->_opType,"MemBarAcquireLock") == 0 ) return true;
return false;
}
@@ -3941,6 +3943,8 @@
return
!strcmp(_opType,"MemBarAcquire" ) ||
!strcmp(_opType,"MemBarRelease" ) ||
+ !strcmp(_opType,"MemBarAcquireLock") ||
+ !strcmp(_opType,"MemBarReleaseLock") ||
!strcmp(_opType,"MemBarVolatile" ) ||
!strcmp(_opType,"MemBarCPUOrder" ) ;
}
--- a/hotspot/src/share/vm/opto/classes.hpp Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/share/vm/opto/classes.hpp Tue Aug 02 18:36:40 2011 +0200
@@ -161,8 +161,10 @@
macro(MachProj)
macro(MaxI)
macro(MemBarAcquire)
+macro(MemBarAcquireLock)
macro(MemBarCPUOrder)
macro(MemBarRelease)
+macro(MemBarReleaseLock)
macro(MemBarVolatile)
macro(MergeMem)
macro(MinI)
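For context, each macro(X) entry in classes.hpp is expanded elsewhere (opcodes.hpp and friends) into an Op_X opcode constant, which is what provides the Op_MemBarAcquireLock/Op_MemBarReleaseLock values used in the C++ changes below. A simplified sketch of that expansion (names suffixed _sketch are illustrative, not the real enum):

#define macro(x) Op_##x,
enum Opcodes_sketch {
  Op_Node_sketch = 0,
  // The two new classes.hpp entries become opcode constants:
  macro(MemBarAcquireLock)
  macro(MemBarReleaseLock)
  _last_opcode_sketch
};
#undef macro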
--- a/hotspot/src/share/vm/opto/graphKit.cpp Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Tue Aug 02 18:36:40 2011 +0200
@@ -2856,7 +2856,7 @@
// lock has no side-effects, sets few values
set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);
- insert_mem_bar(Op_MemBarAcquire);
+ insert_mem_bar(Op_MemBarAcquireLock);
// Add this to the worklist so that the lock can be eliminated
record_for_igvn(lock);
@@ -2889,7 +2889,7 @@
}
// Memory barrier to avoid floating things down past the locked region
- insert_mem_bar(Op_MemBarRelease);
+ insert_mem_bar(Op_MemBarReleaseLock);
const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
UnlockNode *unlock = new (C, tf->domain()->cnt()) UnlockNode(C, tf);
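The intent of the two GraphKit changes above: the acquire-flavored barrier is inserted right after the monitor is taken, and the release-flavored one right before it is released, so memory accesses cannot float out of the synchronized region. A minimal sketch of that pairing (assuming GraphKit::insert_mem_bar() is usable as in the hunks above; the Lock/Unlock runtime-call plumbing is elided):

void lock_barrier_sketch(GraphKit& kit) {
  // ... emit Lock/FastLock and the slow-path runtime call ...
  kit.insert_mem_bar(Op_MemBarAcquireLock);   // nothing below may float above the lock
  // ... body of the synchronized region ...
  kit.insert_mem_bar(Op_MemBarReleaseLock);   // nothing above may float below the unlock
  // ... emit Unlock/FastUnlock and the slow-path runtime call ...
}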
--- a/hotspot/src/share/vm/opto/macro.cpp Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp Tue Aug 02 18:36:40 2011 +0200
@@ -1816,9 +1816,9 @@
// The input to a Lock is merged memory, so extract its RawMem input
// (unless the MergeMem has been optimized away.)
if (alock->is_Lock()) {
- // Seach for MemBarAcquire node and delete it also.
+ // Search for MemBarAcquireLock node and delete it also.
MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
- assert(membar != NULL && membar->Opcode() == Op_MemBarAcquire, "");
+ assert(membar != NULL && membar->Opcode() == Op_MemBarAcquireLock, "");
Node* ctrlproj = membar->proj_out(TypeFunc::Control);
Node* memproj = membar->proj_out(TypeFunc::Memory);
_igvn.replace_node(ctrlproj, fallthroughproj);
@@ -1833,11 +1833,11 @@
}
}
- // Seach for MemBarRelease node and delete it also.
+ // Search for MemBarReleaseLock node and delete it also.
if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() &&
ctrl->in(0)->is_MemBar()) {
MemBarNode* membar = ctrl->in(0)->as_MemBar();
- assert(membar->Opcode() == Op_MemBarRelease &&
+ assert(membar->Opcode() == Op_MemBarReleaseLock &&
mem->is_Proj() && membar == mem->in(0), "");
_igvn.replace_node(fallthroughproj, ctrl);
_igvn.replace_node(memproj_fallthrough, mem);
--- a/hotspot/src/share/vm/opto/matcher.cpp Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp Tue Aug 02 18:36:40 2011 +0200
@@ -2230,57 +2230,6 @@
}
}
-
-// Used by the DFA in dfa_sparc.cpp. Check for a prior FastLock
-// acting as an Acquire and thus we don't need an Acquire here. We
-// retain the Node to act as a compiler ordering barrier.
-bool Matcher::prior_fast_lock( const Node *acq ) {
- Node *r = acq->in(0);
- if( !r->is_Region() || r->req() <= 1 ) return false;
- Node *proj = r->in(1);
- if( !proj->is_Proj() ) return false;
- Node *call = proj->in(0);
- if( !call->is_Call() || call->as_Call()->entry_point() != OptoRuntime::complete_monitor_locking_Java() )
- return false;
-
- return true;
-}
-
-// Used by the DFA in dfa_sparc.cpp. Check for a following FastUnLock
-// acting as a Release and thus we don't need a Release here. We
-// retain the Node to act as a compiler ordering barrier.
-bool Matcher::post_fast_unlock( const Node *rel ) {
- Compile *C = Compile::current();
- assert( rel->Opcode() == Op_MemBarRelease, "" );
- const MemBarReleaseNode *mem = (const MemBarReleaseNode*)rel;
- DUIterator_Fast imax, i = mem->fast_outs(imax);
- Node *ctrl = NULL;
- while( true ) {
- ctrl = mem->fast_out(i); // Throw out-of-bounds if proj not found
- assert( ctrl->is_Proj(), "only projections here" );
- ProjNode *proj = (ProjNode*)ctrl;
- if( proj->_con == TypeFunc::Control &&
- !C->node_arena()->contains(ctrl) ) // Unmatched old-space only
- break;
- i++;
- }
- Node *iff = NULL;
- for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
- Node *x = ctrl->fast_out(j);
- if( x->is_If() && x->req() > 1 &&
- !C->node_arena()->contains(x) ) { // Unmatched old-space only
- iff = x;
- break;
- }
- }
- if( !iff ) return false;
- Node *bol = iff->in(1);
- // The iff might be some random subclass of If or bol might be Con-Top
- if (!bol->is_Bool()) return false;
- assert( bol->req() > 1, "" );
- return (bol->in(1)->Opcode() == Op_FastUnlock);
-}
-
// Used by the DFA in dfa_xxx.cpp. Check for a following barrier or
// atomic instruction acting as a store_load barrier without any
// intervening volatile load, and thus we don't need a barrier here.
--- a/hotspot/src/share/vm/opto/matcher.hpp Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/share/vm/opto/matcher.hpp Tue Aug 02 18:36:40 2011 +0200
@@ -441,16 +441,6 @@
else { fatal("SoftMatchFailure is not allowed except in product"); }
}
- // Used by the DFA in dfa_sparc.cpp. Check for a prior FastLock
- // acting as an Acquire and thus we don't need an Acquire here. We
- // retain the Node to act as a compiler ordering barrier.
- static bool prior_fast_lock( const Node *acq );
-
- // Used by the DFA in dfa_sparc.cpp. Check for a following
- // FastUnLock acting as a Release and thus we don't need a Release
- // here. We retain the Node to act as a compiler ordering barrier.
- static bool post_fast_unlock( const Node *rel );
-
// Check for a following volatile memory barrier without an
// intervening load and thus we don't need a barrier here. We
// retain the Node to act as a compiler ordering barrier.
--- a/hotspot/src/share/vm/opto/memnode.cpp Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/share/vm/opto/memnode.cpp Tue Aug 02 18:36:40 2011 +0200
@@ -925,8 +925,9 @@
// a synchronized region.
while (current->is_Proj()) {
int opc = current->in(0)->Opcode();
- if ((final && opc == Op_MemBarAcquire) ||
- opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder) {
+ if ((final && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock)) ||
+ opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder ||
+ opc == Op_MemBarReleaseLock) {
Node* mem = current->in(0)->in(TypeFunc::Memory);
if (mem->is_MergeMem()) {
MergeMemNode* merge = mem->as_MergeMem();
@@ -2666,6 +2667,8 @@
switch (opcode) {
case Op_MemBarAcquire: return new(C, len) MemBarAcquireNode(C, atp, pn);
case Op_MemBarRelease: return new(C, len) MemBarReleaseNode(C, atp, pn);
+ case Op_MemBarAcquireLock: return new(C, len) MemBarAcquireLockNode(C, atp, pn);
+ case Op_MemBarReleaseLock: return new(C, len) MemBarReleaseLockNode(C, atp, pn);
case Op_MemBarVolatile: return new(C, len) MemBarVolatileNode(C, atp, pn);
case Op_MemBarCPUOrder: return new(C, len) MemBarCPUOrderNode(C, atp, pn);
case Op_Initialize: return new(C, len) InitializeNode(C, atp, pn);
--- a/hotspot/src/share/vm/opto/memnode.hpp Mon Aug 08 13:19:46 2011 -0700
+++ b/hotspot/src/share/vm/opto/memnode.hpp Tue Aug 02 18:36:40 2011 +0200
@@ -879,7 +879,7 @@
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
-// visibility. Inserted after a volatile load or FastLock.
+// visibility. Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
@@ -889,7 +889,7 @@
// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
-// multi-cpu visibility. Inserted before a volatile store or FastUnLock.
+// multi-cpu visibility. Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
@@ -897,6 +897,26 @@
virtual int Opcode() const;
};
+// "Acquire" - no following ref can move before (but earlier refs can
+// follow, like an early Load stalled in cache). Requires multi-cpu
+// visibility. Inserted after a FastLock.
+class MemBarAcquireLockNode: public MemBarNode {
+public:
+ MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
+ : MemBarNode(C, alias_idx, precedent) {}
+ virtual int Opcode() const;
+};
+
+// "Release" - no earlier ref can move after (but later refs can move
+// up, like a speculative pipelined cache-hitting Load). Requires
+// multi-cpu visibility. Inserted before a FastUnLock.
+class MemBarReleaseLockNode: public MemBarNode {
+public:
+ MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
+ : MemBarNode(C, alias_idx, precedent) {}
+ virtual int Opcode() const;
+};
+
// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {