8209835: Aarch64: elide barriers on all volatile operations
Reviewed-by: aph, adinn
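
The change extends C2's existing barrier elision on AArch64 (previously limited to volatile loads, volatile stores and the int/long/pointer/narrow-oop CompareAndSwap nodes) to byte and short CAS, CompareAndExchange, WeakCompareAndSwap, GetAndSet and GetAndAdd nodes, so the acquiring/releasing atomic forms are emitted instead of plain atomics fenced with dmb. As a rough illustration (the class, field and method names below are hypothetical, not part of the changeset), this is the kind of Java code whose compiled form changes:

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    // Sketch only: a volatile long updated with getAndAdd. The VarHandle call
    // typically intrinsifies to a GetAndAddL node, one of the opcodes that
    // is_CAS()/needs_acquiring_load_exclusive() below now accept, so the back
    // end can use the acquiring/releasing atomic add instead of dmb fencing.
    class Counter {
        volatile long value;

        static final VarHandle VALUE;
        static {
            try {
                VALUE = MethodHandles.lookup()
                        .findVarHandle(Counter.class, "value", long.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        long bump() {
            return (long) VALUE.getAndAdd(this, 1L);
        }
    }
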
--- a/src/hotspot/cpu/aarch64/aarch64.ad Thu Oct 04 09:24:27 2018 +0200
+++ b/src/hotspot/cpu/aarch64/aarch64.ad Mon Nov 05 12:53:55 2018 +0100
@@ -1036,7 +1036,7 @@
}
};
- bool is_CAS(int opcode);
+ bool is_CAS(int opcode, bool maybe_volatile);
// predicates controlling emit of ldr<x>/ldar<x> and associated dmb
@@ -1259,12 +1259,12 @@
// relevant dmb instructions.
//
- // is_CAS(int opcode)
+ // is_CAS(int opcode, bool maybe_volatile)
//
// return true if opcode is one of the possible CompareAndSwapX
// values otherwise false.
- bool is_CAS(int opcode)
+ bool is_CAS(int opcode, bool maybe_volatile)
{
switch(opcode) {
// We handle these
@@ -1272,23 +1272,28 @@
case Op_CompareAndSwapL:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN:
- // case Op_CompareAndSwapB:
- // case Op_CompareAndSwapS:
+ case Op_CompareAndSwapB:
+ case Op_CompareAndSwapS:
+ case Op_GetAndSetI:
+ case Op_GetAndSetL:
+ case Op_GetAndSetP:
+ case Op_GetAndSetN:
+ case Op_GetAndAddI:
+ case Op_GetAndAddL:
return true;
- // These are TBD
+ case Op_CompareAndExchangeI:
+ case Op_CompareAndExchangeN:
+ case Op_CompareAndExchangeB:
+ case Op_CompareAndExchangeS:
+ case Op_CompareAndExchangeL:
+ case Op_CompareAndExchangeP:
case Op_WeakCompareAndSwapB:
case Op_WeakCompareAndSwapS:
case Op_WeakCompareAndSwapI:
case Op_WeakCompareAndSwapL:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
- case Op_CompareAndExchangeB:
- case Op_CompareAndExchangeS:
- case Op_CompareAndExchangeI:
- case Op_CompareAndExchangeL:
- case Op_CompareAndExchangeP:
- case Op_CompareAndExchangeN:
- return false;
+ return maybe_volatile;
default:
return false;
}
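
The newly accepted opcodes correspond to the jdk.internal.misc.Unsafe (and VarHandle) atomic update intrinsics. A hedged sketch of which C2 node each call typically produces, shown for the int flavour (the other sizes follow the same pattern; the class and field here are invented for illustration, and the test classes added below exercise the real thing):

    import jdk.internal.misc.Unsafe;

    // Illustrative only; needs --add-exports java.base/jdk.internal.misc=ALL-UNNAMED,
    // the same export the jtreg tests below pass to the spawned JVM.
    class NodeMapSketch {
        public volatile int f;

        static final Unsafe U = Unsafe.getUnsafe();

        static void demo(NodeMapSketch o, long off) {
            U.compareAndSetInt(o, off, 0, 1);       // -> CompareAndSwapI
            U.compareAndExchangeInt(o, off, 1, 2);  // -> CompareAndExchangeI
            U.weakCompareAndSetInt(o, off, 2, 3);   // -> WeakCompareAndSwapI
            U.getAndSetInt(o, off, 4);              // -> GetAndSetI
            U.getAndAddInt(o, off, 1);              // -> GetAndAddI
        }

        public static void main(String[] args) throws Exception {
            long off = U.objectFieldOffset(NodeMapSketch.class.getField("f"));
            demo(new NodeMapSketch(), off);
        }
    }
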
@@ -1318,7 +1323,7 @@
if (mb->trailing_load_store()) {
Node* load_store = mb->in(MemBarNode::Precedent);
assert(load_store->is_LoadStore(), "unexpected graph shape");
- return is_CAS(load_store->Opcode());
+ return is_CAS(load_store->Opcode(), true);
}
return false;
@@ -1365,7 +1370,7 @@
} else {
assert(mem->is_LoadStore(), "");
assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
- return is_CAS(mem->Opcode());
+ return is_CAS(mem->Opcode(), true);
}
}
return false;
@@ -1416,13 +1421,17 @@
bool needs_acquiring_load_exclusive(const Node *n)
{
- assert(is_CAS(n->Opcode()), "expecting a compare and swap");
+ assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
if (UseBarriersForVolatile) {
return false;
}
LoadStoreNode* ldst = n->as_LoadStore();
- assert(ldst->trailing_membar() != NULL, "expected trailing membar");
+ if (is_CAS(n->Opcode(), false)) {
+ assert(ldst->trailing_membar() != NULL, "expected trailing membar");
+ } else {
+ return ldst->trailing_membar() != NULL;
+ }
// so we can just return true here
return true;
@@ -2943,6 +2952,21 @@
/*weak*/ false, noreg);
%}
+ enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
+ MacroAssembler _masm(&cbuf);
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::halfword, /*acquire*/ true, /*release*/ true,
+ /*weak*/ false, noreg);
+ %}
+
+ enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
+ MacroAssembler _masm(&cbuf);
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::byte, /*acquire*/ true, /*release*/ true,
+ /*weak*/ false, noreg);
+ %}
// auxiliary used for CompareAndSwapX to set result register
enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
@@ -8240,6 +8264,44 @@
// alternative CompareAndSwapX when we are eliding barriers
+instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (CompareAndSwapB mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+
+ effect(KILL cr);
+
+ format %{
+ "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
+ "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+
+ ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
+ aarch64_enc_cset_eq(res));
+
+ ins_pipe(pipe_slow);
+%}
+
+instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (CompareAndSwapS mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+
+ effect(KILL cr);
+
+ format %{
+ "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
+ "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+
+ ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
+ aarch64_enc_cset_eq(res));
+
+ ins_pipe(pipe_slow);
+%}
+
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
@@ -8338,7 +8400,7 @@
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
- "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
+ "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
@@ -8354,7 +8416,7 @@
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
- "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
+ "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
@@ -8370,7 +8432,7 @@
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
- "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
+ "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
@@ -8400,7 +8462,7 @@
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
- "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+ "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
@@ -8425,12 +8487,112 @@
ins_pipe(pipe_slow);
%}
+instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(TEMP_DEF res, KILL cr);
+ format %{
+ "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::byte, /*acquire*/ true, /*release*/ true,
+ /*weak*/ false, $res$$Register);
+ __ sxtbw($res$$Register, $res$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(TEMP_DEF res, KILL cr);
+ format %{
+ "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::halfword, /*acquire*/ true, /*release*/ true,
+ /*weak*/ false, $res$$Register);
+ __ sxthw($res$$Register, $res$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+
+instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(TEMP_DEF res, KILL cr);
+ format %{
+ "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::word, /*acquire*/ true, /*release*/ true,
+ /*weak*/ false, $res$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(TEMP_DEF res, KILL cr);
+ format %{
+ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::xword, /*acquire*/ true, /*release*/ true,
+ /*weak*/ false, $res$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+
+instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(TEMP_DEF res, KILL cr);
+ format %{
+ "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::word, /*acquire*/ true, /*release*/ true,
+ /*weak*/ false, $res$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(TEMP_DEF res, KILL cr);
+ format %{
+ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::xword, /*acquire*/ true, /*release*/ true,
+ /*weak*/ false, $res$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
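
The byte and short exchange rules append sxtbw/sxthw because compareAndExchangeByte/Short return the previous value as a signed byte/short, while the underlying exchange leaves it zero-extended in the result register. A small, hypothetical VarHandle example of the shape these rules serve (class and field names are illustrative):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    // Sketch only: a strong compareAndExchange on a volatile byte field. The
    // VarHandle call typically reaches Unsafe.compareAndExchangeByte, i.e. a
    // CompareAndExchangeB node, which compareAndExchangeBAcq above can now
    // match when barriers are elided; the returned old value is the signed byte.
    class FlagSketch {
        volatile byte state = -1;

        static final VarHandle STATE;
        static {
            try {
                STATE = MethodHandles.lookup()
                        .findVarHandle(FlagSketch.class, "state", byte.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        byte tryStart() {
            // old value: -1 if we won the race, otherwise whoever got there first
            return (byte) STATE.compareAndExchange(this, (byte) -1, (byte) 1);
        }
    }
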
+
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
- "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
+ "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
@@ -8447,7 +8609,7 @@
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
- "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
+ "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
@@ -8464,7 +8626,7 @@
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
- "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
+ "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
@@ -8498,7 +8660,7 @@
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
- "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+ "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
@@ -8527,11 +8689,120 @@
ins_pipe(pipe_slow);
%}
+instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(KILL cr);
+ format %{
+ "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
+ "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::byte, /*acquire*/ true, /*release*/ true,
+ /*weak*/ true, noreg);
+ __ csetw($res$$Register, Assembler::EQ);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(KILL cr);
+ format %{
+ "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
+ "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::halfword, /*acquire*/ true, /*release*/ true,
+ /*weak*/ true, noreg);
+ __ csetw($res$$Register, Assembler::EQ);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(KILL cr);
+ format %{
+ "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
+ "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::word, /*acquire*/ true, /*release*/ true,
+ /*weak*/ true, noreg);
+ __ csetw($res$$Register, Assembler::EQ);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(KILL cr);
+ format %{
+ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
+ "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::xword, /*acquire*/ true, /*release*/ true,
+ /*weak*/ true, noreg);
+ __ csetw($res$$Register, Assembler::EQ);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(KILL cr);
+ format %{
+ "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+ "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::word, /*acquire*/ true, /*release*/ true,
+ /*weak*/ true, noreg);
+ __ csetw($res$$Register, Assembler::EQ);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+ effect(KILL cr);
+ format %{
+ "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
+ "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+ ins_encode %{
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+ Assembler::xword, /*acquire*/ true, /*release*/ true,
+ /*weak*/ true, noreg);
+ __ csetw($res$$Register, Assembler::EQ);
+ %}
+ ins_pipe(pipe_slow);
+%}
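
Unlike the exchange rules above, the weak CAS rules produce only a success flag (csetw on EQ, no TEMP_DEF result register), because the Java-level weakCompareAndSet* may fail spuriously and reports just a boolean. A hedged usage sketch (class and field names invented; the added TestUnsafeVolatileWeakCAS below is the real coverage):

    import jdk.internal.misc.Unsafe;

    // Sketch only: the typical retry loop around a weak CAS; each iteration's
    // weakCompareAndSetInt maps to a WeakCompareAndSwapI node, which the
    // weakCompareAndSwapIAcq rule above can now match without dmb fencing.
    class BitSetSketch {
        public volatile int bits;

        static final Unsafe U = Unsafe.getUnsafe();
        static final long BITS_OFF;
        static {
            try {
                BITS_OFF = U.objectFieldOffset(BitSetSketch.class.getField("bits"));
            } catch (NoSuchFieldException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        void setBit(int bit) {
            int cur;
            do {
                cur = bits;   // volatile read of the current value
            } while (!U.weakCompareAndSetInt(this, BITS_OFF, cur, cur | (1 << bit)));
        }
    }
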
+
// END This section of the file is automatically generated. Do not edit --------------
// ---------------------------------------------------------------------
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
match(Set prev (GetAndSetI mem newv));
+ ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
@@ -8541,6 +8812,7 @@
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
match(Set prev (GetAndSetL mem newv));
+ ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
@@ -8550,6 +8822,7 @@
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
match(Set prev (GetAndSetN mem newv));
+ ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
@@ -8559,6 +8832,7 @@
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
match(Set prev (GetAndSetP mem newv));
+ ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
@@ -8566,10 +8840,54 @@
ins_pipe(pipe_serial);
%}
+instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set prev (GetAndSetI mem newv));
+ ins_cost(VOLATILE_REF_COST);
+ format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
+ ins_encode %{
+ __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set prev (GetAndSetL mem newv));
+ ins_cost(VOLATILE_REF_COST);
+ format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
+ ins_encode %{
+ __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set prev (GetAndSetN mem newv));
+ ins_cost(VOLATILE_REF_COST);
+ format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
+ ins_encode %{
+ __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set prev (GetAndSetP mem newv));
+ ins_cost(VOLATILE_REF_COST);
+ format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
+ ins_encode %{
+ __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
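
The GetAndSet*Acq rules switch from atomic_xchg/atomic_xchgw to the acquiring/releasing forms atomic_xchgal/atomic_xchgalw and halve the cost relative to the dmb-fenced rules they displace. As a rough illustration (names invented), an everyday java.util.concurrent shape that ends up here:

    import java.util.concurrent.atomic.AtomicReference;

    // Sketch only: AtomicReference.getAndSet typically bottoms out in
    // Unsafe.getAndSetReference, i.e. a GetAndSetP (or GetAndSetN with
    // compressed oops) node, so with barriers elided it can match
    // get_and_setPAcq / get_and_setNAcq above.
    class OwnerSketch {
        private final AtomicReference<Thread> owner = new AtomicReference<>();

        Thread takeOver() {
            return owner.getAndSet(Thread.currentThread()); // previous owner, if any
        }
    }
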
+
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
match(Set newval (GetAndAddL mem incr));
- ins_cost(INSN_COST * 10);
+ ins_cost(2 * VOLATILE_REF_COST + 1);
format %{ "get_and_addL $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
@@ -8580,7 +8898,7 @@
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddL mem incr));
- ins_cost(INSN_COST * 9);
+ ins_cost(2 * VOLATILE_REF_COST);
format %{ "get_and_addL [$mem], $incr" %}
ins_encode %{
__ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
@@ -8590,7 +8908,7 @@
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
match(Set newval (GetAndAddL mem incr));
- ins_cost(INSN_COST * 10);
+ ins_cost(2 * VOLATILE_REF_COST + 1);
format %{ "get_and_addL $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
@@ -8601,7 +8919,7 @@
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddL mem incr));
- ins_cost(INSN_COST * 9);
+ ins_cost(2 * VOLATILE_REF_COST);
format %{ "get_and_addL [$mem], $incr" %}
ins_encode %{
__ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
@@ -8611,7 +8929,7 @@
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
match(Set newval (GetAndAddI mem incr));
- ins_cost(INSN_COST * 10);
+ ins_cost(2 * VOLATILE_REF_COST + 1);
format %{ "get_and_addI $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
@@ -8622,7 +8940,7 @@
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem incr));
- ins_cost(INSN_COST * 9);
+ ins_cost(2 * VOLATILE_REF_COST);
format %{ "get_and_addI [$mem], $incr" %}
ins_encode %{
__ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
@@ -8632,7 +8950,7 @@
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
match(Set newval (GetAndAddI mem incr));
- ins_cost(INSN_COST * 10);
+ ins_cost(2 * VOLATILE_REF_COST + 1);
format %{ "get_and_addI $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
@@ -8643,7 +8961,7 @@
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem incr));
- ins_cost(INSN_COST * 9);
+ ins_cost(2 * VOLATILE_REF_COST);
format %{ "get_and_addI [$mem], $incr" %}
ins_encode %{
__ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
@@ -8651,6 +8969,94 @@
ins_pipe(pipe_serial);
%}
+instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set newval (GetAndAddL mem incr));
+ ins_cost(VOLATILE_REF_COST + 1);
+ format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
+ ins_encode %{
+ __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
+ predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
+ match(Set dummy (GetAndAddL mem incr));
+ ins_cost(VOLATILE_REF_COST);
+ format %{ "get_and_addL_acq [$mem], $incr" %}
+ ins_encode %{
+ __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set newval (GetAndAddL mem incr));
+ ins_cost(VOLATILE_REF_COST + 1);
+ format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
+ ins_encode %{
+ __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
+ predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
+ match(Set dummy (GetAndAddL mem incr));
+ ins_cost(VOLATILE_REF_COST);
+ format %{ "get_and_addL_acq [$mem], $incr" %}
+ ins_encode %{
+ __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set newval (GetAndAddI mem incr));
+ ins_cost(VOLATILE_REF_COST + 1);
+ format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
+ ins_encode %{
+ __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
+ predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
+ match(Set dummy (GetAndAddI mem incr));
+ ins_cost(VOLATILE_REF_COST);
+ format %{ "get_and_addI_acq [$mem], $incr" %}
+ ins_encode %{
+ __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
+ predicate(needs_acquiring_load_exclusive(n));
+ match(Set newval (GetAndAddI mem incr));
+ ins_cost(VOLATILE_REF_COST + 1);
+ format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
+ ins_encode %{
+ __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
+ predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
+ match(Set dummy (GetAndAddI mem incr));
+ ins_cost(VOLATILE_REF_COST);
+ format %{ "get_and_addI_acq [$mem], $incr" %}
+ ins_encode %{
+ __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
+ %}
+ ins_pipe(pipe_serial);
+%}
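
The GetAndAdd rules come in value-returning and result_not_used() pairs; the +1 on the value-returning cost steers the matcher to the cheaper *_no_res form when the fetched value is dead, which is the common counter pattern. A small, hypothetical example of both shapes:

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch only: with the returned value discarded, the underlying GetAndAddL
    // node has no users of its result, so one of the get_and_add*_no_resAcq
    // rules above can match; when the old value is consumed, the value-returning
    // get_and_add*Acq rules apply instead.
    class StatsSketch {
        private final AtomicLong hits = new AtomicLong();

        void recordHit() {
            hits.getAndIncrement();        // result discarded
        }

        long claimBlock(long n) {
            return hits.getAndAdd(n);      // result used: start of the claimed range
        }
    }
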
+
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestUnsafeVolatileCAE.java Mon Nov 05 12:53:55 2018 +0100
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package compiler.c2.aarch64;
+
+import java.lang.reflect.Field;
+import jdk.internal.misc.Unsafe;
+
+class TestUnsafeVolatileCAE
+{
+ public volatile int f_int = 0;
+ public volatile Integer f_obj = Integer.valueOf(0);
+ public volatile long f_long = 0;
+ public volatile byte f_byte = 0;
+ public volatile short f_short = 0;
+
+ public static Unsafe unsafe = Unsafe.getUnsafe();
+ public static Field f_int_field;
+ public static Field f_obj_field;
+ public static Field f_long_field;
+ public static Field f_byte_field;
+ public static Field f_short_field;
+ public static long f_int_off;
+ public static long f_obj_off;
+ public static long f_long_off;
+ public static long f_byte_off;
+ public static long f_short_off;
+
+ static {
+ try {
+ f_int_field = TestUnsafeVolatileCAE.class.getField("f_int");
+ f_obj_field = TestUnsafeVolatileCAE.class.getField("f_obj");
+ f_long_field = TestUnsafeVolatileCAE.class.getField("f_long");
+ f_byte_field = TestUnsafeVolatileCAE.class.getField("f_byte");
+ f_short_field = TestUnsafeVolatileCAE.class.getField("f_short");
+ f_int_off = unsafe.objectFieldOffset(f_int_field);
+ f_obj_off = unsafe.objectFieldOffset(f_obj_field);
+ f_long_off = unsafe.objectFieldOffset(f_long_field);
+ f_byte_off = unsafe.objectFieldOffset(f_byte_field);
+ f_short_off = unsafe.objectFieldOffset(f_short_field);
+ } catch (Exception e) {
+ System.out.println("reflection failed " + e);
+ e.printStackTrace();
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ final TestUnsafeVolatileCAE t = new TestUnsafeVolatileCAE();
+ for (int i = 0; i < 100_000; i++) {
+ t.f_int = -1;
+ int res = t.testInt(-1, i);
+ if (res != -1 || t.f_int != i) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ t.f_long = -1;
+ long res = t.testLong(-1, i);
+ if (res != -1 || t.f_long != i) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ t.f_byte = -1;
+ byte i_b = (byte)i;
+ byte res = t.testByte((byte)-1, i_b);
+ if (res != (byte)-1 || t.f_byte != i_b) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ t.f_short = -1;
+ short i_s = (short)i;
+ short res = t.testShort((short)-1, i_s);
+ if (res != (short)-1 || t.f_short != i_s) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ Integer minusOne = Integer.valueOf(-1);
+ for (int i = 0; i < 100_000; i++) {
+ t.f_obj = minusOne;
+ Object res = t.testObj(minusOne, Integer.valueOf(i));
+ if (res != minusOne || t.f_obj != i) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ }
+
+ public int testInt(int x, int i)
+ {
+ return unsafe.compareAndExchangeInt(this, f_int_off, x, i);
+ }
+
+ public Object testObj(Object x, Object o)
+ {
+ return unsafe.compareAndExchangeReference(this, f_obj_off, x, o);
+ }
+ public long testLong(long x, long i)
+ {
+ return unsafe.compareAndExchangeLong(this, f_long_off, x, i);
+ }
+
+ public byte testByte(byte x, byte i)
+ {
+ return unsafe.compareAndExchangeByte(this, f_byte_off, x, i);
+ }
+
+ public short testShort(short x, short i)
+ {
+ return unsafe.compareAndExchangeShort(this, f_short_off, x, i);
+ }
+}
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestUnsafeVolatileCAS.java Thu Oct 04 09:24:27 2018 +0200
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestUnsafeVolatileCAS.java Mon Nov 05 12:53:55 2018 +0100
@@ -30,19 +30,34 @@
{
public volatile int f_int = 0;
public volatile Integer f_obj = Integer.valueOf(0);
+ public volatile long f_long = 0;
+ public volatile byte f_byte = 0;
+ public volatile short f_short = 0;
public static Unsafe unsafe = Unsafe.getUnsafe();
public static Field f_int_field;
public static Field f_obj_field;
+ public static Field f_long_field;
+ public static Field f_byte_field;
+ public static Field f_short_field;
public static long f_int_off;
public static long f_obj_off;
+ public static long f_long_off;
+ public static long f_byte_off;
+ public static long f_short_off;
static {
try {
f_int_field = TestUnsafeVolatileCAS.class.getField("f_int");
f_obj_field = TestUnsafeVolatileCAS.class.getField("f_obj");
+ f_long_field = TestUnsafeVolatileCAS.class.getField("f_long");
+ f_byte_field = TestUnsafeVolatileCAS.class.getField("f_byte");
+ f_short_field = TestUnsafeVolatileCAS.class.getField("f_short");
f_int_off = unsafe.objectFieldOffset(f_int_field);
f_obj_off = unsafe.objectFieldOffset(f_obj_field);
+ f_long_off = unsafe.objectFieldOffset(f_long_field);
+ f_byte_off = unsafe.objectFieldOffset(f_byte_field);
+ f_short_off = unsafe.objectFieldOffset(f_short_field);
} catch (Exception e) {
System.out.println("reflection failed " + e);
e.printStackTrace();
@@ -59,6 +74,29 @@
throw new RuntimeException("bad result!");
}
}
+ for (int i = 0; i < 100_000; i++) {
+ t.f_long = -1;
+ t.testLong(-1, i);
+ if (t.f_long != i) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ t.f_byte = -1;
+ byte i_b = (byte)i;
+ t.testByte((byte)-1, i_b);
+ if (t.f_byte != i_b) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ t.f_short = -1;
+ short i_s = (short)i;
+ t.testShort((short)-1, i_s);
+ if (t.f_short != i_s) {
+ throw new RuntimeException("bad result!");
+ }
+ }
Integer minusOne = Integer.valueOf(-1);
for (int i = 0; i < 100_000; i++) {
t.f_obj = minusOne;
@@ -68,6 +106,7 @@
}
}
}
+
public void testInt(int x, int i)
{
unsafe.compareAndSetInt(this, f_int_off, x, i);
@@ -77,4 +116,19 @@
{
unsafe.compareAndSetReference(this, f_obj_off, x, o);
}
+
+ public void testLong(long x, long i)
+ {
+ unsafe.compareAndSetLong(this, f_long_off, x, i);
+ }
+
+ public void testByte(byte x, byte i)
+ {
+ unsafe.compareAndSetByte(this, f_byte_off, x, i);
+ }
+
+ public void testShort(short x, short i)
+ {
+ unsafe.compareAndSetShort(this, f_short_off, x, i);
+ }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestUnsafeVolatileGAA.java Mon Nov 05 12:53:55 2018 +0100
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package compiler.c2.aarch64;
+
+import java.lang.reflect.Field;
+import jdk.internal.misc.Unsafe;
+
+class TestUnsafeVolatileGAA
+{
+ public volatile int f_int = -1;
+ public volatile long f_long = -1;
+
+ public static Unsafe unsafe = Unsafe.getUnsafe();
+ public static Field f_int_field;
+ public static Field f_long_field;
+ public static long f_int_off;
+ public static long f_long_off;
+
+ static {
+ try {
+ f_int_field = TestUnsafeVolatileGAA.class.getField("f_int");
+ f_long_field = TestUnsafeVolatileGAA.class.getField("f_long");
+ f_int_off = unsafe.objectFieldOffset(f_int_field);
+ f_long_off = unsafe.objectFieldOffset(f_long_field);
+ } catch (Exception e) {
+ System.out.println("reflection failed " + e);
+ e.printStackTrace();
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ final TestUnsafeVolatileGAA t = new TestUnsafeVolatileGAA();
+ for (int i = 0; i < 100_000; i++) {
+ if (t.testInt() != i-1) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ if (t.testLong() != i-1) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ }
+
+ public int testInt()
+ {
+ return unsafe.getAndAddInt(this, f_int_off, 1);
+ }
+
+ public long testLong()
+ {
+ return unsafe.getAndAddLong(this, f_long_off, 1);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestUnsafeVolatileGAS.java Mon Nov 05 12:53:55 2018 +0100
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package compiler.c2.aarch64;
+
+import java.lang.reflect.Field;
+import jdk.internal.misc.Unsafe;
+
+class TestUnsafeVolatileGAS
+{
+ public volatile int f_int = -1;
+ public volatile Integer f_obj = Integer.valueOf(-1);
+ public volatile long f_long = -1;
+
+ public static Unsafe unsafe = Unsafe.getUnsafe();
+ public static Field f_int_field;
+ public static Field f_obj_field;
+ public static Field f_long_field;
+ public static long f_int_off;
+ public static long f_obj_off;
+ public static long f_long_off;
+
+ static {
+ try {
+ f_int_field = TestUnsafeVolatileGAS.class.getField("f_int");
+ f_obj_field = TestUnsafeVolatileGAS.class.getField("f_obj");
+ f_long_field = TestUnsafeVolatileGAS.class.getField("f_long");
+ f_int_off = unsafe.objectFieldOffset(f_int_field);
+ f_obj_off = unsafe.objectFieldOffset(f_obj_field);
+ f_long_off = unsafe.objectFieldOffset(f_long_field);
+ } catch (Exception e) {
+ System.out.println("reflection failed " + e);
+ e.printStackTrace();
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ final TestUnsafeVolatileGAS t = new TestUnsafeVolatileGAS();
+ for (int i = 0; i < 100_000; i++) {
+ if (t.testInt(i) != i-1) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ if (t.testLong(i) != i-1) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ if ((Integer)t.testObj(Integer.valueOf(i)) != i-1) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ }
+
+ public int testInt(int i)
+ {
+ return unsafe.getAndSetInt(this, f_int_off, i);
+ }
+
+ public Object testObj(Object o)
+ {
+ return unsafe.getAndSetReference(this, f_obj_off, o);
+ }
+ public long testLong(long i)
+ {
+ return unsafe.getAndSetLong(this, f_long_off, i);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestUnsafeVolatileWeakCAS.java Mon Nov 05 12:53:55 2018 +0100
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package compiler.c2.aarch64;
+
+import java.lang.reflect.Field;
+import jdk.internal.misc.Unsafe;
+
+class TestUnsafeVolatileWeakCAS
+{
+ public volatile int f_int = 0;
+ public volatile Integer f_obj = Integer.valueOf(0);
+ public volatile long f_long = 0;
+ public volatile byte f_byte = 0;
+ public volatile short f_short = 0;
+
+ public static Unsafe unsafe = Unsafe.getUnsafe();
+ public static Field f_int_field;
+ public static Field f_obj_field;
+ public static Field f_long_field;
+ public static Field f_byte_field;
+ public static Field f_short_field;
+ public static long f_int_off;
+ public static long f_obj_off;
+ public static long f_long_off;
+ public static long f_byte_off;
+ public static long f_short_off;
+
+ static {
+ try {
+ f_int_field = TestUnsafeVolatileWeakCAS.class.getField("f_int");
+ f_obj_field = TestUnsafeVolatileWeakCAS.class.getField("f_obj");
+ f_long_field = TestUnsafeVolatileWeakCAS.class.getField("f_long");
+ f_byte_field = TestUnsafeVolatileWeakCAS.class.getField("f_byte");
+ f_short_field = TestUnsafeVolatileWeakCAS.class.getField("f_short");
+ f_int_off = unsafe.objectFieldOffset(f_int_field);
+ f_obj_off = unsafe.objectFieldOffset(f_obj_field);
+ f_long_off = unsafe.objectFieldOffset(f_long_field);
+ f_byte_off = unsafe.objectFieldOffset(f_byte_field);
+ f_short_off = unsafe.objectFieldOffset(f_short_field);
+ } catch (Exception e) {
+ System.out.println("reflection failed " + e);
+ e.printStackTrace();
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ final TestUnsafeVolatileWeakCAS t = new TestUnsafeVolatileWeakCAS();
+ for (int i = 0; i < 100_000; i++) {
+ t.f_int = -1;
+ if (t.testInt(-1, i)) {
+ if (t.f_int != i) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ t.f_long = -1;
+ if (t.testLong(-1, i)) {
+ if (t.f_long != i) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ t.f_byte = -1;
+ byte i_b = (byte)i;
+ if (t.testByte((byte)-1, i_b)) {
+ if (t.f_byte != i_b) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ }
+ for (int i = 0; i < 100_000; i++) {
+ t.f_short = -1;
+ short i_s = (short)i;
+ if (t.testShort((short)-1, i_s)) {
+ if (t.f_short != i_s) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ }
+ Integer minusOne = Integer.valueOf(-1);
+ for (int i = 0; i < 100_000; i++) {
+ t.f_obj = minusOne;
+ if (t.testObj(minusOne, Integer.valueOf(i))) {
+ if (t.f_obj != i) {
+ throw new RuntimeException("bad result!");
+ }
+ }
+ }
+ }
+
+ public boolean testInt(int x, int i)
+ {
+ return unsafe.weakCompareAndSetInt(this, f_int_off, x, i);
+ }
+
+ public boolean testObj(Object x, Object o)
+ {
+ return unsafe.weakCompareAndSetReference(this, f_obj_off, x, o);
+ }
+
+ public boolean testLong(long x, long i)
+ {
+ return unsafe.weakCompareAndSetLong(this, f_long_off, x, i);
+ }
+
+ public boolean testByte(byte x, byte i)
+ {
+ return unsafe.weakCompareAndSetByte(this, f_byte_off, x, i);
+ }
+
+ public boolean testShort(short x, short i)
+ {
+ return unsafe.weakCompareAndSetShort(this, f_short_off, x, i);
+ }
+}
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java Thu Oct 04 09:24:27 2018 +0200
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java Mon Nov 05 12:53:55 2018 +0100
@@ -31,7 +31,10 @@
* TestVolatileStore,
* TestUnsafeVolatileLoad,
* TestUnsafeVolatileStore,
- * TestUnsafeVolatileCAS}
+ * TestUnsafeVolatileCAS,
+ * TestUnsafeVolatileWeakCAS,
+ * TestUnsafeVolatileCAE,
+ * TestUnsafeVolatileGAS}
* and <testtype> in {G1,
* CMS,
* CMSCondMark,
@@ -43,13 +46,16 @@
package compiler.c2.aarch64;
import java.util.List;
+import java.util.ListIterator;
import java.util.Iterator;
+import java.util.regex.Pattern;
import java.io.*;
import jdk.test.lib.Asserts;
import jdk.test.lib.compiler.InMemoryJavaCompiler;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
+import sun.hotspot.WhiteBox;
// runner class that spawns a new JVM to exercise a combination of
// volatile MemOp and GC. The ops are compiled with the dmb -->
@@ -68,28 +74,28 @@
// i.e. GC type plus GC config
switch(testType) {
case "G1":
- argcount = 8;
+ argcount = 9;
procArgs = new String[argcount];
procArgs[argcount - 2] = "-XX:+UseG1GC";
break;
case "Parallel":
- argcount = 8;
+ argcount = 9;
procArgs = new String[argcount];
procArgs[argcount - 2] = "-XX:+UseParallelGC";
break;
case "Serial":
- argcount = 8;
+ argcount = 9;
procArgs = new String[argcount];
procArgs[argcount - 2] = "-XX:+UseSerialGC";
break;
case "CMS":
- argcount = 9 ;
+ argcount = 10;
procArgs = new String[argcount];
procArgs[argcount - 3] = "-XX:+UseConcMarkSweepGC";
procArgs[argcount - 2] = "-XX:-UseCondCardMark";
break;
case "CMSCondMark":
- argcount = 9 ;
+ argcount = 10;
procArgs = new String[argcount];
procArgs[argcount - 3] = "-XX:+UseConcMarkSweepGC";
procArgs[argcount - 2] = "-XX:+UseCondCardMark";
@@ -106,14 +112,34 @@
// disable the transform.
procArgs[0] = "-XX:-UseBarriersForVolatile";
+ procArgs[1] = "-XX:+UseCompressedOops";
- procArgs[1] = "-XX:-TieredCompilation";
- procArgs[2] = "-XX:+PrintOptoAssembly";
- procArgs[3] = "-XX:CompileCommand=compileonly," + fullclassname + "::" + "test*";
- procArgs[4] = "--add-exports";
- procArgs[5] = "java.base/jdk.internal.misc=ALL-UNNAMED";
+ procArgs[2] = "-XX:-TieredCompilation";
+ procArgs[3] = "-XX:+PrintOptoAssembly";
+ procArgs[4] = "-XX:CompileCommand=compileonly," + fullclassname + "::" + "test*";
+ procArgs[5] = "--add-exports";
+ procArgs[6] = "java.base/jdk.internal.misc=ALL-UNNAMED";
procArgs[argcount - 1] = fullclassname;
+ runtest(classname, testType, false, true, procArgs);
+ // rerun the test class without the transform applied and
+ // check the alternative generation is as expected
+
+ procArgs[0] = "-XX:+UseBarriersForVolatile";
+ runtest(classname, testType, true, true, procArgs);
+
+ if (!classname.equals("TestUnsafeVolatileGAA")) {
+ procArgs[0] = "-XX:-UseBarriersForVolatile";
+ procArgs[1] = "-XX:-UseCompressedOops";
+ runtest(classname, testType, false, false, procArgs);
+
+ procArgs[0] = "-XX:+UseBarriersForVolatile";
+ runtest(classname, testType, true, false, procArgs);
+ }
+ }
+
+
+ public void runtest(String classname, String testType, boolean useBarriersForVolatile, boolean useCompressedOops, String[] procArgs) throws Throwable {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(procArgs);
OutputAnalyzer output = new OutputAnalyzer(pb.start());
@@ -125,23 +151,7 @@
// appropriate to test class, test type and whether transform
// was applied
- checkoutput(output, classname, testType, false);
-
- // rerun the test class without the transform applied and
- // check the alternative generation is as expected
-
- procArgs[0] = "-XX:+UseBarriersForVolatile";
-
- pb = ProcessTools.createJavaProcessBuilder(procArgs);
- output = new OutputAnalyzer(pb.start());
-
- output.stderrShouldBeEmptyIgnoreVMWarnings();
- output.stdoutShouldNotBeEmpty();
- output.shouldHaveExitValue(0);
-
- // again check the output for the correct asm sequence
-
- checkoutput(output, classname, testType, true);
+ checkoutput(output, classname, testType, useBarriersForVolatile, useCompressedOops);
}
// skip through output returning a line containing the desired
@@ -150,7 +160,7 @@
{
while (iter.hasNext()) {
String nextLine = iter.next();
- if (nextLine.contains(substring)) {
+ if (nextLine.matches(".*" + substring + ".*")) {
return nextLine;
}
}
@@ -163,7 +173,7 @@
// n.b. the spawned JVM's output is included in the exception
// message to make it easier to identify what is missing.
- private void checkCompile(Iterator<String> iter, String methodname, String[] expected, OutputAnalyzer output)
+ private boolean checkCompile(Iterator<String> iter, String methodname, String[] expected, OutputAnalyzer output, boolean do_throw)
{
// trace call to allow eyeball check of what we are checking against
System.out.println("checkCompile(" + methodname + ",");
@@ -176,30 +186,43 @@
System.out.println(" })");
// look for the start of an opto assembly print block
- String match = skipTo(iter, "{method}");
+ String match = skipTo(iter, Pattern.quote("{method}"));
if (match == null) {
- throw new RuntimeException("Missing compiler output for " + methodname + "!\n\n" + output.getOutput());
+ if (do_throw) {
+ throw new RuntimeException("Missing compiler output for " + methodname + "!\n\n" + output.getOutput());
+ }
+ return false;
}
// check the compiled method name is right
- match = skipTo(iter, "- name:");
+ match = skipTo(iter, Pattern.quote("- name:"));
if (match == null) {
- throw new RuntimeException("Missing compiled method name!\n\n" + output.getOutput());
+ if (do_throw) {
+ throw new RuntimeException("Missing compiled method name!\n\n" + output.getOutput());
+ }
+ return false;
}
if (!match.contains(methodname)) {
- throw new RuntimeException("Wrong method " + match + "!\n -- expecting " + methodname + "\n\n" + output.getOutput());
+ if (do_throw) {
+ throw new RuntimeException("Wrong method " + match + "!\n -- expecting " + methodname + "\n\n" + output.getOutput());
+ }
+ return false;
}
// make sure we can match each expected term in order
for (String s : expected) {
match = skipTo(iter, s);
if (match == null) {
- throw new RuntimeException("Missing expected output " + s + "!\n\n" + output.getOutput());
+ if (do_throw) {
+ throw new RuntimeException("Missing expected output " + s + "!\n\n" + output.getOutput());
+ }
+ return false;
}
}
+ return true;
}
// check for expected asm output from a volatile load
- private void checkload(OutputAnalyzer output, String testType, boolean useBarriersForVolatile) throws Throwable
+ private void checkload(OutputAnalyzer output, String testType, boolean useBarriersForVolatile, boolean useCompressedOops) throws Throwable
{
Iterator<String> iter = output.asLines().listIterator();
@@ -211,7 +234,7 @@
if (!useBarriersForVolatile) {
matches = new String[] {
"ldarw",
- "membar_acquire (elided)",
+ "membar_acquire \\(elided\\)",
"ret"
};
} else {
@@ -223,15 +246,30 @@
};
}
- checkCompile(iter, "testInt", matches, output);
+ checkCompile(iter, "testInt", matches, output, true);
- checkCompile(iter, "testObj", matches, output) ;
+ if (!useBarriersForVolatile) {
+ matches = new String[] {
+ useCompressedOops ? "ldarw?" : "ldar",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ } else {
+ matches = new String[] {
+ useCompressedOops ? "ldrw?" : "ldr",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ }
+
+ checkCompile(iter, "testObj", matches, output, true);
}
// check for expected asm output from a volatile store
- private void checkstore(OutputAnalyzer output, String testType, boolean useBarriersForVolatile) throws Throwable
+ private void checkstore(OutputAnalyzer output, String testType, boolean useBarriersForVolatile, boolean useCompressedOops) throws Throwable
{
Iterator<String> iter = output.asLines().listIterator();
@@ -241,9 +279,9 @@
if (!useBarriersForVolatile) {
// this is the sequence of instructions for all cases
matches = new String[] {
- "membar_release (elided)",
+ "membar_release \\(elided\\)",
"stlrw",
- "membar_volatile (elided)",
+ "membar_volatile \\(elided\\)",
"ret"
};
} else {
@@ -258,7 +296,7 @@
};
}
- checkCompile(iter, "testInt", matches, output);
+ checkCompile(iter, "testInt", matches, output, true);
// object stores will be as above except for when the GC
// introduces barriers for card marking
@@ -268,9 +306,9 @@
default:
// this is the basic sequence of instructions
matches = new String[] {
- "membar_release (elided)",
- "stlrw",
- "membar_volatile (elided)",
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "stlrw?" : "stlr",
+ "membar_volatile \\(elided\\)",
"ret"
};
break;
@@ -278,12 +316,12 @@
// a card mark volatile barrier should be generated
// before the card mark strb
matches = new String[] {
- "membar_release (elided)",
- "stlrw",
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "stlrw?" : "stlr",
"membar_volatile",
"dmb ish",
"strb",
- "membar_volatile (elided)",
+ "membar_volatile \\(elided\\)",
"ret"
};
break;
@@ -292,13 +330,13 @@
// before the card mark strb from the StoreCM and the
// storestore barrier from the StoreCM should be elided
matches = new String[] {
- "membar_release (elided)",
- "stlrw",
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "stlrw?" : "stlr",
"membar_volatile",
"dmb ish",
- "storestore (elided)",
+ "storestore \\(elided\\)",
"strb",
- "membar_volatile (elided)",
+ "membar_volatile \\(elided\\)",
"ret"
};
break;
@@ -308,12 +346,12 @@
// storestore barrier from the StoreCM should be
// generated as "dmb ishst"
matches = new String[] {
- "membar_release (elided)",
- "stlrw",
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "stlrw?" : "stlr",
"storestore",
"dmb ishst",
"strb",
- "membar_volatile (elided)",
+ "membar_volatile \\(elided\\)",
"ret"
};
break;
@@ -325,7 +363,7 @@
matches = new String[] {
"membar_release",
"dmb ish",
- "strw",
+ useCompressedOops ? "strw?" : "str",
"membar_volatile",
"dmb ish",
"ret"
@@ -337,7 +375,7 @@
matches = new String[] {
"membar_release",
"dmb ish",
- "strw",
+ useCompressedOops ? "strw?" : "str",
"membar_volatile",
"dmb ish",
"strb",
@@ -353,10 +391,10 @@
matches = new String[] {
"membar_release",
"dmb ish",
- "strw",
+ useCompressedOops ? "strw?" : "str",
"membar_volatile",
"dmb ish",
- "storestore (elided)",
+ "storestore \\(elided\\)",
"strb",
"membar_volatile",
"dmb ish",
@@ -371,7 +409,7 @@
matches = new String[] {
"membar_release",
"dmb ish",
- "strw",
+ useCompressedOops ? "strw?" : "str",
"storestore",
"dmb ishst",
"strb",
@@ -383,40 +421,48 @@
}
}
- checkCompile(iter, "testObj", matches, output);
+ checkCompile(iter, "testObj", matches, output, true);
}
// check for expected asm output from a volatile cas
- private void checkcas(OutputAnalyzer output, String testType, boolean useBarriersForVolatile) throws Throwable
+ private void checkcas(OutputAnalyzer output, String testType, boolean useBarriersForVolatile, boolean useCompressedOops) throws Throwable
{
Iterator<String> iter = output.asLines().listIterator();
String[] matches;
+ String[][] tests = {
+ { "testInt", "cmpxchgw" },
+ { "testLong", "cmpxchg" },
+ { "testByte", "cmpxchgb" },
+ { "testShort", "cmpxchgs" },
+ };
- // non object stores are straightforward
- if (!useBarriersForVolatile) {
- // this is the sequence of instructions for all cases
- matches = new String[] {
- "membar_release (elided)",
- "cmpxchgw_acq",
- "membar_acquire (elided)",
- "ret"
- };
- } else {
- // this is the alternative sequence of instructions
- matches = new String[] {
- "membar_release",
- "dmb ish",
- "cmpxchgw",
- "membar_acquire",
- "dmb ish",
- "ret"
- };
+ for (String[] test : tests) {
+ // non object stores are straightforward
+ if (!useBarriersForVolatile) {
+ // this is the sequence of instructions for all cases
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ test[1] + "_acq",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ } else {
+ // this is the alternative sequence of instructions
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ test[1] + " ",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ }
+
+ checkCompile(iter, test[0], matches, output, true);
}
- checkCompile(iter, "testInt", matches, output);
-
// object stores will be as above except for when the GC
// introduces barriers for card marking
@@ -425,10 +471,10 @@
default:
// this is the basic sequence of instructions
matches = new String[] {
- "membar_release (elided)",
- "cmpxchgw_acq",
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
"strb",
- "membar_acquire (elided)",
+ "membar_acquire \\(elided\\)",
"ret"
};
break;
@@ -436,12 +482,12 @@
// a card mark volatile barrier should be generated
// before the card mark strb
matches = new String[] {
- "membar_release (elided)",
- "cmpxchgw_acq",
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
"membar_volatile",
"dmb ish",
"strb",
- "membar_acquire (elided)",
+ "membar_acquire \\(elided\\)",
"ret"
};
break;
@@ -450,13 +496,13 @@
// before the card mark strb from the StoreCM and the
// storestore barrier from the StoreCM should be elided
matches = new String[] {
- "membar_release (elided)",
- "cmpxchgw_acq",
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
"membar_volatile",
"dmb ish",
- "storestore (elided)",
+ "storestore \\(elided\\)",
"strb",
- "membar_acquire (elided)",
+ "membar_acquire \\(elided\\)",
"ret"
};
break;
@@ -465,12 +511,12 @@
// before the card mark strb from the StoreCM and the
// storestore barrier from the StoreCM should be elided
matches = new String[] {
- "membar_release (elided)",
- "cmpxchgw_acq",
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
"storestore",
"dmb ishst",
"strb",
- "membar_acquire (elided)",
+ "membar_acquire \\(elided\\)",
"ret"
};
break;
@@ -482,7 +528,7 @@
matches = new String[] {
"membar_release",
"dmb ish",
- "cmpxchgw",
+ useCompressedOops ? "cmpxchgw? " : "cmpxchg ",
"membar_acquire",
"dmb ish",
"ret"
@@ -494,7 +540,7 @@
matches = new String[] {
"membar_release",
"dmb ish",
- "cmpxchgw",
+ useCompressedOops ? "cmpxchgw? " : "cmpxchg ",
"membar_volatile",
"dmb ish",
"strb",
@@ -510,10 +556,191 @@
matches = new String[] {
"membar_release",
"dmb ish",
- "cmpxchgw",
+ useCompressedOops ? "cmpxchgw? " : "cmpxchg ",
"membar_volatile",
"dmb ish",
- "storestore (elided)",
+ "storestore \\(elided\\)",
+ "strb",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ break;
+ case "CMS":
+ // a volatile card mark membar should not be generated
+ // before the card mark strb from the StoreCM and the
+ // storestore barrier from the StoreCM should be generated
+ // as "dmb ishst"
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ useCompressedOops ? "cmpxchgw? " : "cmpxchg ",
+ "storestore",
+ "dmb ishst",
+ "strb",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ break;
+ }
+ }
+
+ checkCompile(iter, "testObj", matches, output, true);
+ }
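
Each entry in the matches arrays above is a Java regular expression (hence the escaped parentheses around "(elided)"), and the expected instructions must appear in that order in the PrintOptoAssembly output for the named test method. The checkCompile helper that consumes these arrays is defined earlier in TestVolatiles.java and is not shown in this hunk; the sketch below only illustrates the kind of in-order scan it is assumed to perform. The class name, signature and failure handling here are invented for the example and are not the real helper.

    import java.util.ListIterator;
    import java.util.regex.Pattern;

    class InOrderMatcherSketch {
        // Scan forward to the compilation banner for 'method', then require each
        // pattern to match some subsequent line, in the given order. When
        // 'mustMatch' is false a miss just reports failure so the caller can
        // retry with an alternative instruction ordering.
        static boolean matchInOrder(ListIterator<String> iter, String method,
                                    String[] patterns, boolean mustMatch) {
            while (iter.hasNext() && !iter.next().contains(method)) {
                // skip output belonging to other compilations
            }
            int i = 0;
            while (iter.hasNext() && i < patterns.length) {
                if (Pattern.compile(patterns[i]).matcher(iter.next()).find()) {
                    i++;
                }
            }
            if (i < patterns.length) {
                if (mustMatch) {
                    throw new RuntimeException("missing instruction: " + patterns[i]);
                }
                return false;
            }
            return true;
        }
    }

The extra boolean argument this patch adds to every checkCompile call plays the role of mustMatch here: passing false lets checkcae below retry testObj with the alternative ordering of the card mark strb and the cmpxchg instead of failing outright.
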
+
+ private void checkcae(OutputAnalyzer output, String testType, boolean useBarriersForVolatile, boolean useCompressedOops) throws Throwable
+ {
+ ListIterator<String> iter = output.asLines().listIterator();
+
+ String[] matches;
+ String[][] tests = {
+ { "testInt", "cmpxchgw" },
+ { "testLong", "cmpxchg" },
+ { "testByte", "cmpxchgb" },
+ { "testShort", "cmpxchgs" },
+ };
+
+ for (String[] test : tests) {
+ // non object stores are straightforward
+ if (!useBarriersForVolatile) {
+ // this is the sequence of instructions for all cases
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ test[1] + "_acq",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ } else {
+ // this is the alternative sequence of instructions
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ test[1] + " ",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ }
+
+ checkCompile(iter, test[0], matches, output, true);
+ }
+
+ // object stores will be as above except for when the GC
+ // introduces barriers for card marking
+
+ if (!useBarriersForVolatile) {
+ switch (testType) {
+ default:
+ // this is the basic sequence of instructions
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ "strb",
+ useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+
+ // card marking store may be scheduled before or after
+ // the cmpxchg so try both sequences.
+ int idx = iter.nextIndex();
+ if (!checkCompile(iter, "testObj", matches, output, false)) {
+ iter = output.asLines().listIterator(idx);
+
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
+ "strb",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+
+ checkCompile(iter, "testObj", matches, output, true);
+ }
+ return;
+
+ case "G1":
+ // a card mark volatile barrier should be generated
+ // before the card mark strb
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
+ "membar_volatile",
+ "dmb ish",
+ "strb",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ break;
+ case "CMSCondMark":
+ // a card mark volatile barrier should be generated
+ // before the card mark strb from the StoreCM and the
+ // storestore barrier from the StoreCM should be elided
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
+ "membar_volatile",
+ "dmb ish",
+ "storestore \\(elided\\)",
+ "strb",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ break;
+ case "CMS":
+ // a volatile card mark membar should not be generated
+ // before the card mark strb from the StoreCM and the
+                // storestore barrier from the StoreCM should be generated as "dmb ishst"
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
+ "storestore",
+ "dmb ishst",
+ "strb",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ break;
+ }
+ } else {
+ switch (testType) {
+ default:
+ // this is the basic sequence of instructions
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ useCompressedOops ? "cmpxchgw? " : "cmpxchg ",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ break;
+ case "G1":
+ // a card mark volatile barrier should be generated
+ // before the card mark strb
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ useCompressedOops ? "cmpxchgw? " : "cmpxchg ",
+ "membar_volatile",
+ "dmb ish",
+ "strb",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ break;
+ case "CMSCondMark":
+ // a card mark volatile barrier should be generated
+ // before the card mark strb from the StoreCM and the
+ // storestore barrier from the StoreCM should be elided
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ useCompressedOops ? "cmpxchgw? " : "cmpxchg ",
+ "membar_volatile",
+ "dmb ish",
+ "storestore \\(elided\\)",
"strb",
"membar_acquire",
"dmb ish",
@@ -528,7 +755,7 @@
matches = new String[] {
"membar_release",
"dmb ish",
- "cmpxchgw",
+ useCompressedOops ? "cmpxchgw? " : "cmpxchg ",
"storestore",
"dmb ishst",
"strb",
@@ -540,12 +767,210 @@
}
}
- checkCompile(iter, "testObj", matches, output);
+ checkCompile(iter, "testObj", matches, output, true);
+ }
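
The TestUnsafeVolatileCAE class compiled by this checker drives jdk.internal.misc.Unsafe directly and its source is not part of this hunk. Purely for orientation, a public-API stand-in that exercises the same CompareAndExchange nodes in C2 can be written with a VarHandle; this is an illustration under that assumption, not the test's actual code.

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class VolatileCAESketch {
        volatile int intField;

        private static final VarHandle INT_FIELD;
        static {
            try {
                INT_FIELD = MethodHandles.lookup()
                        .findVarHandle(VolatileCAESketch.class, "intField", int.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        // C2 intrinsifies this to a CompareAndExchangeI node; the match arrays
        // above expect such a node to compile to cmpxchgw_acq with the release
        // and acquire membars elided when UseBarriersForVolatile is off.
        int testInt(int expected, int update) {
            return (int) INT_FIELD.compareAndExchange(this, expected, update);
        }
    }
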
+
+ private void checkgas(OutputAnalyzer output, String testType, boolean useBarriersForVolatile, boolean useCompressedOops) throws Throwable
+ {
+ Iterator<String> iter = output.asLines().listIterator();
+
+ String[] matches;
+ String[][] tests = {
+ { "testInt", "atomic_xchgw" },
+ { "testLong", "atomic_xchg" },
+ };
+
+ for (String[] test : tests) {
+ // non object stores are straightforward
+ if (!useBarriersForVolatile) {
+ // this is the sequence of instructions for all cases
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ test[1] + "_acq",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ } else {
+ // this is the alternative sequence of instructions
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ test[1] + " ",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ }
+
+ checkCompile(iter, test[0], matches, output, true);
+ }
+
+ // object stores will be as above except for when the GC
+ // introduces barriers for card marking
+
+ if (!useBarriersForVolatile) {
+ switch (testType) {
+ default:
+ // this is the basic sequence of instructions
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "atomic_xchgw?_acq" : "atomic_xchg_acq",
+ "strb",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ break;
+ case "G1":
+ // a card mark volatile barrier should be generated
+ // before the card mark strb
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "atomic_xchgw?_acq" : "atomic_xchg_acq",
+ "membar_volatile",
+ "dmb ish",
+ "strb",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ break;
+ case "CMSCondMark":
+ // a card mark volatile barrier should be generated
+ // before the card mark strb from the StoreCM and the
+ // storestore barrier from the StoreCM should be elided
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "atomic_xchgw?_acq" : "atomic_xchg_acq",
+ "membar_volatile",
+ "dmb ish",
+ "storestore \\(elided\\)",
+ "strb",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ break;
+ case "CMS":
+ // a volatile card mark membar should not be generated
+ // before the card mark strb from the StoreCM and the
+                // storestore barrier from the StoreCM should be generated as "dmb ishst"
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ useCompressedOops ? "atomic_xchgw?_acq" : "atomic_xchg_acq",
+ "storestore",
+ "dmb ishst",
+ "strb",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ break;
+ }
+ } else {
+ switch (testType) {
+ default:
+ // this is the basic sequence of instructions
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ useCompressedOops ? "atomic_xchgw? " : "atomic_xchg ",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ break;
+ case "G1":
+ // a card mark volatile barrier should be generated
+ // before the card mark strb
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ useCompressedOops ? "atomic_xchgw? " : "atomic_xchg ",
+ "membar_volatile",
+ "dmb ish",
+ "strb",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ break;
+ case "CMSCondMark":
+ // a card mark volatile barrier should be generated
+ // before the card mark strb from the StoreCM and the
+ // storestore barrier from the StoreCM should be elided
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ useCompressedOops ? "atomic_xchgw? " : "atomic_xchg ",
+ "membar_volatile",
+ "dmb ish",
+ "storestore \\(elided\\)",
+ "strb",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ break;
+ case "CMS":
+ // a volatile card mark membar should not be generated
+ // before the card mark strb from the StoreCM and the
+ // storestore barrier from the StoreCM should be generated
+ // as "dmb ishst"
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ useCompressedOops ? "atomic_xchgw? " : "atomic_xchg ",
+ "storestore",
+ "dmb ishst",
+ "strb",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ break;
+ }
+ }
+
+ checkCompile(iter, "testObj", matches, output, true);
+ }
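
Likewise the TestUnsafeVolatileGAS source sits outside this hunk. A VarHandle-based stand-in that produces the GetAndSet nodes matched above (atomic_xchgw for the int case, atomic_xchg for the long case) would look roughly as follows; again an illustration, not the test's code.

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class VolatileGASSketch {
        volatile int intField;
        volatile long longField;

        private static final VarHandle INT_FIELD;
        private static final VarHandle LONG_FIELD;
        static {
            try {
                MethodHandles.Lookup l = MethodHandles.lookup();
                INT_FIELD = l.findVarHandle(VolatileGASSketch.class, "intField", int.class);
                LONG_FIELD = l.findVarHandle(VolatileGASSketch.class, "longField", long.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        // GetAndSetI: the checkgas match arrays expect atomic_xchgw_acq with both
        // membars elided, or dmb ish; atomic_xchgw; dmb ish with barriers enabled.
        int testInt(int v)    { return (int)  INT_FIELD.getAndSet(this, v); }

        // GetAndSetL: atomic_xchg_acq, or the dmb-bracketed atomic_xchg form.
        long testLong(long v) { return (long) LONG_FIELD.getAndSet(this, v); }
    }
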
+
+ private void checkgaa(OutputAnalyzer output, String testType, boolean useBarriersForVolatile) throws Throwable
+ {
+ Iterator<String> iter = output.asLines().listIterator();
+
+ String[] matches;
+ String[][] tests = {
+ { "testInt", "get_and_addI" },
+ { "testLong", "get_and_addL" },
+ };
+
+ for (String[] test : tests) {
+ // non object stores are straightforward
+ if (!useBarriersForVolatile) {
+ // this is the sequence of instructions for all cases
+ matches = new String[] {
+ "membar_release \\(elided\\)",
+ test[1] + "_acq",
+ "membar_acquire \\(elided\\)",
+ "ret"
+ };
+ } else {
+ // this is the alternative sequence of instructions
+ matches = new String[] {
+ "membar_release",
+ "dmb ish",
+ test[1] + " ",
+ "membar_acquire",
+ "dmb ish",
+ "ret"
+ };
+ }
+
+ checkCompile(iter, test[0], matches, output, true);
+ }
+
}
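
checkgaa stops after the int/long loop and never checks a testObj compilation, since get-and-add has no reference flavour. The get_and_addI/get_and_addL patterns it scans for come from GetAndAddI/GetAndAddL nodes, which a stand-in like the following would also generate (the real TestUnsafeVolatileGAA uses Unsafe rather than a VarHandle):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class VolatileGAASketch {
        volatile int intField;

        private static final VarHandle INT_FIELD;
        static {
            try {
                INT_FIELD = MethodHandles.lookup()
                        .findVarHandle(VolatileGAASketch.class, "intField", int.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        // GetAndAddI: checkgaa expects get_and_addI_acq with both membars elided,
        // or dmb ish; get_and_addI; dmb ish when UseBarriersForVolatile is set.
        int testInt(int delta) {
            return (int) INT_FIELD.getAndAdd(this, delta);
        }
    }
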
// perform a check appropriate to the classname
- private void checkoutput(OutputAnalyzer output, String classname, String testType, boolean useBarriersForVolatile) throws Throwable
+ private void checkoutput(OutputAnalyzer output, String classname, String testType, boolean useBarriersForVolatile, boolean useCompressedOops) throws Throwable
{
// trace call to allow eyeball check of what is being checked
System.out.println("checkoutput(" +
@@ -556,19 +981,29 @@
switch (classname) {
case "TestVolatileLoad":
- checkload(output, testType, useBarriersForVolatile);
+ checkload(output, testType, useBarriersForVolatile, useCompressedOops);
break;
case "TestVolatileStore":
- checkstore(output, testType, useBarriersForVolatile);
+ checkstore(output, testType, useBarriersForVolatile, useCompressedOops);
break;
case "TestUnsafeVolatileLoad":
- checkload(output, testType, useBarriersForVolatile);
+ checkload(output, testType, useBarriersForVolatile, useCompressedOops);
break;
case "TestUnsafeVolatileStore":
- checkstore(output, testType, useBarriersForVolatile);
+ checkstore(output, testType, useBarriersForVolatile, useCompressedOops);
break;
case "TestUnsafeVolatileCAS":
- checkcas(output, testType, useBarriersForVolatile);
+ case "TestUnsafeVolatileWeakCAS":
+ checkcas(output, testType, useBarriersForVolatile, useCompressedOops);
+ break;
+ case "TestUnsafeVolatileCAE":
+ checkcae(output, testType, useBarriersForVolatile, useCompressedOops);
+ break;
+ case "TestUnsafeVolatileGAS":
+ checkgas(output, testType, useBarriersForVolatile, useCompressedOops);
+ break;
+ case "TestUnsafeVolatileGAA":
+ checkgaa(output, testType, useBarriersForVolatile);
break;
}
}
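
The driver classes amended below (the CMS, CMSCondMark, G1, Parallel and Serial variants) all end up in checkoutput above: each @run line forks a child VM with the named collector, -XX:+PrintOptoAssembly and the UseBarriersForVolatile/UseCompressedOops combination under test, then hands the captured output to checkoutput. The flag plumbing lives in the runtest helper, which is not in this hunk; the following is a rough sketch of that step, assuming the usual jdk.test.lib process utilities and an invented method name.

    import jdk.test.lib.process.OutputAnalyzer;
    import jdk.test.lib.process.ProcessTools;

    class DriverStepSketch {
        // Illustrative only: the real argument list is assembled in runtest().
        static void runOne() throws Throwable {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                    "-XX:+UnlockDiagnosticVMOptions",
                    "-XX:+PrintOptoAssembly",
                    "-XX:-UseBarriersForVolatile",
                    "-XX:+UseG1GC",
                    // the Unsafe-based test classes presumably need this export
                    "--add-exports", "java.base/jdk.internal.misc=ALL-UNNAMED",
                    "compiler.c2.aarch64.TestUnsafeVolatileCAE");
            OutputAnalyzer output = ProcessTools.executeProcess(pb);
            output.shouldHaveExitValue(0);
            // followed by: checkoutput(output, "TestUnsafeVolatileCAE", "G1",
            //                          /*useBarriersForVolatile*/ false,
            //                          /*useCompressedOops*/ true);
        }
    }
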
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMS.java Thu Oct 04 09:24:27 2018 +0200
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMS.java Mon Nov 05 12:53:55 2018 +0100
@@ -38,6 +38,9 @@
* compiler.c2.aarch64.TestVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileCAE
+ * compiler.c2.aarch64.TestUnsafeVolatileGAS
*
* @run driver compiler.c2.aarch64.TestVolatilesCMS
* TestVolatileLoad CMS
@@ -53,6 +56,15 @@
*
* @run driver compiler.c2.aarch64.TestVolatilesCMS
* TestUnsafeVolatileCAS CMS
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesCMS
+ * TestUnsafeVolatileWeakCAS CMS
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesCMS
+ * TestUnsafeVolatileCAE CMS
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesCMS
+ * TestUnsafeVolatileGAS CMS
*/
package compiler.c2.aarch64;
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMSCondMark.java Thu Oct 04 09:24:27 2018 +0200
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMSCondMark.java Mon Nov 05 12:53:55 2018 +0100
@@ -38,6 +38,9 @@
* compiler.c2.aarch64.TestVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileCAE
+ * compiler.c2.aarch64.TestUnsafeVolatileGAS
*
* @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
* TestVolatileLoad CMSCondMark
@@ -53,6 +56,15 @@
*
* @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
* TestUnsafeVolatileCAS CMSCondMark
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
+ * TestUnsafeVolatileWeakCAS CMSCondMark
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
+ * TestUnsafeVolatileCAE CMSCondMark
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
+ * TestUnsafeVolatileGAS CMSCondMark
*/
package compiler.c2.aarch64;
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesG1.java Thu Oct 04 09:24:27 2018 +0200
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesG1.java Mon Nov 05 12:53:55 2018 +0100
@@ -38,6 +38,10 @@
* compiler.c2.aarch64.TestVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileCAE
+ * compiler.c2.aarch64.TestUnsafeVolatileGAS
+ * compiler.c2.aarch64.TestUnsafeVolatileGAA
*
* @run driver compiler.c2.aarch64.TestVolatilesG1
* TestVolatileLoad G1
@@ -53,8 +57,21 @@
*
* @run driver compiler.c2.aarch64.TestVolatilesG1
* TestUnsafeVolatileCAS G1
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesG1
+ * TestUnsafeVolatileWeakCAS G1
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesG1
+ * TestUnsafeVolatileCAE G1
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesG1
+ * TestUnsafeVolatileGAS G1
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesG1
+ * TestUnsafeVolatileGAA G1
*/
+
package compiler.c2.aarch64;
public class TestVolatilesG1 {
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesParallel.java Thu Oct 04 09:24:27 2018 +0200
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesParallel.java Mon Nov 05 12:53:55 2018 +0100
@@ -38,6 +38,9 @@
* compiler.c2.aarch64.TestVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileCAE
+ * compiler.c2.aarch64.TestUnsafeVolatileGAS
*
* @run driver compiler.c2.aarch64.TestVolatilesParallel
* TestVolatileLoad Parallel
@@ -53,6 +56,15 @@
*
* @run driver compiler.c2.aarch64.TestVolatilesParallel
* TestUnsafeVolatileCAS Parallel
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesParallel
+ * TestUnsafeVolatileWeakCAS Parallel
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesParallel
+ * TestUnsafeVolatileCAE Parallel
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesParallel
+ * TestUnsafeVolatileGAS Parallel
*/
package compiler.c2.aarch64;
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesSerial.java Thu Oct 04 09:24:27 2018 +0200
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesSerial.java Mon Nov 05 12:53:55 2018 +0100
@@ -38,6 +38,9 @@
* compiler.c2.aarch64.TestVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileStore
* compiler.c2.aarch64.TestUnsafeVolatileCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
+ * compiler.c2.aarch64.TestUnsafeVolatileCAE
+ * compiler.c2.aarch64.TestUnsafeVolatileGAS
*
* @run driver compiler.c2.aarch64.TestVolatilesSerial
* TestVolatileLoad Serial
@@ -53,6 +56,15 @@
*
* @run driver compiler.c2.aarch64.TestVolatilesSerial
* TestUnsafeVolatileCAS Serial
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesSerial
+ * TestUnsafeVolatileWeakCAS Serial
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesSerial
+ * TestUnsafeVolatileCAE Serial
+ *
+ * @run driver compiler.c2.aarch64.TestVolatilesSerial
+ * TestUnsafeVolatileGAS Serial
*/
package compiler.c2.aarch64;