//
// Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

|
// C++ code placed in the header of the generated ad file. The include
// presumably brings in the ZGC C2 node declarations (LoadBarrierSlowReg,
// ZCompareAndSwapP, ...) referenced by the match rules below — verify
// against zBarrierSetC2.hpp.
source_hpp %{

#include "gc/z/c2/zBarrierSetC2.hpp"

%}
|
29 |
|
source %{

#include "gc/z/zBarrierSetAssembler.hpp"

// Emit the ZGC load barrier slow-path call for an oop loaded into dst.
//
// The slow-path stub is looked up per destination register (strong or weak
// flavor, selected by 'weak'). Before the call, the effective address of the
// accessed memory location (base + index<<scale + disp) is materialized into
// dst itself — i.e. dst is clobbered and used as the stub's address argument;
// the stub is expected to deliver its result back in dst (TODO confirm the
// stub's calling convention against ZBarrierSetAssembler).
//
// index == -1 encodes "no index register" in ADLC's memory-operand
// decomposition (see the branch structure below).
static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst,
                                    Register base, int index, int scale,
                                    int disp, bool weak) {
  const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
                            : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);

  if (index == -1) {
    // No index register: address is base (+ disp).
    if (disp != 0) {
      __ lea(dst, Address(base, disp));
    } else {
      // disp == 0: the address is just the base register.
      __ mov(dst, base);
    }
  } else {
    Register index_reg = as_Register(index);
    if (disp == 0) {
      __ lea(dst, Address(base, index_reg, Address::lsl(scale)));
    } else {
      // AArch64 has no [base + index<<scale + disp] addressing form, so the
      // address is built in two steps: dst = base + disp, then
      // dst = dst + (index << scale).
      __ lea(dst, Address(base, disp));
      __ lea(dst, Address(dst, index_reg, Address::lsl(scale)));
    }
  }

  __ far_call(RuntimeAddress(stub));
}

%}
|
60 |
|
//
// Execute ZGC load barrier (strong) slow path
//
// Strong variant, selected by the !is_weak() predicate. The condition flags
// and all 32 FP/SIMD registers are marked KILL, i.e. declared not preserved
// across the slow-path call.
//
instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr,
    vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
    vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
    vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
    vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
    vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
    vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
    vRegD_V30 v30, vRegD_V31 v31) %{
  match(Set dst (LoadBarrierSlowReg mem));
  predicate(!n->as_LoadBarrierSlowReg()->is_weak());

  effect(DEF dst, KILL cr,
         KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
         KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
         KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
         KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
         KILL v29, KILL v30, KILL v31);

  format %{"LoadBarrierSlowReg $dst, $mem" %}
  ins_encode %{
    // weak == false: selects the strong load-barrier slow-path stub.
    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
                            $mem$$index, $mem$$scale, $mem$$disp, false);
  %}
  ins_pipe(pipe_slow);
%}
|
89 |
|
//
// Execute ZGC load barrier (weak) slow path
//
// Weak variant, selected by the is_weak() predicate; otherwise identical to
// loadBarrierSlowReg above (same operands, same flag/vector-register kills).
//
instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr,
    vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
    vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
    vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
    vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19,
    vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
    vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
    vRegD_V30 v30, vRegD_V31 v31) %{
  match(Set dst (LoadBarrierSlowReg mem));
  predicate(n->as_LoadBarrierSlowReg()->is_weak());

  effect(DEF dst, KILL cr,
         KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
         KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
         KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
         KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
         KILL v29, KILL v30, KILL v31);

  format %{"LoadBarrierWeakSlowReg $dst, $mem" %}
  ins_encode %{
    // weak == true: selects the weak load-barrier slow-path stub.
    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
                            $mem$$index, $mem$$scale, $mem$$disp, true);
  %}
  ins_pipe(pipe_slow);
%}
|
118 |
|
119 |
|
// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed
// but doesn't affect output.

// ZGC compare-and-exchange of an oop field: returns the value observed in
// memory in $res. The extra 'keepalive' input is matched (to keep the oop
// alive for the GC, per the comment above) but generates no code.
instruct z_compareAndExchangeP(iRegPNoSp res, indirect mem,
                               iRegP oldval, iRegP newval, iRegP keepalive,
                               rFlagsReg cr) %{
  match(Set res (ZCompareAndExchangeP (Binary mem keepalive) (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  // NOTE: the encoding below performs a *strong* CAS (/*weak*/ false), so the
  // debug format no longer claims "(ptr, weak)".
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    // 64-bit (xword) strong CAS with release semantics, no acquire;
    // the previous memory value is delivered in $res.
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
|
139 |
|
// ZGC compare-and-swap of an oop field: performs the CAS and sets $res to
// 1 on success, 0 on failure (cset on EQ). Handles both the strong and the
// weak ideal node; the 'keepalive' input is matched for GC liveness only
// (see the comment above z_compareAndExchangeP) and generates no code.
instruct z_compareAndSwapP(iRegINoSp res,
                           indirect mem,
                           iRegP oldval, iRegP newval, iRegP keepalive,
                           rFlagsReg cr) %{

  match(Set res (ZCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));
  match(Set res (ZWeakCompareAndSwapP (Binary mem keepalive) (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  // Reuses the generic aarch64 cmpxchg + cset-on-EQ encoding classes
  // (defined in aarch64.ad).
  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
|
162 |
|
163 |
|
// ZGC atomic exchange of an oop field: swaps $newv into [$mem] and returns
// the previous value in $prev. The 'keepalive' input is matched for GC
// liveness only (see the comment above z_compareAndExchangeP) and generates
// no code.
instruct z_get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev,
                        iRegP keepalive) %{
  match(Set prev (ZGetAndSetP mem (Binary newv keepalive)));

  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg $prev, $newv, [$mem]" %}
  ins_encode %{
    // Plain (non-acquire/release) exchange — NOTE(review): unlike the CAS
    // variants above, no release semantics are requested here; confirm the
    // memory-ordering requirement against the matching rules for ZGetAndSetP.
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}