// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(64|Ptr) ...) => (ADD ...)
(Add(32|16|8) ...) => (ADDW ...)
(Add32F x y) => (Select0 (FADDS x y))
(Add64F x y) => (Select0 (FADD x y))

(Sub(64|Ptr) ...) => (SUB ...)
(Sub(32|16|8) ...) => (SUBW ...)
(Sub32F x y) => (Select0 (FSUBS x y))
(Sub64F x y) => (Select0 (FSUB x y))

(Mul64 ...) => (MULLD ...)
(Mul(32|16|8) ...) => (MULLW ...)
(Mul32F ...) => (FMULS ...)
(Mul64F ...) => (FMUL ...)
(Mul64uhilo ...) => (MLGR ...)

(Div32F ...) => (FDIVS ...)
(Div64F ...) => (FDIV ...)

(Div64 x y) => (DIVD x y)
(Div64u ...) => (DIVDU ...)
// DIVW/DIVWU have a 64-bit dividend and a 32-bit divisor,
// so a sign/zero extension of the dividend is required.
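//
// For example (illustrative): for Div16, the 16-bit dividend -7 must reach
// DIVW as the sign-extended value -7, not as the raw bit pattern 0xfff9
// (65529); only then does -7 / 2 yield -3.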
(Div32 x y) => (DIVW (MOVWreg x) y)
(Div32u x y) => (DIVWU (MOVWZreg x) y)
(Div16 x y) => (DIVW (MOVHreg x) (MOVHreg y))
(Div16u x y) => (DIVWU (MOVHZreg x) (MOVHZreg y))
(Div8 x y) => (DIVW (MOVBreg x) (MOVBreg y))
(Div8u x y) => (DIVWU (MOVBZreg x) (MOVBZreg y))

(Hmul(64|64u) ...) => (MULH(D|DU) ...)
(Hmul32 x y) => (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
(Hmul32u x y) => (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))

(Mod64 x y) => (MODD x y)
(Mod64u ...) => (MODDU ...)
// MODW/MODWU have a 64-bit dividend and a 32-bit divisor,
// so a sign/zero extension of the dividend is required.
(Mod32 x y) => (MODW (MOVWreg x) y)
(Mod32u x y) => (MODWU (MOVWZreg x) y)
(Mod16 x y) => (MODW (MOVHreg x) (MOVHreg y))
(Mod16u x y) => (MODWU (MOVHZreg x) (MOVHZreg y))
(Mod8 x y) => (MODW (MOVBreg x) (MOVBreg y))
(Mod8u x y) => (MODWU (MOVBZreg x) (MOVBZreg y))

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
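// For example (illustrative): x=6, y=4 gives (6-4)/2 + 4 = 5 = (6+4)/2,
// and the intermediate x-y cannot overflow, unlike x+y.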
(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)

(And64 ...) => (AND ...)
(And(32|16|8) ...) => (ANDW ...)

(Or64 ...) => (OR ...)
(Or(32|16|8) ...) => (ORW ...)

(Xor64 ...) => (XOR ...)
(Xor(32|16|8) ...) => (XORW ...)

(Neg64 ...) => (NEG ...)
(Neg(32|16|8) ...) => (NEGW ...)
(Neg32F ...) => (FNEGS ...)
(Neg64F ...) => (FNEG ...)

(Com64 ...) => (NOT ...)
(Com(32|16|8) ...) => (NOTW ...)
(NOT x) => (XOR (MOVDconst [-1]) x)
(NOTW x) => (XORWconst [-1] x)

// Lowering boolean ops
(AndB ...) => (ANDW ...)
(OrB ...) => (ORW ...)
(Not x) => (XORWconst [1] x)

// Lowering pointer arithmetic
(OffPtr [off] ptr:(SP)) => (MOVDaddr [int32(off)] ptr)
(OffPtr [off] ptr) && is32Bit(off) => (ADDconst [int32(off)] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)

(Ctz(64|32|16|8)NonZero ...) => (Ctz64 ...)

// Ctz(x) = 64 - findLeftmostOne((x-1)&^x)
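//
// Worked example (illustrative) for x = 0b1000:
//   (x-1)&^x        = 0b0111
//   findLeftmostOne = 61 (bit position counted from the most significant bit)
//   Ctz             = 64 - 61 = 3
// For x = 0, (x-1)&^x is all ones, findLeftmostOne returns 0 and Ctz = 64.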
(Ctz64 <t> x) => (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
(Ctz32 <t> x) => (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
(Ctz16 x) => (Ctz64 (Or64 <typ.UInt64> x (MOVDconst [1<<16])))
(Ctz8 x) => (Ctz64 (Or64 <typ.UInt64> x (MOVDconst [1<<8])))

(BitLen64 x) => (SUB (MOVDconst [64]) (FLOGR x))
(BitLen(32|16|8) x) => (BitLen64 (ZeroExt(32|16|8)to64 x))

// POPCNT treats the input register as a vector of 8 bytes, producing
// a population count for each individual byte. For inputs larger than
// a single byte we therefore need to sum the individual bytes produced
// by the POPCNT instruction. For example, the following instruction
// sequence could be used to calculate the population count of a 4-byte
// value:
//
//     MOVD   $0x12345678, R1 // R1=0x12345678 <-- input
//     POPCNT R1, R2          // R2=0x02030404
//     SRW    $16, R2, R3     // R3=0x00000203
//     ADDW   R2, R3, R4      // R4=0x02030607
//     SRW    $8, R4, R5      // R5=0x00020306
//     ADDW   R4, R5, R6      // R6=0x0205090d
//     MOVBZ  R6, R7          // R7=0x0000000d <-- result is 13
//
(PopCount8 x) => (POPCNT (MOVBZreg x))
(PopCount16 x) => (MOVBZreg (SumBytes2 (POPCNT <typ.UInt16> x)))
(PopCount32 x) => (MOVBZreg (SumBytes4 (POPCNT <typ.UInt32> x)))
(PopCount64 x) => (MOVBZreg (SumBytes8 (POPCNT <typ.UInt64> x)))

// SumBytes{2,4,8} pseudo operations sum the values of the rightmost
// 2, 4 or 8 bytes respectively. The result is a single byte; however,
// the other bytes might contain junk, so a zero extension is required if
// the desired output type is larger than 1 byte.
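// For example (illustrative): SumBytes2 applied to 0x0203 computes
// 0x02 + 0x03, so the rightmost byte of the result is 0x05 (the byte above
// it holds junk: 0x02).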
(SumBytes2 x) => (ADDW (SRWconst <typ.UInt8> x [8]) x)
(SumBytes4 x) => (SumBytes2 (ADDW <typ.UInt16> (SRWconst <typ.UInt16> x [16]) x))
(SumBytes8 x) => (SumBytes4 (ADDW <typ.UInt32> (SRDconst <typ.UInt32> x [32]) x))

(Bswap64 ...) => (MOVDBR ...)
(Bswap32 ...) => (MOVWBR ...)

// add with carry
(Select0 (Add64carry x y c))
    => (Select0 <typ.UInt64> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))
(Select1 (Add64carry x y c))
    => (Select0 <typ.UInt64> (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))))

// subtract with borrow
(Select0 (Sub64borrow x y c))
    => (Select0 <typ.UInt64> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c))))
(Select1 (Sub64borrow x y c))
    => (NEG (Select0 <typ.UInt64> (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c)))))))

// math package intrinsics
(Sqrt ...) => (FSQRT ...)
(Floor x) => (FIDBR [7] x)
(Ceil x) => (FIDBR [6] x)
(Trunc x) => (FIDBR [5] x)
(RoundToEven x) => (FIDBR [4] x)
(Round x) => (FIDBR [1] x)
(FMA x y z) => (FMADD z x y)

(Sqrt32 ...) => (FSQRTS ...)

// Atomic loads and stores.
// The SYNC instruction (fast-BCR-serialization) prevents store-load
// reordering. Other sequences of memory operations (load-load,
// store-store and load-store) are already guaranteed not to be reordered.
(AtomicLoad(8|32|Acq32|64|Ptr) ptr mem) => (MOV(BZ|WZ|WZ|D|D)atomicload ptr mem)
(AtomicStore(8|32|64|PtrNoWB) ptr val mem) => (SYNC (MOV(B|W|D|D)atomicstore ptr val mem))

// Store-release doesn't require store-load ordering.
(AtomicStoreRel32 ptr val mem) => (MOVWatomicstore ptr val mem)

// Atomic adds.
(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (LAA ptr val mem))
(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (LAAG ptr val mem))
(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDW val (Select0 <t> tuple))
(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple)
(Select0 <t> (AddTupleFirst64 val tuple)) => (ADD val (Select0 <t> tuple))
(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple)

// Atomic exchanges.
(AtomicExchange32 ptr val mem) => (LoweredAtomicExchange32 ptr val mem)
(AtomicExchange64 ptr val mem) => (LoweredAtomicExchange64 ptr val mem)

// Atomic compare and swap.
(AtomicCompareAndSwap32 ptr old new_ mem) => (LoweredAtomicCas32 ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) => (LoweredAtomicCas64 ptr old new_ mem)

// Atomic and: *(*uint8)(ptr) &= val
//
// Round pointer down to nearest word boundary and pad value with ones before
// applying atomic AND operation to target word.
//
// *(*uint32)(ptr &^ 3) &= rotateleft(uint32(val) | 0xffffff00, ((3 << 3) ^ ((ptr & 3) << 3)))
//
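// A minimal Go sketch of the same emulation (illustrative only: the names
// addr, word, shift, mask and the helper atomicAnd32 are hypothetical, not
// part of the rule or of any library):
//
//     addr := uintptr(unsafe.Pointer(ptr))
//     word := (*uint32)(unsafe.Pointer(addr &^ 3))  // aligned word containing the byte
//     shift := (3 << 3) ^ ((addr & 3) << 3)         // big-endian position of the byte
//     mask := bits.RotateLeft32(uint32(val)|0xffffff00, int(shift))
//     atomicAnd32(word, mask)                       // the LAN instruction on s390x
//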
(AtomicAnd8 ptr val mem)
    => (LANfloor
        ptr
        (RLL <typ.UInt32>
            (ORWconst <typ.UInt32> val [-1<<8])
            (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
        mem)

// Atomic or: *(*uint8)(ptr) |= val
//
// Round pointer down to nearest word boundary and pad value with zeros before
// applying atomic OR operation to target word.
//
// *(*uint32)(ptr &^ 3) |= uint32(val) << ((3 << 3) ^ ((ptr & 3) << 3))
//
(AtomicOr8 ptr val mem)
    => (LAOfloor
        ptr
        (SLW <typ.UInt32>
            (MOVBZreg <typ.UInt32> val)
            (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
        mem)

(AtomicAnd32 ...) => (LAN ...)
(AtomicOr32 ...) => (LAO ...)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to(16|32|64) ...) => (MOVBreg ...)
(SignExt16to(32|64) ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
(ZeroExt32to64 ...) => (MOVWZreg ...)

(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])

// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc(16|32|64)to8 ...) => (Copy ...)
(Trunc(32|64)to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Lowering float <-> int
(Cvt32to32F ...) => (CEFBRA ...)
(Cvt32to64F ...) => (CDFBRA ...)
(Cvt64to32F ...) => (CEGBRA ...)
(Cvt64to64F ...) => (CDGBRA ...)

(Cvt32Fto32 ...) => (CFEBRA ...)
(Cvt32Fto64 ...) => (CGEBRA ...)
(Cvt64Fto32 ...) => (CFDBRA ...)
(Cvt64Fto64 ...) => (CGDBRA ...)

// Lowering float <-> uint
(Cvt32Uto32F ...) => (CELFBR ...)
(Cvt32Uto64F ...) => (CDLFBR ...)
(Cvt64Uto32F ...) => (CELGBR ...)
(Cvt64Uto64F ...) => (CDLGBR ...)

(Cvt32Fto32U ...) => (CLFEBR ...)
(Cvt32Fto64U ...) => (CLGEBR ...)
(Cvt64Fto32U ...) => (CLFDBR ...)
(Cvt64Fto64U ...) => (CLGDBR ...)

// Lowering float32 <-> float64
(Cvt32Fto64F ...) => (LDEBR ...)
(Cvt64Fto32F ...) => (LEDBR ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

// Lowering shifts

// Lower bounded shifts first. No need to check shift value.
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)

// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = shift >= 64 ? 0 : arg << shift
(Lsh(64|32|16|8)x64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
(Lsh(64|32|16|8)x32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
(Lsh(64|32|16|8)x16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
(Lsh(64|32|16|8)x8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))

(Rsh(64|32)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
(Rsh(64|32)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
(Rsh(64|32)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
(Rsh(64|32)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))

(Rsh(16|8)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
(Rsh(16|8)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
(Rsh(16|8)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
(Rsh(16|8)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))

// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift value to 63 (all ones) if it is greater than 63.
// result = arg >> (shift >= 64 ? 63 : shift)
(Rsh(64|32)x64 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
(Rsh(64|32)x32 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
(Rsh(64|32)x16 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
(Rsh(64|32)x8 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))

(Rsh(16|8)x64 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
(Rsh(16|8)x32 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
(Rsh(16|8)x16 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
(Rsh(16|8)x8 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))

// Lowering rotates
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft32 ...) => (RLL ...)
(RotateLeft64 ...) => (RLLG ...)

// Lowering comparisons
(Less64 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Less32 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Less(16|8) x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
(Less64U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Less32U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Less(16|8)U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
(Less64F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Less32F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

(Leq64 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Leq32 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Leq(16|8) x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
(Leq64U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Leq32U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Leq(16|8)U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
(Leq64F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Leq32F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

(Eq(64|Ptr) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Eq32 x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Eq(16|8|B) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
(Eq64F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Eq32F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

(Neq(64|Ptr) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Neq32 x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Neq(16|8|B) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
(Neq64F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Neq32F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && t.IsSigned() => (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !t.IsSigned() => (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && t.IsSigned() => (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !t.IsSigned() => (MOVHZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && t.IsSigned() => (MOVBload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && !t.IsSigned())) => (MOVBZload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

// Lowering stores
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)

// Lowering moves

// Load and store for small copies.
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHZload src mem) mem)
(Move [4] dst src mem) => (MOVWstore dst (MOVWZload src mem) mem)
(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
(Move [16] dst src mem) =>
    (MOVDstore [8] dst (MOVDload [8] src mem)
        (MOVDstore dst (MOVDload src mem) mem))
(Move [24] dst src mem) =>
    (MOVDstore [16] dst (MOVDload [16] src mem)
        (MOVDstore [8] dst (MOVDload [8] src mem)
            (MOVDstore dst (MOVDload src mem) mem)))
(Move [3] dst src mem) =>
    (MOVBstore [2] dst (MOVBZload [2] src mem)
        (MOVHstore dst (MOVHZload src mem) mem))
(Move [5] dst src mem) =>
    (MOVBstore [4] dst (MOVBZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [6] dst src mem) =>
    (MOVHstore [4] dst (MOVHZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [7] dst src mem) =>
    (MOVBstore [6] dst (MOVBZload [6] src mem)
        (MOVHstore [4] dst (MOVHZload [4] src mem)
            (MOVWstore dst (MOVWZload src mem) mem)))

// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
(Move [s] dst src mem) && s > 0 && s <= 256 && logLargeCopy(v, s) =>
    (MVC [makeValAndOff(int32(s), 0)] dst src mem)
(Move [s] dst src mem) && s > 256 && s <= 512 && logLargeCopy(v, s) =>
    (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
(Move [s] dst src mem) && s > 512 && s <= 768 && logLargeCopy(v, s) =>
    (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
(Move [s] dst src mem) && s > 768 && s <= 1024 && logLargeCopy(v, s) =>
    (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))

// Move more than 1024 bytes using a loop.
(Move [s] dst src mem) && s > 1024 && logLargeCopy(v, s) =>
    (LoweredMove [s%256] dst src (ADD <src.Type> src (MOVDconst [(s/256)*256])) mem)

// Lowering Zero instructions
(Zero [0] _ mem) => mem
(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) => (MOVHstoreconst [0] destptr mem)
(Zero [4] destptr mem) => (MOVWstoreconst [0] destptr mem)
(Zero [8] destptr mem) => (MOVDstoreconst [0] destptr mem)
(Zero [3] destptr mem) =>
    (MOVBstoreconst [makeValAndOff(0,2)] destptr
        (MOVHstoreconst [0] destptr mem))
(Zero [5] destptr mem) =>
    (MOVBstoreconst [makeValAndOff(0,4)] destptr
        (MOVWstoreconst [0] destptr mem))
(Zero [6] destptr mem) =>
    (MOVHstoreconst [makeValAndOff(0,4)] destptr
        (MOVWstoreconst [0] destptr mem))
(Zero [7] destptr mem) =>
    (MOVWstoreconst [makeValAndOff(0,3)] destptr
        (MOVWstoreconst [0] destptr mem))

(Zero [s] destptr mem) && s > 0 && s <= 1024 =>
    (CLEAR [makeValAndOff(int32(s), 0)] destptr mem)

// Zero more than 1024 bytes using a loop.
(Zero [s] destptr mem) && s > 1024 =>
    (LoweredZero [s%256] destptr (ADDconst <destptr.Type> destptr [(int32(s)/256)*256]) mem)

// Lowering constants
(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const(32|64)F ...) => (FMOV(S|D)const ...)
(ConstNil) => (MOVDconst [0])
(ConstBool [t]) => (MOVDconst [b2i(t)])

// Lowering calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Miscellaneous
(IsNonNil p) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
(IsInBounds idx len) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
(IsSliceInBounds idx len) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
(NilCheck ...) => (LoweredNilCheck ...)
(GetG ...) => (LoweredGetG ...)
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(Addr {sym} base) => (MOVDaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
(ITab (Load ptr mem)) => (MOVDload ptr mem)

// block rewrites
(If cond yes no) => (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?

// Note on removing unnecessary sign/zero extensions:
//
// After a value is spilled it is restored using a sign- or zero-extension
// to register-width as appropriate for its type. For example, a uint8 will
// be restored using a MOVBZ (llgc) instruction which will zero extend the
// 8-bit value to 64 bits.
//
// This is a hazard when folding sign- and zero-extensions since we need to
// ensure not only that the value in the argument register is correctly
// extended but also that it will still be correctly extended if it is
// spilled and restored.
//
// In general this means we need type checks when the RHS of a rule is an
// OpCopy (i.e. "(... x:(...) ...) -> x").
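//
// For example (illustrative): the rule (MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x
// below is guarded by (!x.Type.IsSigned() || x.Type.Size() > 1). Without that
// guard, an x of type int8 could be spilled and restored with a sign-extending
// MOVB, so a value such as 0x80 would come back negative instead of zero
// extended.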

// Merge double extensions.
(MOV(H|HZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(W|WZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(W|WZ)reg e:(MOV(H|HZ)reg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)

// Bypass redundant sign extensions.
(MOV(B|BZ)reg e:(MOVBreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(B|BZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(B|BZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(H|HZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
(MOV(H|HZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
(MOV(W|WZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)

// Bypass redundant zero extensions.
(MOV(B|BZ)reg e:(MOVBZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(B|BZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(B|BZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(H|HZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
(MOV(H|HZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
(MOV(W|WZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)

// Remove zero extensions after zero extending load.
// Note: take care that if x is spilled it is restored correctly.
(MOV(B|H|W)Zreg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
(MOV(H|W)Zreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
(MOVWZreg x:(MOVWZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 4) => x

// Remove sign extensions after sign extending load.
// Note: take care that if x is spilled it is restored correctly.
(MOV(B|H|W)reg x:(MOVBload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
(MOV(H|W)reg x:(MOVHload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
(MOVWreg x:(MOVWload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x

// Remove sign extensions after zero extending load.
// These type checks are probably unnecessary but do them anyway just in case.
(MOV(H|W)reg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
(MOVWreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x

// Fold sign and zero extensions into loads.
//
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
//
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
(MOV(B|H|W)Zreg <t> x:(MOV(B|H|W)load [o] {s} p mem))
    && x.Uses == 1
    && clobber(x)
    => @x.Block (MOV(B|H|W)Zload <t> [o] {s} p mem)
(MOV(B|H|W)reg <t> x:(MOV(B|H|W)Zload [o] {s} p mem))
    && x.Uses == 1
    && clobber(x)
    => @x.Block (MOV(B|H|W)load <t> [o] {s} p mem)

// Remove zero extensions after argument load.
(MOVBZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() == 1 => x
(MOVHZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 2 => x
(MOVWZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 4 => x

// Remove sign extensions after argument load.
(MOVBreg x:(Arg <t>)) && t.IsSigned() && t.Size() == 1 => x
(MOVHreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 2 => x
(MOVWreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 4 => x

// Fold zero extensions into constants.
(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64( uint8(c))])
(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])

// Fold sign extensions into constants.
(MOVBreg (MOVDconst [c])) => (MOVDconst [int64( int8(c))])
(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])

// Remove zero extension of conditional move.
// Note: only for MOVBZreg for now since it is added as part of 'if' statement lowering.
(MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _))
    && int64(uint8(c)) == c
    && int64(uint8(d)) == d
    && (!x.Type.IsSigned() || x.Type.Size() > 1)
    => x

// Fold boolean tests into blocks.
// Note: this must match If statement lowering.
(CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
    && int32(x) != 0
    => (BRC {d} cmp yes no)

// Canonicalize BRC condition code mask by removing impossible conditions.
// Integer comparisons cannot generate the unordered condition.
(BRC {c} x:((CMP|CMPW|CMPU|CMPWU) _ _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
(BRC {c} x:((CMP|CMPW|CMPU|CMPWU)const _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)

// Compare-and-branch.
// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
(BRC {c} (CMP x y) yes no) => (CGRJ {c&^s390x.Unordered} x y yes no)
(BRC {c} (CMPW x y) yes no) => (CRJ {c&^s390x.Unordered} x y yes no)
(BRC {c} (CMPU x y) yes no) => (CLGRJ {c&^s390x.Unordered} x y yes no)
(BRC {c} (CMPWU x y) yes no) => (CLRJ {c&^s390x.Unordered} x y yes no)

// Compare-and-branch (immediate).
// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
(BRC {c} (CMPconst x [y]) yes no) && y == int32( int8(y)) => (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
(BRC {c} (CMPWconst x [y]) yes no) && y == int32( int8(y)) => (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
(BRC {c} (CMPUconst x [y]) yes no) && y == int32(uint8(y)) => (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
(BRC {c} (CMPWUconst x [y]) yes no) && y == int32(uint8(y)) => (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no)

// Absorb immediate into compare-and-branch.
(C(R|GR)J {c} x (MOVDconst [y]) yes no) && is8Bit(y) => (C(I|GI)J {c} x [ int8(y)] yes no)
(CL(R|GR)J {c} x (MOVDconst [y]) yes no) && isU8Bit(y) => (CL(I|GI)J {c} x [uint8(y)] yes no)
(C(R|GR)J {c} (MOVDconst [x]) y yes no) && is8Bit(x) => (C(I|GI)J {c.ReverseComparison()} y [ int8(x)] yes no)
(CL(R|GR)J {c} (MOVDconst [x]) y yes no) && isU8Bit(x) => (CL(I|GI)J {c.ReverseComparison()} y [uint8(x)] yes no)

// Prefer comparison with immediate to compare-and-branch.
(CGRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPconst x [int32(y)]) yes no)
(CRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPWconst x [int32(y)]) yes no)
(CLGRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPUconst x [int32(y)]) yes no)
(CLRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPWUconst x [int32(y)]) yes no)
(CGRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no)
(CRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no)
(CLGRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no)
(CLRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no)

// Absorb sign/zero extensions into 32-bit compare-and-branch.
(CIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CIJ {c} x [y] yes no)
(CLIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CLIJ {c} x [y] yes no)

// Bring out-of-range signed immediates into range by varying branch condition.
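// For example, signed x < 128 is equivalent to x <= 127, and 127 fits in the
// 8-bit immediate field of CGIJ/CIJ while 128 does not.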
(BRC {s390x.Less} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.LessOrEqual} x [ 127] yes no)
(BRC {s390x.Less} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.LessOrEqual} x [ 127] yes no)
(BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no) => (CGIJ {s390x.Less} x [-128] yes no)
(BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no) => (CIJ {s390x.Less} x [-128] yes no)
(BRC {s390x.Greater} (CMPconst x [-129]) yes no) => (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
(BRC {s390x.Greater} (CMPWconst x [-129]) yes no) => (CIJ {s390x.GreaterOrEqual} x [-128] yes no)
(BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.Greater} x [ 127] yes no)
(BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.Greater} x [ 127] yes no)

// Bring out-of-range unsigned immediates into range by varying branch condition.
(BRC {s390x.Less} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.LessOrEqual} x [255] yes no)
(BRC {s390x.GreaterOrEqual} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.Greater} x [255] yes no)

// Bring out-of-range immediates into range by switching signedness (only == and !=).
(BRC {c} (CMPconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLGIJ {c} x [uint8(y)] yes no)
(BRC {c} (CMPWconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLIJ {c} x [uint8(y)] yes no)
(BRC {c} (CMPUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CGIJ {c} x [ int8(y)] yes no)
(BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ {c} x [ int8(y)] yes no)

// Fold constants into instructions.
(ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [int32(c)] x)
(ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x)

(SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)])
(SUB (MOVDconst [c]) x) && is32Bit(c) => (NEG (SUBconst <v.Type> x [int32(c)]))
(SUBW x (MOVDconst [c])) => (SUBWconst x [int32(c)])
(SUBW (MOVDconst [c]) x) => (NEGW (SUBWconst <v.Type> x [int32(c)]))

(MULLD x (MOVDconst [c])) && is32Bit(c) => (MULLDconst [int32(c)] x)
(MULLW x (MOVDconst [c])) => (MULLWconst [int32(c)] x)

// NILF instructions leave the high 32 bits unchanged, which is
// equivalent to the mask having its leftmost 32 bits set.
// TODO(mundaym): modify the assembler to accept 64-bit values
// and use isU32Bit(^c).
(AND x (MOVDconst [c]))
    && s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
    => (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
(AND x (MOVDconst [c]))
    && is32Bit(c)
    && c < 0
    => (ANDconst [c] x)
(AND x (MOVDconst [c]))
    && is32Bit(c)
    && c >= 0
    => (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))

(ANDW x (MOVDconst [c])) => (ANDWconst [int32(c)] x)

((AND|ANDW)const [c] ((AND|ANDW)const [d] x)) => ((AND|ANDW)const [c&d] x)

((OR|XOR) x (MOVDconst [c])) && isU32Bit(c) => ((OR|XOR)const [c] x)
((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x)

// Constant shifts.
(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [uint8(c&63)])
(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [uint8(c&31)])
(S(LW|RW) _ (MOVDconst [c])) && c&32 != 0 => (MOVDconst [0])
(SRAW x (MOVDconst [c])) && c&32 != 0 => (SRAWconst x [31])

// Shifts only use the rightmost 6 bits of the shift value.
(S(LD|RD|RAD|LW|RW|RAW) x (RISBGZ y {r}))
    && r.Amount == 0
    && r.OutMask()&63 == 63
    => (S(LD|RD|RAD|LW|RW|RAW) x y)
(S(LD|RD|RAD|LW|RW|RAW) x (AND (MOVDconst [c]) y))
    => (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst <typ.UInt32> [int32(c&63)] y))
(S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c] y)) && c&63 == 63
    => (S(LD|RD|RAD|LW|RW|RAW) x y)
(SLD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLD x y)
(SRD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRD x y)
(SRAD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAD x y)
(SLW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLW x y)
(SRW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRW x y)
(SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y)

// Match rotate by constant.
(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
(RLL x (MOVDconst [c])) => (RLLconst x [uint8(c&31)])

// Signed 64-bit comparison with immediate.
(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)])
(CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)]))

// Unsigned 64-bit comparison with immediate.
(CMPU x (MOVDconst [c])) && isU32Bit(c) => (CMPUconst x [int32(c)])
(CMPU (MOVDconst [c]) x) && isU32Bit(c) => (InvertFlags (CMPUconst x [int32(c)]))

// Signed and unsigned 32-bit comparison with immediate.
(CMP(W|WU) x (MOVDconst [c])) => (CMP(W|WU)const x [int32(c)])
(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)]))

// Match (x >> c) << d to 'rotate then insert selected bits [into zero]'.
(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(uint8(max(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})

// Match (x << c) >> d to 'rotate then insert selected bits [into zero]'.
(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, uint8(min(63, int8(63-c+d))), uint8(int8(c-d)&63))})

// Absorb input zero extension into 'rotate then insert selected bits [into zero]'.
(RISBGZ (MOVWZreg x) {r}) && r.InMerge(0xffffffff) != nil => (RISBGZ x {*r.InMerge(0xffffffff)})
(RISBGZ (MOVHZreg x) {r}) && r.InMerge(0x0000ffff) != nil => (RISBGZ x {*r.InMerge(0x0000ffff)})
(RISBGZ (MOVBZreg x) {r}) && r.InMerge(0x000000ff) != nil => (RISBGZ x {*r.InMerge(0x000000ff)})

// Absorb 'rotate then insert selected bits [into zero]' into zero extension.
(MOVWZreg (RISBGZ x {r})) && r.OutMerge(0xffffffff) != nil => (RISBGZ x {*r.OutMerge(0xffffffff)})
(MOVHZreg (RISBGZ x {r})) && r.OutMerge(0x0000ffff) != nil => (RISBGZ x {*r.OutMerge(0x0000ffff)})
(MOVBZreg (RISBGZ x {r})) && r.OutMerge(0x000000ff) != nil => (RISBGZ x {*r.OutMerge(0x000000ff)})

// Absorb shift into 'rotate then insert selected bits [into zero]'.
//
// Any unsigned shift can be represented as a rotate and mask operation:
//
//   x << c => RotateLeft64(x, c) & (^uint64(0) << c)
//   x >> c => RotateLeft64(x, -c) & (^uint64(0) >> c)
//
// Therefore when a shift is used as the input to a rotate then insert
// selected bits instruction we can merge the two together. We just have
// to be careful that the resultant mask is representable (non-zero and
// contiguous). For example, assuming that x is variable and c, y and m
// are constants, a shift followed by a rotate then insert selected bits
// could be represented as:
//
//   RotateLeft64(RotateLeft64(x, c) & (^uint64(0) << c), y) & m
//
// We can split the rotation by y into two, one rotate for x and one for
// the mask:
//
//   RotateLeft64(RotateLeft64(x, c), y) & (RotateLeft64(^uint64(0) << c, y)) & m
//
// The rotations of x by c followed by y can then be combined:
//
//   RotateLeft64(x, c+y) & (RotateLeft64(^uint64(0) << c, y)) & m
//   ^^^^^^^^^^^^^^^^^^^^   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//          rotate                         mask
//
// To perform this optimization we therefore just need to check that it
// is valid to merge the shift mask (^uint64(0)<<c) into the selected
// bits mask (i.e. that the resultant mask is non-zero and contiguous).
//
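// Worked example (illustrative): in (RISBGZ (SRDconst x [48]) {48, 63, 0})
// the shift mask is ^uint64(0)>>48 = 0xffff, which merges cleanly into the
// selected bits mask for bits 48-63, so the pair combines to
// (RISBGZ x {48, 63, 16}), i.e. a plain 48-bit unsigned right shift of x.
//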
(RISBGZ (SLDconst x [c]) {r}) && r.InMerge(^uint64(0)<<c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
(RISBGZ (SRDconst x [c]) {r}) && r.InMerge(^uint64(0)>>c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})

// Absorb 'rotate then insert selected bits [into zero]' into left shift.
(SLDconst (RISBGZ x {r}) [c])
    && s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
    => (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})

// Absorb 'rotate then insert selected bits [into zero]' into right shift.
(SRDconst (RISBGZ x {r}) [c])
    && s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
    => (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})

// Merge 'rotate then insert selected bits [into zero]' instructions together.
(RISBGZ (RISBGZ x {y}) {z})
    && z.InMerge(y.OutMask()) != nil
    => (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})

// Convert RISBGZ into 64-bit shift (helps CSE).
(RISBGZ x {r}) && r.End == 63 && r.Start == -r.Amount&63 => (SRDconst x [-r.Amount&63])
(RISBGZ x {r}) && r.Start == 0 && r.End == 63-r.Amount => (SLDconst x [r.Amount])

// Optimize single bit isolation when it is known to be equivalent to
// the most significant bit due to mask produced by arithmetic shift.
// Simply isolate the most significant bit itself and place it in the
// correct position.
//
// Example: (int64(x) >> 63) & 0x8 -> RISBGZ $60, $60, $4, Rsrc, Rdst
(RISBGZ (SRADconst x [c]) {r})
    && r.Start == r.End           // single bit selected
    && (r.Start+r.Amount)&63 <= c // equivalent to most significant bit of x
    => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})

// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))

// Use sign/zero extend instead of RISBGZ.
(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
(RISBGZ x {r}) && r == s390x.NewRotateParams(48, 63, 0) => (MOVHZreg x)
(RISBGZ x {r}) && r == s390x.NewRotateParams(32, 63, 0) => (MOVWZreg x)

// Use sign/zero extend instead of ANDW.
(ANDWconst [0x00ff] x) => (MOVBZreg x)
(ANDWconst [0xffff] x) => (MOVHZreg x)

// Strength reduce multiplication to the sum (or difference) of two powers of two.
//
// Examples:
//      5x -> 4x + 1x
//     10x -> 8x + 2x
//    120x -> 128x - 8x
//   -120x -> 8x - 128x
//
// We know that the rightmost bit of any value, once isolated, must either
// be a power of 2 (because it is a single bit) or 0 (if the original value is 0).
// In all of these rules we use a rightmost bit calculation to determine one operand
// for the addition or subtraction. We then just need to calculate if the other
// operand is a valid power of 2 before we can match the rule.
//
// Notes:
//   - the generic rules have already matched single powers of two so we ignore them here
//   - isPowerOfTwo asserts that its argument is greater than 0
//   - c&(c-1)  = clear rightmost bit
//   - c&^(c-1) = isolate rightmost bit
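//
// Worked example (illustrative) for c = 10 = 0b1010:
//   c&(c-1)  = 8 (a power of two, so the first rule below matches)
//   c&^(c-1) = 2
//   10x => (x << 3) + (x << 1)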

// c = 2ˣ + 2ʸ => c - 2ˣ = 2ʸ
(MULL(D|W)const <t> x [c]) && isPowerOfTwo(c&(c-1))
    => ((ADD|ADDW) (SL(D|W)const <t> x [uint8(log32(c&(c-1)))])
                   (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))

// c = 2ʸ - 2ˣ => c + 2ˣ = 2ʸ
(MULL(D|W)const <t> x [c]) && isPowerOfTwo(c+(c&^(c-1)))
    => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(c+(c&^(c-1))))])
                   (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))

// c = 2ˣ - 2ʸ => -c + 2ˣ = 2ʸ
(MULL(D|W)const <t> x [c]) && isPowerOfTwo(-c+(-c&^(-c-1)))
    => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(-c&^(-c-1)))])
                   (SL(D|W)const <t> x [uint8(log32(-c+(-c&^(-c-1))))]))

// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
(ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB => (MOVDaddridx [c] {s} ptr idx)

// fold ADDconst into MOVDaddridx
839(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
840(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
841(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
842
843// reverse ordering of compare instruction
844(LOCGR {c} x y (InvertFlags cmp)) => (LOCGR {c.ReverseComparison()} x y cmp)
845
846// replace load from same location as preceding store with copy
847(MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
848(MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWreg x)
849(MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHreg x)
850(MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBreg x)
851(MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWZreg x)
852(MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHZreg x)
853(MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBZreg x)
854(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LGDR x)
855(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LDGR x)
856(FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
857(FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
858
859// prefer FPR <-> GPR moves over combined load ops
860(MULLDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (MULLD x (LGDR <t> y))
861(ADDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (ADD x (LGDR <t> y))
862(SUBload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (SUB x (LGDR <t> y))
863(ORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (OR x (LGDR <t> y))
864(ANDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (AND x (LGDR <t> y))
865(XORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (XOR x (LGDR <t> y))
866
867// detect attempts to set/clear the sign bit
868// may need to be reworked when NIHH/OIHH are added
869(RISBGZ (LGDR <t> x) {r}) && r == s390x.NewRotateParams(1, 63, 0) => (LGDR <t> (LPDFR <x.Type> x))
870(LDGR <t> (RISBGZ x {r})) && r == s390x.NewRotateParams(1, 63, 0) => (LPDFR (LDGR <t> x))
871(OR (MOVDconst [-1<<63]) (LGDR <t> x)) => (LGDR <t> (LNDFR <x.Type> x))
872(LDGR <t> (OR (MOVDconst [-1<<63]) x)) => (LNDFR (LDGR <t> x))
873
874// detect attempts to set the sign bit with load
875(LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
876
877// detect copysign
878(OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
879 && r == s390x.NewRotateParams(0, 0, 0)
880 => (LGDR (CPSDR <t> y x))
881(OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
882 && c >= 0
883 && r == s390x.NewRotateParams(0, 0, 0)
884 => (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
885(CPSDR y (FMOVDconst [c])) && !math.Signbit(c) => (LPDFR y)
886(CPSDR y (FMOVDconst [c])) && math.Signbit(c) => (LNDFR y)
887
888// absorb negations into set/clear sign bit
889(FNEG (LPDFR x)) => (LNDFR x)
890(FNEG (LNDFR x)) => (LPDFR x)
891(FNEGS (LPDFR x)) => (LNDFR x)
892(FNEGS (LNDFR x)) => (LPDFR x)
893
894// no need to convert float32 to float64 to set/clear sign bit
895(LEDBR (LPDFR (LDEBR x))) => (LPDFR x)
896(LEDBR (LNDFR (LDEBR x))) => (LNDFR x)
897
898// remove unnecessary FPR <-> GPR moves
899(LDGR (LGDR x)) => x
900(LGDR (LDGR x)) => x
901
902// Don't extend before storing
903(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
904(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
905(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
906(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
907(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
908(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
909
910// Fold constants into memory operations.
911// Note that this is not always a good idea because if not all the uses of
912// the ADDconst get eliminated, we still have to compute the ADDconst and we now
913// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one.
914// Nevertheless, let's do it!
915(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDload [off1+off2] {sym} ptr mem)
916(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem)
917(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHload [off1+off2] {sym} ptr mem)
918(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem)
919(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWZload [off1+off2] {sym} ptr mem)
920(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHZload [off1+off2] {sym} ptr mem)
921(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBZload [off1+off2] {sym} ptr mem)
922(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSload [off1+off2] {sym} ptr mem)
923(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDload [off1+off2] {sym} ptr mem)
924
925(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDstore [off1+off2] {sym} ptr val mem)
926(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem)
927(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHstore [off1+off2] {sym} ptr val mem)
928(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem)
929(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSstore [off1+off2] {sym} ptr val mem)
930(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDstore [off1+off2] {sym} ptr val mem)
931
932(ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDload [off1+off2] {sym} x ptr mem)
933(ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDWload [off1+off2] {sym} x ptr mem)
934(MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLDload [off1+off2] {sym} x ptr mem)
935(MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLWload [off1+off2] {sym} x ptr mem)
936(SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBload [off1+off2] {sym} x ptr mem)
937(SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBWload [off1+off2] {sym} x ptr mem)
938
939(ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDload [off1+off2] {sym} x ptr mem)
940(ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDWload [off1+off2] {sym} x ptr mem)
941(ORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORload [off1+off2] {sym} x ptr mem)
942(ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORWload [off1+off2] {sym} x ptr mem)
943(XORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORload [off1+off2] {sym} x ptr mem)
944(XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORWload [off1+off2] {sym} x ptr mem)
945
946// Fold constants into stores.
947(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
948 (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
949(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
950 (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
951(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
952 (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
953(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB =>
954 (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
955
956// Fold address offsets into constant stores.
957(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
958 (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
959(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
960 (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
961(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
962 (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
963(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off64()+int64(off)) =>
964 (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
965
966// Merge address calculations into loads and stores.
967// Offsets from SB must not be merged into unaligned memory accesses because
968// loads/stores using PC-relative addressing directly must be aligned to the
969// size of the target.
(MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
	(MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
	(MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)

(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)

(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)

// Cannot store constant to SB directly (no 'move relative long immediate' instructions).
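// When the base is SB the MOVDaddr is kept, so the address is computed into
// a register and the store uses that register as its base instead.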
(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
	(MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
	(MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
	(MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
	(MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)

// MOVDaddr into MOVDaddridx
(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
	(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB =>
	(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)

// Absorb InvertFlags into branches.
(BRC {c} (InvertFlags cmp) yes no) => (BRC {c.ReverseComparison()} cmp yes no)

// Constant comparisons.
(CMPconst (MOVDconst [x]) [y]) && x==int64(y) => (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && x<int64(y) => (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && x>int64(y) => (FlagGT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) => (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)

(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) => (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)

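// A zero-extended 8- or 16-bit value is at most 0xff or 0xffff respectively,
// so comparing it against a larger constant always yields 'less than'.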
(CMP(W|WU)const (MOVBZreg _) [c]) && 0xff < c => (FlagLT)
(CMP(W|WU)const (MOVHZreg _) [c]) && 0xffff < c => (FlagLT)

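// A logical right shift by a non-zero amount leaves the sign bit clear, so
// comparing the result against a negative constant always yields 'greater'.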
(CMPconst (SRDconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
(CMPWconst (SRWconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)

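// A value shifted right by c bits is less than 1<<(64-c) (or 1<<(32-c) for
// the 32-bit version), so comparing it against a constant at least that
// large always yields 'less than'.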
(CMPUconst (SRDconst _ [c]) [n]) && c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n) => (FlagLT)
(CMPWUconst (SRWconst _ [c]) [n]) && c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n) => (FlagLT)

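// A value masked with m is at most m, so comparing it against a larger
// constant always yields 'less than'.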
(CMPWconst (ANDWconst _ [m]) [n]) && int32(m) >= 0 && int32(m) < int32(n) => (FlagLT)
(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) => (FlagLT)

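// The result of RISBGZ (rotate then insert selected bits, zeroing the rest)
// is bounded by its output mask, so comparing it against a larger constant
// always yields 'less than'.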
(CMPconst (RISBGZ x {r}) [c]) && c > 0 && r.OutMask() < uint64(c) => (FlagLT)
(CMPUconst (RISBGZ x {r}) [c]) && r.OutMask() < uint64(uint32(c)) => (FlagLT)

// Constant compare-and-branch with immediate.
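// When both operands are constants the outcome is known, so the conditional
// branch can be replaced with an unconditional one: (First yes no) always
// takes the 'yes' branch and (First no yes) always takes the 'no' branch.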
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int64(x) == int64(y) => (First yes no)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int64(x) < int64(y) => (First yes no)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int64(x) > int64(y) => (First yes no)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int32(x) == int32(y) => (First yes no)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int32(x) < int32(y) => (First yes no)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int32(x) > int32(y) => (First yes no)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint64(x) == uint64(y) => (First yes no)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint64(x) < uint64(y) => (First yes no)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint64(x) > uint64(y) => (First yes no)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint32(x) == uint32(y) => (First yes no)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint32(x) < uint32(y) => (First yes no)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint32(x) > uint32(y) => (First yes no)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int64(x) == int64(y) => (First no yes)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int64(x) < int64(y) => (First no yes)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int64(x) > int64(y) => (First no yes)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int32(x) == int32(y) => (First no yes)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int32(x) < int32(y) => (First no yes)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int32(x) > int32(y) => (First no yes)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint64(x) == uint64(y) => (First no yes)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint64(x) < uint64(y) => (First no yes)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint64(x) > uint64(y) => (First no yes)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint32(x) == uint32(y) => (First no yes)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint32(x) < uint32(y) => (First no yes)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint32(x) > uint32(y) => (First no yes)

// Constant compare-and-branch with zero immediate when the comparison is
// unsigned: an unsigned value is always greater than or equal to zero.
(C(L|LG)IJ {s390x.GreaterOrEqual} _ [0] yes no) => (First yes no)
(C(L|LG)IJ {s390x.Less} _ [0] yes no) => (First no yes)

// Constant compare-and-branch when operands match.
(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal != 0 => (First yes no)
(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal == 0 => (First no yes)

// Convert 64-bit comparisons to 32-bit comparisons and signed comparisons
// to unsigned comparisons.
// This helps simplify constant comparison detection.
(CM(P|PU)const (MOV(W|WZ)reg x) [c]) => (CMP(W|WU)const x [c])
(CM(P|P|PU|PU)const x:(MOV(H|HZ|H|HZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
(CM(P|P|PU|PU)const x:(MOV(B|BZ|B|BZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
(CMPconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 && c >= 0 => (CMPWUconst x [c])
(CMPUconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 => (CMPWUconst x [c])
(CMPconst x:(SRDconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPUconst x [n])
(CMPWconst x:(SRWconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPWUconst x [n])

// Absorb sign and zero extensions into 32-bit comparisons.
(CMP(W|W|WU|WU) x (MOV(W|WZ|W|WZ)reg y)) => (CMP(W|W|WU|WU) x y)
(CMP(W|W|WU|WU) (MOV(W|WZ|W|WZ)reg x) y) => (CMP(W|W|WU|WU) x y)
(CMP(W|W|WU|WU)const (MOV(W|WZ|W|WZ)reg x) [c]) => (CMP(W|W|WU|WU)const x [c])

// Absorb flag constants into branches.
(BRC {c} (FlagEQ) yes no) && c&s390x.Equal != 0 => (First yes no)
(BRC {c} (FlagLT) yes no) && c&s390x.Less != 0 => (First yes no)
(BRC {c} (FlagGT) yes no) && c&s390x.Greater != 0 => (First yes no)
(BRC {c} (FlagOV) yes no) && c&s390x.Unordered != 0 => (First yes no)

(BRC {c} (FlagEQ) yes no) && c&s390x.Equal == 0 => (First no yes)
(BRC {c} (FlagLT) yes no) && c&s390x.Less == 0 => (First no yes)
(BRC {c} (FlagGT) yes no) && c&s390x.Greater == 0 => (First no yes)
(BRC {c} (FlagOV) yes no) && c&s390x.Unordered == 0 => (First no yes)

// Absorb flag constants into SETxx ops.
(LOCGR {c} _ x (FlagEQ)) && c&s390x.Equal != 0 => x
(LOCGR {c} _ x (FlagLT)) && c&s390x.Less != 0 => x
(LOCGR {c} _ x (FlagGT)) && c&s390x.Greater != 0 => x
(LOCGR {c} _ x (FlagOV)) && c&s390x.Unordered != 0 => x

(LOCGR {c} x _ (FlagEQ)) && c&s390x.Equal == 0 => x
(LOCGR {c} x _ (FlagLT)) && c&s390x.Less == 0 => x
(LOCGR {c} x _ (FlagGT)) && c&s390x.Greater == 0 => x
(LOCGR {c} x _ (FlagOV)) && c&s390x.Unordered == 0 => x

// Remove redundant *const ops
(ADDconst [0] x) => x
(ADDWconst [c] x) && int32(c)==0 => x
(SUBconst [0] x) => x
(SUBWconst [c] x) && int32(c) == 0 => x
(ANDconst [0] _) => (MOVDconst [0])
(ANDWconst [c] _) && int32(c)==0 => (MOVDconst [0])
(ANDconst [-1] x) => x
(ANDWconst [c] x) && int32(c)==-1 => x
(ORconst [0] x) => x
(ORWconst [c] x) && int32(c)==0 => x
(ORconst [-1] _) => (MOVDconst [-1])
(ORWconst [c] _) && int32(c)==-1 => (MOVDconst [-1])
(XORconst [0] x) => x
(XORWconst [c] x) && int32(c)==0 => x

// Shifts by zero (may be inserted during multiplication strength reduction).
((SLD|SLW|SRD|SRW|SRAD|SRAW)const x [0]) => x

// Convert constant subtracts to constant adds.
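// Note: negating the most negative 32-bit value overflows, hence the guard
// on the 64-bit rule. The 32-bit rule needs no guard because adding and
// subtracting 1<<31 are equivalent modulo 2^32.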
(SUBconst [c] x) && c != -(1<<31) => (ADDconst [-c] x)
(SUBWconst [c] x) => (ADDWconst [-int32(c)] x)

// generic constant folding
// TODO: more of this
(ADDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
(ADDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
(ADDconst [c] (ADDconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDconst [c+d] x)
(ADDWconst [c] (ADDWconst [d] x)) => (ADDWconst [int32(c+d)] x)
(SUBconst (MOVDconst [d]) [c]) => (MOVDconst [d-int64(c)])
(SUBconst (SUBconst x [d]) [c]) && is32Bit(-int64(c)-int64(d)) => (ADDconst [-c-d] x)
(SRADconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
(SRAWconst [c] (MOVDconst [d])) => (MOVDconst [int64(int32(d))>>uint64(c)])
(NEG (MOVDconst [c])) => (MOVDconst [-c])
(NEGW (MOVDconst [c])) => (MOVDconst [int64(int32(-c))])
(MULLDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)*d])
(MULLWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c*int32(d))])
(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
(ANDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)&d])
(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
(ORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
(XORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)^d])
(LoweredRound32F x:(FMOVSconst)) => x
(LoweredRound64F x:(FMOVDconst)) => x

// generic simplifications
// TODO: more of this
(ADD x (NEG y)) => (SUB x y)
(ADDW x (NEGW y)) => (SUBW x y)
(SUB x (NEG y)) => (ADD x y)
(SUBW x (NEGW y)) => (ADDW x y)
(SUB x x) => (MOVDconst [0])
(SUBW x x) => (MOVDconst [0])
(AND x x) => x
(ANDW x x) => x
(OR x x) => x
(ORW x x) => x
(XOR x x) => (MOVDconst [0])
(XORW x x) => (MOVDconst [0])
(NEG (NEG x)) => x
(NEG (ADDconst [c] (NEG x))) && c != -(1<<31) => (ADDconst [-c] x)
(MOVBZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
(MOVHZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
(MOVBreg (ANDWconst [m] x)) && int8(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
(MOVHreg (ANDWconst [m] x)) && int16(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))

// carry flag generation
// (we only constant fold when the carry out is known to be zero)
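// An add logical sets the condition code to 0 (zero result, no carry) or
// 1 (non-zero result, no carry) in the cases folded here; FlagEQ and FlagLT
// represent those condition codes.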
(Select1 (ADDCconst (MOVDconst [c]) [d]))
	&& uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0
	=> (FlagEQ)
(Select1 (ADDCconst (MOVDconst [c]) [d]))
	&& uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0
	=> (FlagLT)

// borrow flag generation
// (we only constant fold when the borrow out is known to be zero)
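// A subtract logical sets the condition code to 2 (zero result, no borrow)
// or 3 (non-zero result, no borrow) in the cases folded here; FlagGT and
// FlagOV represent those condition codes.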
(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
	&& uint64(d) <= uint64(c) && c-d == 0
	=> (FlagGT)
(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
	&& uint64(d) <= uint64(c) && c-d != 0
	=> (FlagOV)

// add with carry
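// FlagEQ and FlagLT both correspond to a carry of zero (see above), so an
// ADDE with either flag as its carry input reduces to an ADDC.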
(ADDE x y (FlagEQ)) => (ADDC x y)
(ADDE x y (FlagLT)) => (ADDC x y)
(ADDC x (MOVDconst [c])) && is16Bit(c) => (ADDCconst x [int16(c)])
(Select0 (ADDCconst (MOVDconst [c]) [d])) => (MOVDconst [c+int64(d)])

// subtract with borrow
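// FlagGT and FlagOV both correspond to a borrow of zero (see above), so a
// SUBE with either flag as its borrow input reduces to a SUBC.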
(SUBE x y (FlagGT)) => (SUBC x y)
(SUBE x y (FlagOV)) => (SUBC x y)
(Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) => (MOVDconst [c-d])

// collapse carry chain
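// (ADDE (MOVDconst [0]) (MOVDconst [0]) c) materializes the carry c as a 0
// or 1 value, and adding -1 to that value produces a carry out exactly when
// c was set, so the whole pattern just regenerates c.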
(ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c)))))
	=> (ADDE x y c)

// collapse borrow chain
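// Similarly, (SUBE (MOVDconst [0]) (MOVDconst [0]) c) computes 0 - 0 - borrow,
// so negating its result materializes the borrow as a 0 or 1 value, and
// subtracting that value from zero regenerates the original borrow.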
(SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c))))))
	=> (SUBE x y c)

// branch on carry
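// (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) materializes the
// carry flag as a 0 or 1 value, so comparing it against 0 or 1 is just a
// direct test of the carry flag.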
(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.NoCarry} carry)
(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.Carry} carry)
(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)
(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.NoCarry} carry)
(C(G|LG)IJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)

// branch on borrow
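// Likewise, (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow)))
// materializes the borrow flag as a 0 or 1 value, so comparing it against
// 0 or 1 is a direct test of the borrow flag.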
(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.NoBorrow} borrow)
(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.Borrow} borrow)
(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)
(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.NoBorrow} borrow)
(C(G|LG)IJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)

// fused multiply-add
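// Fusing skips the intermediate rounding of the product, which can change
// the result; useFMA reports whether fusion is permitted for this value.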
(Select0 (F(ADD|SUB) (FMUL y z) x)) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z)
(Select0 (F(ADDS|SUBS) (FMULS y z) x)) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z)

// Convert floating point comparisons against zero into 'load and test' instructions.
(F(CMP|CMPS) x (FMOV(D|S)const [0.0])) => (LT(D|E)BR x)
(F(CMP|CMPS) (FMOV(D|S)const [0.0]) x) => (InvertFlags (LT(D|E)BR <v.Type> x))

// FSUB, FSUBS, FADD and FADDS now produce a condition code representing the
// comparison of the result with 0.0. If a compare-with-zero instruction
// (e.g. LTDBR) follows one of those instructions, we can use the generated
// flag and remove the comparison instruction.
// Note: when inserting Select1 ops we need to ensure they are in the
// same block as their argument. We could also use @x.Block for this
// but moving the flag-generating value to a different block seems to
// increase the likelihood that the flags value will have to be regenerated
// by flagalloc, which is not what we want.
(LTDBR (Select0 x:(F(ADD|SUB) _ _))) && b == x.Block => (Select1 x)
(LTEBR (Select0 x:(F(ADDS|SUBS) _ _))) && b == x.Block => (Select1 x)

// Fold memory operations into operations.
// Exclude global data (SB) because these instructions cannot handle relative addresses.
// TODO(mundaym): indexed versions of these?
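// For example, an ADD of a loaded value can assemble to a single AG (add
// from memory) instruction instead of a separate load and register add.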
((ADD|SUB|MULLD|AND|OR|XOR) <t> x g:(MOVDload [off] {sym} ptr mem))
	&& ptr.Op != OpSB
	&& is20Bit(int64(off))
	&& canMergeLoadClobber(v, g, x)
	&& clobber(g)
	=> ((ADD|SUB|MULLD|AND|OR|XOR)load <t> [off] {sym} x ptr mem)
((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWload [off] {sym} ptr mem))
	&& ptr.Op != OpSB
	&& is20Bit(int64(off))
	&& canMergeLoadClobber(v, g, x)
	&& clobber(g)
	=> ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWZload [off] {sym} ptr mem))
	&& ptr.Op != OpSB
	&& is20Bit(int64(off))
	&& canMergeLoadClobber(v, g, x)
	&& clobber(g)
	=> ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)

// Combine stores into store multiples.
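// For example, four adjacent 32-bit stores are combined step by step:
//
//	MOVWstore + MOVWstore -> STM2
//	STM2 + MOVWstore      -> STM3
//	STM3 + MOVWstore      -> STM4
//	STM2 + STM2           -> STM4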
// 32-bit
(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
	&& p.Op != OpSB
	&& x.Uses == 1
	&& is20Bit(int64(i)-4)
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (STM2 [i-4] {s} p w0 w1 mem)
(MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
	&& x.Uses == 1
	&& is20Bit(int64(i)-8)
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (STM3 [i-8] {s} p w0 w1 w2 mem)
(MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
	&& x.Uses == 1
	&& is20Bit(int64(i)-12)
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
(STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
	&& x.Uses == 1
	&& is20Bit(int64(i)-8)
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
// 64-bit
(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
	&& p.Op != OpSB
	&& x.Uses == 1
	&& is20Bit(int64(i)-8)
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (STMG2 [i-8] {s} p w0 w1 mem)
(MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
	&& x.Uses == 1
	&& is20Bit(int64(i)-16)
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (STMG3 [i-16] {s} p w0 w1 w2 mem)
(MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
	&& x.Uses == 1
	&& is20Bit(int64(i)-24)
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
(STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
	&& x.Uses == 1
	&& is20Bit(int64(i)-16)
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)

// Convert 32-bit store multiples into 64-bit stores.
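// This works because s390x is big-endian: storing the high 32 bits of x at
// the lower address followed by the low 32 bits of x at the higher address
// is equivalent to a single 64-bit store of x.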
(STM2 [i] {s} p (SRDconst [32] x) x mem) => (MOVDstore [i] {s} p x mem)

// Fold bit reversal into loads.
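// The 32-bit forms are wrapped in MOVWZreg, presumably to keep the upper
// 32 bits of the result register zero (hence the 'need zero extension?'
// notes on the rules below).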
(MOVWBR x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVWZreg (MOVWBRload [off] {sym} ptr mem)) // need zero extension?
(MOVWBR x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 => @x.Block (MOVWZreg (MOVWBRloadidx [off] {sym} ptr idx mem)) // need zero extension?
(MOVDBR x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVDBRload [off] {sym} ptr mem)
(MOVDBR x:(MOVDloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx [off] {sym} ptr idx mem)

// Fold bit reversal into stores.
(MOV(D|W)store [off] {sym} ptr r:(MOV(D|W)BR x) mem) && r.Uses == 1 => (MOV(D|W)BRstore [off] {sym} ptr x mem)
(MOV(D|W)storeidx [off] {sym} ptr idx r:(MOV(D|W)BR x) mem) && r.Uses == 1 => (MOV(D|W)BRstoreidx [off] {sym} ptr idx x mem)

// Special bswap16 rules
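// These rules fuse Bswap16 directly with an adjacent load or store, using
// the byte-reversed halfword memory operations rather than swapping the
// value in registers.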
(Bswap16 x:(MOVHZload [off] {sym} ptr mem)) => @x.Block (MOVHZreg (MOVHBRload [off] {sym} ptr mem))
(Bswap16 x:(MOVHZloadidx [off] {sym} ptr idx mem)) => @x.Block (MOVHZreg (MOVHBRloadidx [off] {sym} ptr idx mem))
(MOVHstore [off] {sym} ptr (Bswap16 val) mem) => (MOVHBRstore [off] {sym} ptr val mem)
(MOVHstoreidx [off] {sym} ptr idx (Bswap16 val) mem) => (MOVHBRstoreidx [off] {sym} ptr idx val mem)