// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add(64|32)F ...) => (FADD(D|S) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
(Sub(64|32)F ...) => (FSUB(D|S) ...)

(Mul64 ...) => (MUL  ...)
(Mul64uhilo ...) => (LoweredMuluhilo ...)
(Mul64uover ...) => (LoweredMuluover ...)
(Mul32 ...) => (MULW ...)
(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
(Mul8 x y)  => (MULW (SignExt8to32 x)  (SignExt8to32 y))
(Mul(64|32)F ...) => (FMUL(D|S) ...)

(Div(64|32)F ...) => (FDIV(D|S) ...)

(Div64 x y [false])  => (DIV x y)
(Div64u ...) => (DIVU ...)
(Div32 x y [false])  => (DIVW x y)
(Div32u ...) => (DIVUW ...)
(Div16 x y [false])  => (DIVW  (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y)   => (DIVW  (SignExt8to32 x)  (SignExt8to32 y))
(Div8u x y)  => (DIVUW (ZeroExt8to32 x)  (ZeroExt8to32 y))

(Hmul64 ...)  => (MULH  ...)
(Hmul64u ...) => (MULHU ...)
(Hmul32 x y)  => (SRAI [32] (MUL  (SignExt32to64 x) (SignExt32to64 y)))
(Hmul32u x y) => (SRLI [32] (MUL  (ZeroExt32to64 x) (ZeroExt32to64 y)))

(Select0 (Add64carry x y c)) => (ADD (ADD <typ.UInt64> x y) c)
(Select1 (Add64carry x y c)) =>
	(OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))

(Select0 (Sub64borrow x y c)) => (SUB (SUB <typ.UInt64> x y) c)
(Select1 (Sub64borrow x y c)) =>
	(OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))
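
// For reference, an illustrative Go sketch of the carry/borrow computation the
// four rules above rely on (the helper names are ours, mirroring
// math/bits.Add64 and bits.Sub64; c and b are assumed to be 0 or 1):
//
//	func add64carry(x, y, c uint64) (sum, carryOut uint64) {
//		s := x + y
//		sum = s + c // Select0 result
//		if s < x || sum < s { // Select1: SLTU s x | SLTU (ADD s c) s
//			carryOut = 1
//		}
//		return
//	}
//
//	func sub64borrow(x, y, b uint64) (diff, borrowOut uint64) {
//		s := x - y
//		diff = s - b // Select0 result
//		if x < s || s < diff { // Select1: SLTU x s | SLTU s (SUB s b)
//			borrowOut = 1
//		}
//		return
//	}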

// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
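
// A one-line Go check of the identity above (illustrative only; the helper
// name is ours): the average is computed without letting x+y overflow 64 bits.
//
//	func avg64u(x, y uint64) uint64 {
//		return (x >> 1) + (y >> 1) + (x & y & 1)
//	}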

(Mod64 x y [false])  => (REM x y)
(Mod64u ...) => (REMU  ...)
(Mod32 x y [false])  => (REMW x y)
(Mod32u ...) => (REMUW ...)
(Mod16 x y [false])  => (REMW  (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y)   => (REMW  (SignExt8to32  x) (SignExt8to32  y))
(Mod8u x y)  => (REMUW (ZeroExt8to32  x) (ZeroExt8to32  y))

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

(Neg(64|32|16|8) ...) => (NEG ...)
(Neg(64|32)F ...) => (FNEG(D|S) ...)

(Com(64|32|16|8) ...) => (NOT ...)


(Sqrt ...) => (FSQRTD ...)
(Sqrt32 ...) => (FSQRTS ...)

(Copysign ...) => (FSGNJD ...)

(Abs ...) => (FABSD ...)

(FMA ...) => (FMADDD ...)

(Min(64|32)F ...) => (LoweredFMIN(D|S) ...)
(Max(64|32)F ...) => (LoweredFMAX(D|S) ...)

// Sign and zero extension.

(SignExt8to16  ...) => (MOVBreg ...)
(SignExt8to32  ...) => (MOVBreg ...)
(SignExt8to64  ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

(ZeroExt8to16  ...) => (MOVBUreg ...)
(ZeroExt8to32  ...) => (MOVBUreg ...)
(ZeroExt8to64  ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(Cvt32to32F ...) => (FCVTSW ...)
(Cvt32to64F ...) => (FCVTDW ...)
(Cvt64to32F ...) => (FCVTSL ...)
(Cvt64to64F ...) => (FCVTDL ...)

(Cvt32Fto32 ...) => (FCVTWS ...)
(Cvt32Fto64 ...) => (FCVTLS ...)
(Cvt64Fto32 ...) => (FCVTWD ...)
(Cvt64Fto64 ...) => (FCVTLD ...)

(Cvt32Fto64F ...) => (FCVTDS ...)
(Cvt64Fto32F ...) => (FCVTSD ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

(Slicemask <t> x) => (SRAI [63] (NEG <t> x))
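
// Slicemask yields 0 for a zero input and all ones otherwise. An illustrative
// Go sketch of the lowering above (the helper name is ours; x is assumed to be
// a non-negative length): negate, then spread the sign bit across the word.
//
//	func slicemask(x int64) int64 {
//		return (-x) >> 63 // 0 if x == 0, -1 if x > 0
//	}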

// Truncations
// We ignore the unused high parts of registers, so truncates are just copies.
(Trunc16to8  ...) => (Copy ...)
(Trunc32to8  ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8  ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Shifts

// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0.
//
// Breaking down the operation:
//
// (SLL x y) generates x << (y & 63).
//
// If y < 64, this is the value we want. Otherwise, we want zero.
//
// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
(Lsh8x8   <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
(Lsh8x16  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh8x32  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh8x64  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] y)))
(Lsh16x8  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
(Lsh32x8  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
(Lsh64x8  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))

(Lsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
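
// An illustrative Go sketch of the unbounded (!shiftIsBounded) rules above
// (the helper name is ours): SLL masks the shift amount to 6 bits, so the raw
// result is ANDed with -(y < 64), which is all ones when y < 64 and 0 otherwise.
//
//	func lsh64(x, y uint64) uint64 {
//		raw := x << (y & 63) // what SLL produces
//		var lt uint64
//		if y < 64 { // SLTIU [64] y
//			lt = 1
//		}
//		return raw & -lt // Neg of the comparison: 0xffff... or 0
//	}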

// SRL only considers the bottom 6 bits of y; similarly, SRLW only considers the
// bottom 5 bits of y. Ensure that the result is always zero if the shift exceeds
// the maximum value. See Lsh above for a detailed description.
(Rsh8Ux8   <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
(Rsh8Ux16  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh8Ux32  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh8Ux64  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] y)))
(Rsh16Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
(Rsh32Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64  y))))
(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] y)))
(Rsh64Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] y)))

(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRL  (ZeroExt8to64  x) y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL  (ZeroExt16to64 x) y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x                 y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL  x                 y)

// SRA only considers the bottom 6 bits of y; similarly, SRAW only considers the
// bottom 5 bits. If y is greater than the maximum value (either 63 or 31
// depending on the instruction), the result of the shift should be either 0
// or -1 based on the sign bit of x.
//
// We implement this by performing the maximum shift (a shift amount of -1,
// i.e. all ones) if y exceeds the maximum value.
//
// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
// us with -1 (0xffff...) if y >= 64. Similarly, we OR (uint64(y < 32) - 1) into y
// before passing it to SRAW.
//
// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
// more than the 5 or 6 bits SRAW and SRA care about.
(Rsh8x8   <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
(Rsh8x16  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh8x32  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh8x64  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh16x8  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh32x8  <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64  y)))))
(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
(Rsh64x8  <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
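
// An illustrative Go sketch of the clamp used above (the helper name is ours):
// ORing y with (uint64(y < 64) - 1) leaves y unchanged when it is in range and
// forces an all-ones shift amount otherwise, so SRA sees either y or 63.
//
//	func rsh64x(x int64, y uint64) int64 {
//		var lt uint64
//		if y < 64 { // SLTIU [64] y
//			lt = 1
//		}
//		clamped := y | (lt - 1) // y if y < 64, 0xffff... otherwise
//		return x >> (clamped & 63)
//	}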

(Rsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRA  (SignExt8to64  x) y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA  (SignExt16to64 x) y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW  x                y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA   x                y)

// Rotates.
(RotateLeft8  <t> x y) => (OR (SLL  <t> x (ANDI [7]  <y.Type> y)) (SRL <t> (ZeroExt8to64  x) (ANDI [7]  <y.Type> (NEG <y.Type> y))))
(RotateLeft16 <t> x y) => (OR (SLL  <t> x (ANDI [15] <y.Type> y)) (SRL <t> (ZeroExt16to64 x) (ANDI [15] <y.Type> (NEG <y.Type> y))))
(RotateLeft32 ...) => (ROLW ...)
(RotateLeft64 ...) => (ROL  ...)
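
// The 8- and 16-bit rotates above are built from two shifts. An illustrative
// Go sketch of the 8-bit case (the helper name is ours): shift left by y&7 and
// right by (-y)&7 on the zero-extended value, then OR the halves together.
//
//	func rotateLeft8(x uint8, y uint64) uint8 {
//		l := x << (y & 7)
//		r := uint8(uint64(x) >> ((-y) & 7))
//		return l | r
//	}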

(Less64  ...) => (SLT  ...)
(Less32  x y) => (SLT  (SignExt32to64 x) (SignExt32to64 y))
(Less16  x y) => (SLT  (SignExt16to64 x) (SignExt16to64 y))
(Less8   x y) => (SLT  (SignExt8to64  x) (SignExt8to64  y))
(Less64U ...) => (SLTU ...)
(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Less8U  x y) => (SLTU (ZeroExt8to64  x) (ZeroExt8to64  y))
(Less(64|32)F ...) => (FLT(D|S) ...)

// Convert x <= y to !(y < x).
(Leq(64|32|16|8)  x y) => (Not (Less(64|32|16|8)  y x))
(Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
(Leq(64|32)F ...) => (FLE(D|S) ...)

(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
(Eq64  x y) => (SEQZ (SUB <x.Type> x y))
(Eq32  x y) &&  x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
(Eq32  x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq16  x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq8   x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64  x) (ZeroExt8to64  y)))
(Eq(64|32)F ...) => (FEQ(D|S) ...)

(NeqPtr x y) => (Not (EqPtr x y))
(Neq64  x y) => (Not (Eq64  x y))
(Neq32  x y) => (Not (Eq32  x y))
(Neq16  x y) => (Not (Eq16  x y))
(Neq8   x y) => (Not (Eq8   x y))
(Neq(64|32)F ...) => (FNE(D|S) ...)

// Loads
(Load <t> ptr mem) &&  t.IsBoolean()                   => (MOVBUload ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) &&  t.IsSigned()) => (MOVBload  ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) &&  t.IsSigned()) => (MOVHload  ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) &&  t.IsSigned()) => (MOVWload  ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t))      => (MOVDload  ptr mem)
(Load <t> ptr mem) &&  is32BitFloat(t)                 => (FMOVWload ptr mem)
(Load <t> ptr mem) &&  is64BitFloat(t)                 => (FMOVDload ptr mem)

// Stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (FMOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (FMOVDstore ptr val mem)

// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
// knows what variables are being read/written by the ops.
(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload  [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload  [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVHload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload  [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVDload  [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVDload  [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVBUload [off1+int32(off2)] {sym} base mem)
(MOVBload  [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVBload  [off1+int32(off2)] {sym} base mem)
(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVHUload [off1+int32(off2)] {sym} base mem)
(MOVHload  [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVHload  [off1+int32(off2)] {sym} base mem)
(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVWUload [off1+int32(off2)] {sym} base mem)
(MOVWload  [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVWload  [off1+int32(off2)] {sym} base mem)
(MOVDload  [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVDload  [off1+int32(off2)] {sym} base mem)

(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVBstore [off1+int32(off2)] {sym} base val mem)
(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVHstore [off1+int32(off2)] {sym} base val mem)
(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVWstore [off1+int32(off2)] {sym} base val mem)
(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVDstore [off1+int32(off2)] {sym} base val mem)
(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)

// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)

// Small zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) =>
	(MOVBstore [1] ptr (MOVDconst [0])
		(MOVBstore ptr (MOVDconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore ptr (MOVDconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] ptr (MOVDconst [0])
		(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [4] ptr mem) =>
	(MOVBstore [3] ptr (MOVDconst [0])
		(MOVBstore [2] ptr (MOVDconst [0])
			(MOVBstore [1] ptr (MOVDconst [0])
				(MOVBstore ptr (MOVDconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore ptr (MOVDconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] ptr (MOVDconst [0])
		(MOVHstore [4] ptr (MOVDconst [0])
			(MOVHstore [2] ptr (MOVDconst [0])
				(MOVHstore ptr (MOVDconst [0]) mem))))

(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVDconst [0])
		(MOVBstore [1] ptr (MOVDconst [0])
			(MOVBstore ptr (MOVDconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] ptr (MOVDconst [0])
		(MOVHstore [2] ptr (MOVDconst [0])
			(MOVHstore ptr (MOVDconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] ptr (MOVDconst [0])
		(MOVWstore [4] ptr (MOVDconst [0])
			(MOVWstore ptr (MOVDconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [16] ptr (MOVDconst [0])
		(MOVDstore [8] ptr (MOVDconst [0])
			(MOVDstore ptr (MOVDconst [0]) mem)))
(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [24] ptr (MOVDconst [0])
		(MOVDstore [16] ptr (MOVDconst [0])
			(MOVDstore [8] ptr (MOVDconst [0])
				(MOVDstore ptr (MOVDconst [0]) mem))))

// Medium 8-aligned zeroing uses a Duff's device
// 8 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
	&& s%8 == 0 && s <= 8*128
	&& t.Alignment()%8 == 0 && !config.noDuffDevice =>
	(DUFFZERO [8 * (128 - s/8)] ptr mem)

// Generic zeroing uses a loop
(Zero [s] {t} ptr mem) =>
	(LoweredZero [t.Alignment()]
		ptr
		(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
		mem)

// Checks
(IsNonNil ...) => (SNEZ ...)
(IsInBounds ...) => (Less64U ...)
(IsSliceInBounds ...) => (Leq64U ...)

// Trivial lowering
(NilCheck ...) => (LoweredNilCheck ...)
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// Small moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) =>
	(MOVBstore [1] dst (MOVBload [1] src mem)
		(MOVBstore dst (MOVBload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] dst (MOVHload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) =>
	(MOVBstore [3] dst (MOVBload [3] src mem)
		(MOVBstore [2] dst (MOVBload [2] src mem)
			(MOVBstore [1] dst (MOVBload [1] src mem)
				(MOVBstore dst (MOVBload src mem) mem))))
(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore dst (MOVDload src mem) mem)
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVBstore [1] dst (MOVBload [1] src mem)
			(MOVBstore dst (MOVBload src mem) mem)))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [8] dst (MOVDload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [16] dst (MOVDload [16] src mem)
		(MOVDstore [8] dst (MOVDload [8] src mem)
			(MOVDstore dst (MOVDload src mem) mem)))
(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [24] dst (MOVDload [24] src mem)
		(MOVDstore [16] dst (MOVDload [16] src mem)
			(MOVDstore [8] dst (MOVDload [8] src mem)
				(MOVDstore dst (MOVDload src mem) mem))))

// Medium 8-aligned move uses a Duff's device
// 16 and 128 are magic constants, see runtime/mkduff.go
(Move [s] {t} dst src mem)
	&& s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
	&& !config.noDuffDevice && logLargeCopy(v, s) =>
	(DUFFCOPY [16 * (128 - s/8)] dst src mem)

// Generic move uses a loop
(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
	(LoweredMove [t.Alignment()]
		dst
		src
		(ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
		mem)

// Boolean ops; 0=false, 1=true
(AndB ...) => (AND ...)
(OrB  ...) => (OR  ...)
(EqB  x y) => (SEQZ (SUB <typ.Bool> x y))
(NeqB x y) => (SNEZ (SUB <typ.Bool> x y))
(Not  ...) => (SEQZ ...)

// Lowering pointer arithmetic
// TODO: Special handling for SP offsets, like ARM
(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)

(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
(ConstNil) => (MOVDconst [0])
(ConstBool [val]) => (MOVDconst [int64(b2i(val))])

(Addr {sym} base) => (MOVaddr {sym} [0] base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVaddr {sym} base)

// Calls
(StaticCall  ...) => (CALLstatic  ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall   ...) => (CALLinter   ...)
(TailCall ...) => (CALLtail ...)

// Atomic Intrinsics
(AtomicLoad(Ptr|64|32|8)  ...) => (LoweredAtomicLoad(64|64|32|8) ...)
(AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
(AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
(AtomicAnd8 ptr val mem) =>
	(LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
		(NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
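
// An illustrative Go sketch of the operand computation above (the helper name
// is ours; RISC-V is little-endian, so the byte lane is ptr & 3). The byte's
// complemented value is shifted into its lane and the whole word is inverted,
// leaving 0xff in the other lanes so the 32-bit atomic AND preserves them.
//
//	func and8Mask(ptr uintptr, val uint8) (wordAddr uintptr, mask uint32) {
//		wordAddr = ptr &^ 3                 // ANDI [^3] ptr
//		shift := (ptr & 3) * 8              // SLLI [3] (ANDI [3] ptr)
//		mask = ^(uint32(val^0xff) << shift) // NOT (SLL (XORI [0xff] val) shift)
//		return
//	}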

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)

(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)

(AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)

// AtomicOr8(ptr,val)  => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
(AtomicOr8 ptr val mem) =>
	(LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
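
// The matching sketch for AtomicOr8 (again illustrative, helper name ours):
// the byte is zero-extended and shifted into its lane, so the other lanes of
// the 32-bit atomic OR operand are zero and therefore left unchanged.
//
//	func or8Operand(ptr uintptr, val uint8) (wordAddr uintptr, operand uint32) {
//		wordAddr = ptr &^ 3    // ANDI [^3] ptr
//		shift := (ptr & 3) * 8 // SLLI [3] (ANDI [3] ptr)
//		operand = uint32(val) << shift
//		return
//	}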

(AtomicOr32  ...) => (LoweredAtomicOr32  ...)

// Conditional branches
(If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)

// Optimizations

// Absorb SEQZ/SNEZ into branch.
(BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
(BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
(BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
(BNEZ (SNEZ x) yes no) => (BNEZ x yes no)

// Remove redundant NEG from BEQZ/BNEZ.
(BEQZ (NEG x) yes no) => (BEQZ x yes no)
(BNEZ (NEG x) yes no) => (BNEZ x yes no)

// Negate comparison with FNES/FNED.
(BEQZ (FNES <t> x y) yes no) => (BNEZ (FEQS <t> x y) yes no)
(BNEZ (FNES <t> x y) yes no) => (BEQZ (FEQS <t> x y) yes no)
(BEQZ (FNED <t> x y) yes no) => (BNEZ (FEQD <t> x y) yes no)
(BNEZ (FNED <t> x y) yes no) => (BEQZ (FEQD <t> x y) yes no)

// Convert BEQZ/BNEZ into more optimal branch conditions.
(BEQZ (SUB x y) yes no) => (BEQ x y yes no)
(BNEZ (SUB x y) yes no) => (BNE x y yes no)
(BEQZ (SLT x y) yes no) => (BGE x y yes no)
(BNEZ (SLT x y) yes no) => (BLT x y yes no)
(BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
(BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
(BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no)
(BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no)
(BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no)
(BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no)

// Convert branch with zero to more optimal branch zero.
(BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
(BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
(BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
(BLT (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
(BLT cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
(BGE (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
(BGE cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)

// Remove redundant NEG from SEQZ/SNEZ.
(SEQZ (NEG x)) => (SEQZ x)
(SNEZ (NEG x)) => (SNEZ x)

// Remove redundant SEQZ/SNEZ.
(SEQZ (SEQZ x)) => (SNEZ x)
(SEQZ (SNEZ x)) => (SEQZ x)
(SNEZ (SEQZ x)) => (SEQZ x)
(SNEZ (SNEZ x)) => (SNEZ x)

// Store zero.
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)

// Boolean ops are already extended.
(MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x
(MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x
(MOVBUreg x:((SEQZ|SNEZ) _)) => x
(MOVBUreg x:((SLT|SLTU) _ _)) => x

// Avoid extending when already sufficiently masked.
(MOVBreg  x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x
(MOVHreg  x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x
(MOVWreg  x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x
(MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x
(MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x
(MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x

// Combine masking and zero extension.
(MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x)
(MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x)
(MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x)

// Avoid sign/zero extension for consts.
(MOVBreg  (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
(MOVHreg  (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
(MOVWreg  (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])

// Avoid sign/zero extension after properly typed load.
(MOVBreg  x:(MOVBload  _ _)) => (MOVDreg x)
(MOVHreg  x:(MOVBload  _ _)) => (MOVDreg x)
(MOVHreg  x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHreg  x:(MOVHload  _ _)) => (MOVDreg x)
(MOVWreg  x:(MOVBload  _ _)) => (MOVDreg x)
(MOVWreg  x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWreg  x:(MOVHload  _ _)) => (MOVDreg x)
(MOVWreg  x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWreg  x:(MOVWload  _ _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)

// Avoid zero extension after properly typed atomic operation.
(MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x)

// Avoid sign extension after word arithmetic.
(MOVWreg x:(ADDIW   _)) => (MOVDreg x)
(MOVWreg x:(SUBW  _ _)) => (MOVDreg x)
(MOVWreg x:(NEGW    _)) => (MOVDreg x)
(MOVWreg x:(MULW  _ _)) => (MOVDreg x)
(MOVWreg x:(DIVW  _ _)) => (MOVDreg x)
(MOVWreg x:(DIVUW _ _)) => (MOVDreg x)
(MOVWreg x:(REMW  _ _)) => (MOVDreg x)
(MOVWreg x:(REMUW _ _)) => (MOVDreg x)
(MOVWreg x:(ROLW  _ _)) => (MOVDreg x)
(MOVWreg x:(RORW  _ _)) => (MOVDreg x)
(MOVWreg x:(RORIW   _)) => (MOVDreg x)

// Fold double extensions.
(MOVBreg  x:(MOVBreg  _)) => (MOVDreg x)
(MOVHreg  x:(MOVBreg  _)) => (MOVDreg x)
(MOVHreg  x:(MOVBUreg _)) => (MOVDreg x)
(MOVHreg  x:(MOVHreg  _)) => (MOVDreg x)
(MOVWreg  x:(MOVBreg  _)) => (MOVDreg x)
(MOVWreg  x:(MOVBUreg _)) => (MOVDreg x)
(MOVWreg  x:(MOVHreg  _)) => (MOVDreg x)
(MOVWreg  x:(MOVWreg  _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)

// Do not extend before store.
(MOVBstore [off] {sym} ptr (MOVBreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// Replace extend after load with alternate load where possible.
(MOVBreg  <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload  <t> [off] {sym} ptr mem)
(MOVHreg  <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload  <t> [off] {sym} ptr mem)
(MOVWreg  <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload  <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload  [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload  [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
(MOVWUreg <t> x:(MOVWload  [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)

// If a register move has only one use, just use the same register without emitting an instruction.
// MOVDnop does not emit an instruction; it only ensures the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)

// TODO: we should be able to get rid of MOVDnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])

// Avoid unnecessary zero and sign extension when right shifting.
(SRAI <t> [x] (MOVWreg  y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y)
(SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y)

// Replace right shifts that exceed size of signed type.
(SRAI <t> [x] (MOVBreg y)) && x >=  8 => (SRAI  [63] (SLLI <t> [56] y))
(SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI  [63] (SLLI <t> [48] y))
(SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)
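
// An illustrative Go sketch of the byte case above (the helper name is ours):
// shifting a sign-extended byte right by 8 or more just spreads its sign bit,
// which is what moving the byte to the top of the register and then
// arithmetic-shifting by 63 computes.
//
//	func signSpread8(y int64) int64 {
//		return (y << 56) >> 63 // 0 or -1, depending on bit 7 of y
//	}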

// Eliminate right shifts that exceed size of unsigned type.
(SRLI <t> [x] (MOVBUreg y)) && x >=  8 => (MOVDconst <t> [0])
(SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0])
(SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0])

// Fold constant into immediate instructions where possible.
(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
(OR  (MOVDconst [val]) x) && is32Bit(val) => (ORI  [val] x)
(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
(ROL  x (MOVDconst [val])) => (RORI  [int64(int8(-val)&63)] x)
(ROLW x (MOVDconst [val])) => (RORIW [int64(int8(-val)&31)] x)
(ROR  x (MOVDconst [val])) => (RORI  [int64(val&63)] x)
(RORW x (MOVDconst [val])) => (RORIW [int64(val&31)] x)
(SLL  x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
(SRL  x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
(SLLW x (MOVDconst [val])) => (SLLIW [int64(val&31)] x)
(SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x)
(SRA  x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
(SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x)
(SLT  x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI  [val] x)
(SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)

// Replace negated left rotation with right rotation.
(ROL  x (NEG y)) => (ROR  x y)
(ROLW x (NEG y)) => (RORW x y)

// Convert const subtraction into ADDI with negative immediate, where possible.
(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
(SUB <t> (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI <t> [-val] y))

// Subtraction of zero.
(SUB  x (MOVDconst [0])) => x
(SUBW x (MOVDconst [0])) => (ADDIW [0] x)

// Subtraction from zero.
(SUB  (MOVDconst [0]) x) => (NEG x)
(SUBW (MOVDconst [0]) x) => (NEGW x)

// Fold negation into subtraction.
(NEG (SUB x y)) => (SUB y x)
(NEG <t> s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB <t> y x))

// Double negation.
(NEG (NEG x)) => x

// Addition of zero or two constants.
(ADDI [0] x) => x
(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])

// ANDI with all zeros, all ones or two constants.
(ANDI [0]  x) => (MOVDconst [0])
(ANDI [-1] x) => x
(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])

// ORI with all zeroes, all ones or two constants.
(ORI [0]  x) => x
(ORI [-1] x) => (MOVDconst [-1])
(ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])

// Combine operations with immediate.
(ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z)
(ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z)
(ORI  [x] (ORI  [y] z)) => (ORI  [x | y] z)

// Negation of a constant.
(NEG  (MOVDconst [x])) => (MOVDconst [-x])
(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])

// Shift of a constant.
(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])

// SLTI/SLTIU with constants.
(SLTI  [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
(SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])

// SLTI/SLTIU with known outcomes.
(SLTI  [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1])
(SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1])
(SLTI  [x] (ORI  [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0])
(SLTIU [x] (ORI  [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0])

// SLT/SLTU with known outcomes.
(SLT  x x) => (MOVDconst [0])
(SLTU x x) => (MOVDconst [0])

// Deadcode for LoweredMuluhilo
(Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
(Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)

(FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a)
(FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a)
(FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a)

// Merge negation into fused multiply-add and multiply-subtract.
//
// Key:
//
//   [+ -](x * y [+ -] z).
//    _ N         A S
//                D U
//                D B
//
// Note: multiplication commutativity handled by rule generator.
(F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z)
(F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z)
(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
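
// For reference, the RISC-V fused ops compute fmadd = x*y+z, fmsub = x*y-z,
// fnmadd = -(x*y)-z and fnmsub = -(x*y)+z, so negating one input moves a rule
// from one op to another. An illustrative Go sketch of the sign identities the
// first rule in each pair relies on (unfused arithmetic, helper name ours):
//
//	func fmaddNegIdentities(x, y, z float64) (negX, negZ float64) {
//		negX = -(x * y) + z // FMADD with a negated x rewrites to FNMSUB
//		negZ = x*y - z      // FMADD with a negated z rewrites to FMSUB
//		return
//	}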