; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s

declare i32 @llvm.r600.read.tidig.x() #0

; FUNC-LABEL: {{^}}test2:
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
  %result = and <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test4:
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
  %result = and <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_i32:
; SI: s_and_b32
define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i32:
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FIXME: We should duplicate the constant so that the SALU use folds
; into the s_and_b32 and the VALU use is materialized directly,
; without copying from the SGPR.
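; A hypothetical ideal sequence (sketch only, not the current output):
;   s_and_b32 s0, s0, 0x12d687
;   v_mov_b32_e32 v0, 0x12d687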

; Second use is a VGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_0:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
; SI-DAG: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], [[K]]
; SI: buffer_store_dword [[VK]]
define void @s_and_multi_use_constant_i32_0(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567

  ; Add a second use to keep the copy-to-VGPR + store from being
  ; replaced with a VALU op in the future.
  %foo = add i32 %and, %b
  store volatile i32 %foo, i32 addrspace(1)* %out
  store volatile i32 1234567, i32 addrspace(1)* %out
  ret void
}

; Second use is another SGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_1:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
; SI: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI: s_add_i32
; SI: s_add_i32 [[ADD:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI: buffer_store_dword
define void @s_and_multi_use_constant_i32_1(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567
  %foo = add i32 %and, 1234567
  %bar = add i32 %foo, %b
  store volatile i32 %bar, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_vgpr:
; SI: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @v_and_i32_vgpr_vgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep.a
  %b = load i32, i32 addrspace(1)* %gep.b
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_sgpr_vgpr:
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
define void @v_and_i32_sgpr_vgpr(i32 addrspace(1)* %out, i32 %a, i32 addrspace(1)* %bptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.b
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_sgpr:
; SI-DAG: s_load_dword [[SB:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VA:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SB]], [[VA]]
define void @v_and_i32_vgpr_sgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 %b) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep.a
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
define void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, 64
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
define void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, -16
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_i64:
; SI: s_and_b64
define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Should use SGPRs
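; (i.e. a hypothetical scalar sequence such as s_and_b32 s0, s0, s1,
; rather than the v_and_b32 matched below.)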
; FUNC-LABEL: {{^}}s_and_i1:
; SI: v_and_b32
define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
  %and = and i1 %a, %b
  store i1 %and, i1 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i64:
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000{{$}}
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80{{$}}
; SI: buffer_store_dwordx2
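; 549756338176 = 0x0000008000080000: the 64-bit AND splits into two
; 32-bit ANDs, 0x80000 on the low half and 0x80 on the high half.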
define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %and = and i64 %a, 549756338176
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_multi_use_constant_i64:
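; Note: the XSI-prefixed lines here and below are not enabled by any
; RUN line; they sketch the s_and_b64 codegen we would prefer.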
; XSI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x80000{{$}}
; XSI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0x80{{$}}
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}
define void @s_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %and0 = and i64 %a, 549756338176
  %and1 = and i64 %b, 549756338176
  store volatile i64 %and0, i64 addrspace(1)* %out
  store volatile i64 %and1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_32_bit_constant_i64:
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687{{$}}
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_32_bit_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %and = and i64 %a, 1234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_multi_use_inline_imm_i64:
; SI: s_load_dword [[A:s[0-9]+]]
; SI: s_load_dword [[B:s[0-9]+]]
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_lshl_b32 [[A]], [[A]], 1
; SI: s_lshl_b32 [[B]], [[B]], 1
; SI: s_and_b32 s{{[0-9]+}}, [[A]], 62
; SI: s_and_b32 s{{[0-9]+}}, [[B]], 62
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
  %shl.a = shl i64 %a, 1
  %shl.b = shl i64 %b, 1
  %and0 = and i64 %shl.a, 62
  %and1 = and i64 %shl.b, 62
  %add0 = add i64 %and0, %c
  %add1 = add i64 %and1, %c
  store volatile i64 %add0, i64 addrspace(1)* %out
  store volatile i64 %add1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64:
; SI: v_and_b32
; SI: v_and_b32
define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i64:
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0xab19b207, {{v[0-9]+}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0x11e, {{v[0-9]+}}
; SI: buffer_store_dwordx2
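; 1231231234567 = 0x0000011eab19b207: lo = 0xab19b207, hi = 0x11e.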
define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 1231231234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_multi_use_constant_i64:
; SI: buffer_load_dwordx2 v{{\[}}[[LO0:[0-9]+]]:[[HI0:[0-9]+]]{{\]}}
; SI: buffer_load_dwordx2 v{{\[}}[[LO1:[0-9]+]]:[[HI1:[0-9]+]]{{\]}}
; SI-DAG: s_mov_b32 [[KLO:s[0-9]+]], 0xab19b207{{$}}
; SI-DAG: s_movk_i32 [[KHI:s[0-9]+]], 0x11e{{$}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KLO]], v[[LO0]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KHI]], v[[HI0]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KLO]], v[[LO1]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KHI]], v[[HI1]]
; SI: buffer_store_dwordx2
; SI: buffer_store_dwordx2
define void @v_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load volatile i64, i64 addrspace(1)* %aptr
  %b = load volatile i64, i64 addrspace(1)* %aptr
  %and0 = and i64 %a, 1231231234567
  %and1 = and i64 %b, 1231231234567
  store volatile i64 %and0, i64 addrspace(1)* %out
  store volatile i64 %and1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_multi_use_inline_imm_i64:
; SI: buffer_load_dwordx2 v{{\[}}[[LO0:[0-9]+]]:[[HI0:[0-9]+]]{{\]}}
; SI-NOT: and
; SI: buffer_load_dwordx2 v{{\[}}[[LO1:[0-9]+]]:[[HI1:[0-9]+]]{{\]}}
; SI-NOT: and
; SI: v_and_b32_e32 v[[RESLO0:[0-9]+]], 63, v[[LO0]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v{{\[}}[[RESLO0]]
; SI: v_and_b32_e32 v[[RESLO1:[0-9]+]], 63, v[[LO1]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v{{\[}}[[RESLO1]]
define void @v_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load volatile i64, i64 addrspace(1)* %aptr
  %b = load volatile i64, i64 addrspace(1)* %aptr
  %and0 = and i64 %a, 63
  %and1 = and i64 %b, 63
  store volatile i64 %and0, i64 addrspace(1)* %out
  store volatile i64 %and1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64_32_bit_constant:
; SI: buffer_load_dword [[VAL:v[0-9]+]]
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 0x12d687, [[VAL]]
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @v_and_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 1234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_i64:
; SI: buffer_load_dword v{{[0-9]+}}
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64:
; SI: s_load_dword
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 64
; SI-NOT: and
; SI: buffer_store_dword
define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64_noshrink:
; SI: s_load_dword [[A:s[0-9]+]]
; SI: s_lshl_b32 [[A]], [[A]], 1{{$}}
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, [[A]], 64
; SI-NOT: and
; SI: s_add_u32
; SI-NEXT: s_addc_u32
define void @s_and_inline_imm_64_i64_noshrink(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a, i64 %b) {
  %shl = shl i64 %a, 1
  %and = and i64 %shl, 64
  %add = add i64 %and, %b
  store i64 %add, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1_i64:
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3ff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
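; 4607182418800017408 = 0x3ff0000000000000 (double 1.0). The low half
; is zero, so only the high half needs an s_and_b32 with 0x3ff00000.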
define void @s_and_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4607182418800017408
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_1.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13830554455654793216
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_0.5_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0.5

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3fe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4602678819172646912
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_0.5_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -0.5

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbfe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13826050856027422720
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_2.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
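; The high half of the double 2.0 pattern (0x4000000000000000) is
; 0x40000000, which is also the f32 2.0 pattern, so the 32-bit AND can
; use the 2.0 inline immediate instead of a literal.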
define void @s_and_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4611686018427387904
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_2.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, -2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13835058055282163712
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_4.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 4.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x40100000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4616189618054758400
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_4.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -4.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xc0100000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13839561654909534208
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; Test with the 64-bit integer bitpattern for a 32-bit float in the
; low 32-bits, which is not a valid 64-bit inline immediate.
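; For example, 4.0f is 0x40800000; as an i64 that is
; 0x0000000040800000, so the high half is zero and only the low half
; needs the AND, which can still use the f32 4.0 inline immediate.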

; FUNC-LABEL: {{^}}s_and_inline_imm_f32_4.0_i64:
; SI: s_load_dword s
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1082130432
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_f32_neg_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, -4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, -1065353216
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; Shift the same f32 bitpattern into the upper 32 bits.
; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
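; 4647714815446351872 = 0x4080000000000000: the f32 4.0 pattern sits in
; the high half, so only the high half needs the AND, and it can still
; use the 4.0 inline immediate.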
define void @s_and_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4647714815446351872
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_neg_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, -4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13871086852301127680
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

attributes #0 = { nounwind readnone }