; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -fp-contract=on -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-STRICT,VI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-CONTRACT,VI %s

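; @llvm.fmuladd.f64 carries permission to fuse, so it can be selected to
; v_fma_f64 under both -fp-contract=on and -fp-contract=fast.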
; GCN-LABEL: {{^}}fmuladd_f64:
; GCN: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                         double addrspace(1)* %in2, double addrspace(1)* %in3) #0 {
  %r0 = load double, double addrspace(1)* %in1
  %r1 = load double, double addrspace(1)* %in2
  %r2 = load double, double addrspace(1)* %in3
  %r3 = tail call double @llvm.fmuladd.f64(double %r0, double %r1, double %r2)
  store double %r3, double addrspace(1)* %out
  ret void
}

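; A plain fmul/fadd pair with no fast-math flags should only be fused
; when -fp-contract=fast is in effect.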
; GCN-LABEL: {{^}}fmul_fadd_f64:
; GCN-CONTRACT: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}

; GCN-STRICT: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
; GCN-STRICT: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @fmul_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                           double addrspace(1)* %in2, double addrspace(1)* %in3) #0 {
  %r0 = load double, double addrspace(1)* %in1
  %r1 = load double, double addrspace(1)* %in2
  %r2 = load double, double addrspace(1)* %in3
  %tmp = fmul double %r0, %r1
  %r3 = fadd double %tmp, %r2
  store double %r3, double addrspace(1)* %out
  ret void
}

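; The contract flag on the fadd allows the pair to be fused regardless of
; the -fp-contract setting, so every run line expects v_fma_f64 here.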
; GCN-LABEL: {{^}}fmul_fadd_contract_f64:
; GCN: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}

define amdgpu_kernel void @fmul_fadd_contract_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                           double addrspace(1)* %in2, double addrspace(1)* %in3) #0 {
  %r0 = load double, double addrspace(1)* %in1
  %r1 = load double, double addrspace(1)* %in2
  %r2 = load double, double addrspace(1)* %in3
  %tmp = fmul double %r0, %r1
  %r3 = fadd contract double %tmp, %r2
  store double %r3, double addrspace(1)* %out
  ret void
}

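; (a + a) + b can be folded to fma(a, 2.0, b) using the inline constant,
; but only when contraction is allowed.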
; GCN-LABEL: {{^}}fadd_a_a_b_f64:
; GCN: {{buffer|flat}}_load_dwordx2 [[R1:v\[[0-9]+:[0-9]+\]]],
; GCN: {{buffer|flat}}_load_dwordx2 [[R2:v\[[0-9]+:[0-9]+\]]],

; GCN-STRICT: v_add_f64 [[TMP:v\[[0-9]+:[0-9]+\]]], [[R1]], [[R1]]
; GCN-STRICT: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[TMP]], [[R2]]

; GCN-CONTRACT: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[R1]], 2.0, [[R2]]

; SI: buffer_store_dwordx2 [[RESULT]]
; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @fadd_a_a_b_f64(double addrspace(1)* %out,
                            double addrspace(1)* %in1,
                            double addrspace(1)* %in2) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
  %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid

  %r0 = load volatile double, double addrspace(1)* %gep.0
  %r1 = load volatile double, double addrspace(1)* %gep.1

  %add.0 = fadd double %r0, %r0
  %add.1 = fadd double %add.0, %r1
  store double %add.1, double addrspace(1)* %gep.out
  ret void
}

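; Same fold as above with the outer add commuted: b + (a + a).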
; GCN-LABEL: {{^}}fadd_b_a_a_f64:
; GCN: {{buffer|flat}}_load_dwordx2 [[R1:v\[[0-9]+:[0-9]+\]]],
; GCN: {{buffer|flat}}_load_dwordx2 [[R2:v\[[0-9]+:[0-9]+\]]],

; GCN-STRICT: v_add_f64 [[TMP:v\[[0-9]+:[0-9]+\]]], [[R1]], [[R1]]
; GCN-STRICT: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[R2]], [[TMP]]

; GCN-CONTRACT: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[R1]], 2.0, [[R2]]

; SI: buffer_store_dwordx2 [[RESULT]]
; VI: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @fadd_b_a_a_f64(double addrspace(1)* %out,
                            double addrspace(1)* %in1,
                            double addrspace(1)* %in2) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
  %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid

  %r0 = load volatile double, double addrspace(1)* %gep.0
  %r1 = load volatile double, double addrspace(1)* %gep.1

  %add.0 = fadd double %r0, %r0
  %add.1 = fadd double %r1, %add.0
  store double %add.1, double addrspace(1)* %gep.out
  ret void
}

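; a * b - c folds to an fma with a negated addend when contraction is
; allowed; under strict contraction the mul and the add of the negated
; operand stay separate.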
; GCN-LABEL: {{^}}mad_sub_f64:
; GCN-STRICT: v_mul_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}
; GCN-STRICT: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}

; GCN-CONTRACT: v_fma_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double addrspace(1)* noalias nocapture readonly %ptr) #1 {
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
  %tid.ext = sext i32 %tid to i64
  %gep0 = getelementptr double, double addrspace(1)* %ptr, i64 %tid.ext
  %add1 = add i64 %tid.ext, 1
  %gep1 = getelementptr double, double addrspace(1)* %ptr, i64 %add1
  %add2 = add i64 %tid.ext, 2
  %gep2 = getelementptr double, double addrspace(1)* %ptr, i64 %add2
  %outgep = getelementptr double, double addrspace(1)* %out, i64 %tid.ext
  %a = load volatile double, double addrspace(1)* %gep0, align 8
  %b = load volatile double, double addrspace(1)* %gep1, align 8
  %c = load volatile double, double addrspace(1)* %gep2, align 8
  %mul = fmul double %a, %b
  %sub = fsub double %mul, %c
  store double %sub, double addrspace(1)* %outgep, align 8
  ret void
}

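; The fast flag on only the first add should not be enough to fuse the
; pair; fusion still requires -fp-contract=fast.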
; GCN-LABEL: {{^}}fadd_a_a_b_f64_fast_add0:
; GCN-STRICT: v_add_f64
; GCN-STRICT: v_add_f64

; GCN-CONTRACT: v_fma_f64
define amdgpu_kernel void @fadd_a_a_b_f64_fast_add0(double addrspace(1)* %out,
                                      double addrspace(1)* %in1,
                                      double addrspace(1)* %in2) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
  %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid

  %r0 = load volatile double, double addrspace(1)* %gep.0
  %r1 = load volatile double, double addrspace(1)* %gep.1

  %add.0 = fadd fast double %r0, %r0
  %add.1 = fadd double %add.0, %r1
  store double %add.1, double addrspace(1)* %gep.out
  ret void
}

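; Likewise, the fast flag on only the second add should not allow fusion
; under -fp-contract=on.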
; GCN-LABEL: {{^}}fadd_a_a_b_f64_fast_add1:
; GCN-STRICT: v_add_f64
; GCN-STRICT: v_add_f64

; GCN-CONTRACT: v_fma_f64
define amdgpu_kernel void @fadd_a_a_b_f64_fast_add1(double addrspace(1)* %out,
                                      double addrspace(1)* %in1,
                                      double addrspace(1)* %in2) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
  %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid

  %r0 = load volatile double, double addrspace(1)* %gep.0
  %r1 = load volatile double, double addrspace(1)* %gep.1

  %add.0 = fadd double %r0, %r0
  %add.1 = fadd fast double %add.0, %r1
  store double %add.1, double addrspace(1)* %gep.out
  ret void
}

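; With the fast flag on both adds, the fold to v_fma_f64 is expected under
; every run line.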
; GCN-LABEL: {{^}}fadd_a_a_b_f64_fast:
; GCN: v_fma_f64
define amdgpu_kernel void @fadd_a_a_b_f64_fast(double addrspace(1)* %out,
                                 double addrspace(1)* %in1,
                                 double addrspace(1)* %in2) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
  %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid

  %r0 = load volatile double, double addrspace(1)* %gep.0
  %r1 = load volatile double, double addrspace(1)* %gep.1

  %add.0 = fadd fast double %r0, %r0
  %add.1 = fadd fast double %add.0, %r1
  store double %add.1, double addrspace(1)* %gep.out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare double @llvm.fmuladd.f64(double, double, double) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }