; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI %s
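; The two RUN lines compile the same IR with and without -enable-unsafe-fp-math;
; the SI-UNSAFE and SI-SAFE prefixes select the expectations for each mode, and
; plain SI checks apply to both runs.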

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
declare float @llvm.sqrt.f32(float) nounwind readnone
declare double @llvm.sqrt.f64(double) nounwind readnone

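; A 1.0 / sqrt(x) pattern on f32, annotated with the 2.5 ulp !fpmath hint, is
; expected to fold to a single v_rsq_f32 in both the safe and unsafe runs.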
; SI-LABEL: {{^}}rsq_f32:
; SI: v_rsq_f32_e32
; SI: s_endpgm
define amdgpu_kernel void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #0 {
  %val = load float, float addrspace(1)* %in, align 4
  %sqrt = call float @llvm.sqrt.f32(float %val) nounwind readnone
  %div = fdiv float 1.0, %sqrt, !fpmath !0
  store float %div, float addrspace(1)* %out, align 4
  ret void
}

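; For f64 the 1.0 / sqrt(x) fold is only expected under unsafe math: the
; SI-UNSAFE run looks for v_rsq_f64, while SI-SAFE still expects a plain
; v_sqrt_f64.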
; SI-LABEL: {{^}}rsq_f64:
; SI-UNSAFE: v_rsq_f64_e32
; SI-SAFE: v_sqrt_f64_e32
; SI: s_endpgm
define amdgpu_kernel void @rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #0 {
  %val = load double, double addrspace(1)* %in, align 4
  %sqrt = call double @llvm.sqrt.f64(double %val) nounwind readnone
  %div = fdiv double 1.0, %sqrt
  store double %div, double addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}rsq_f32_sgpr:
; SI: v_rsq_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
; SI: s_endpgm
define amdgpu_kernel void @rsq_f32_sgpr(float addrspace(1)* noalias %out, float %val) #0 {
  %sqrt = call float @llvm.sqrt.f32(float %val) nounwind readnone
  %div = fdiv float 1.0, %sqrt, !fpmath !0
  store float %div, float addrspace(1)* %out, align 4
  ret void
}

; Recognize that this is rsqrt(a) * rcp(b) * c,
; not 1 / ( 1 / sqrt(a)) * rcp(b) * c.

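; That is, z = c / (sqrt(a) * b) equals rsqrt(a) * rcp(b) * c, whereas
; 1 / (1 / sqrt(a)) * rcp(b) * c would simplify to sqrt(a) * rcp(b) * c, a
; different value, so the rsq combine must not fire on this pattern. The
; SI-SAFE run additionally verifies that no v_rsq_f32 is emitted.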
; NOTE: c * rcp( sqrt(a) * b ) is generated when we move rcp generation to AMDGPUCodeGenPrepare.

; SI-LABEL: @rsqrt_fmul
; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-DAG: buffer_load_dword [[C:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8

; SI-UNSAFE-DAG: v_sqrt_f32_e32 [[SQRT:v[0-9]+]], [[A]]
; SI-UNSAFE-DAG: v_mul_f32_e32  [[MUL:v[0-9]+]], [[SQRT]], [[B]]
; SI-UNSAFE-DAG: v_rcp_f32_e32  [[RCP:v[0-9]+]], [[MUL]]
; SI-UNSAFE-DAG: v_mul_f32_e32  [[RESULT:v[0-9]+]], [[C]], [[RCP]]
; SI-UNSAFE: buffer_store_dword [[RESULT]]

; SI-SAFE-NOT: v_rsq_f32

; SI: s_endpgm
define amdgpu_kernel void @rsqrt_fmul(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
  %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2

  %a = load volatile float, float addrspace(1)* %gep.0
  %b = load volatile float, float addrspace(1)* %gep.1
  %c = load volatile float, float addrspace(1)* %gep.2

  %x = call float @llvm.sqrt.f32(float %a)
  %y = fmul float %x, %b
  %z = fdiv float %c, %y
  store float %z, float addrspace(1)* %out.gep
  ret void
}

; SI-LABEL: {{^}}neg_rsq_f32:
; SI-SAFE: v_sqrt_f32_e32 [[SQRT:v[0-9]+]], v{{[0-9]+}}
; SI-SAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
; SI-SAFE: buffer_store_dword [[RSQ]]

; SI-UNSAFE: v_sqrt_f32_e32 [[SQRT:v[0-9]+]], v{{[0-9]+}}
; SI-UNSAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
; SI-UNSAFE: buffer_store_dword [[RSQ]]
define amdgpu_kernel void @neg_rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #0 {
  %val = load float, float addrspace(1)* %in, align 4
  %sqrt = call float @llvm.sqrt.f32(float %val)
  %div = fdiv float -1.0, %sqrt, !fpmath !0
  store float %div, float addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}neg_rsq_f64:
; SI-SAFE: v_sqrt_f64_e32
; SI-SAFE: v_div_scale_f64

; SI-UNSAFE: v_sqrt_f64_e32 [[SQRT:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}
; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
define amdgpu_kernel void @neg_rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #0 {
  %val = load double, double addrspace(1)* %in, align 4
  %sqrt = call double @llvm.sqrt.f64(double %val)
  %div = fdiv double -1.0, %sqrt
  store double %div, double addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}neg_rsq_neg_f32:
; SI-SAFE: v_sqrt_f32_e64 [[SQRT:v[0-9]+]], -v{{[0-9]+}}
; SI-SAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
; SI-SAFE: buffer_store_dword [[RSQ]]

; SI-UNSAFE: v_sqrt_f32_e64 [[SQRT:v[0-9]+]], -v{{[0-9]+}}
; SI-UNSAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
; SI-UNSAFE: buffer_store_dword [[RSQ]]
define amdgpu_kernel void @neg_rsq_neg_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #0 {
  %val = load float, float addrspace(1)* %in, align 4
  %val.fneg = fsub float -0.0, %val
  %sqrt = call float @llvm.sqrt.f32(float %val.fneg)
  %div = fdiv float -1.0, %sqrt, !fpmath !0
  store float %div, float addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}neg_rsq_neg_f64:
; SI-SAFE: v_sqrt_f64_e64 v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
; SI-SAFE: v_div_scale_f64

; SI-UNSAFE: v_sqrt_f64_e64 [[SQRT:v\[[0-9]+:[0-9]+\]]], -v{{\[[0-9]+:[0-9]+\]}}
; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
define amdgpu_kernel void @neg_rsq_neg_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #0 {
  %val = load double, double addrspace(1)* %in, align 4
  %val.fneg = fsub double -0.0, %val
  %sqrt = call double @llvm.sqrt.f64(double %val.fneg)
  %div = fdiv double -1.0, %sqrt
  store double %div, double addrspace(1)* %out, align 4
  ret void
}

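; The 2.500000e+00 here is the !fpmath bound attached to the f32 fdivs above:
; up to 2.5 ulp of error is acceptable, which is what permits the
; v_rsq_f32 / v_rcp_f32 based lowerings checked in this file.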
!0 = !{float 2.500000e+00}

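; The kernels run with f32 denormals flushed to zero (preserve-sign for both
; outputs and inputs); the rcp/rsq expectations above assume this flushing mode.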
attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }