; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s

; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=SI-UNSAFE -check-prefix=FUNC %s
; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-UNSAFE -check-prefix=VI-UNSAFE -check-prefix=FUNC %s

declare double @llvm.fabs.f64(double) #0
declare double @llvm.floor.f64(double) #0

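; Check that (x - floor(x)) is selected as a fract pattern: SI expands it with
; v_fract_f64, a v_min_f64 against 0x3fefffffffffffff (the largest double below
; 1.0), and a v_cmp_class/v_cndmask guard; CI and later select v_floor_f64 plus
; a subtract; under unsafe FP math a single v_fract_f64 is emitted.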
; FUNC-LABEL: {{^}}fract_f64:
; SI-DAG: v_fract_f64_e32 [[FRC:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]
; SI-DAG: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
; SI-DAG: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
; SI-DAG: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], [[FRC]], v{{\[}}[[UPLO]]:[[UPHI]]]
; SI-DAG: v_cmp_class_f64_e64 vcc, v{{\[}}[[LO]]:[[HI]]], 3
; SI: v_cndmask_b32_e32 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], vcc
; SI: v_cndmask_b32_e32 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], vcc
; SI: v_add_f64 [[SUB0:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO]]:[[HI]]{{\]}}, -v{{\[}}[[RESLO]]:[[RESHI]]{{\]}}
; SI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO]]:[[HI]]{{\]}}, -[[SUB0]]

; CI: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; CI: v_floor_f64_e32 [[FLOORX:v\[[0-9]+:[0-9]+\]]], [[X]]
; CI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], [[X]], -[[FLOORX]]

; GCN-UNSAFE: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; GCN-UNSAFE: v_fract_f64_e32 [[FRACT:v\[[0-9]+:[0-9]+\]]], [[X]]

; GCN: buffer_store_dwordx2 [[FRACT]]
define amdgpu_kernel void @fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
  %x = load double, double addrspace(1)* %src
  %floor.x = call double @llvm.floor.f64(double %x)
  %fract = fsub double %x, %floor.x
  store double %fract, double addrspace(1)* %out
  ret void
}

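; Same pattern with a negated input: the fneg should fold into the source
; modifiers of the selected instructions (-v[lo:hi] on SI, -[[X]] on CI/unsafe).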
; FUNC-LABEL: {{^}}fract_f64_neg:
; SI-DAG: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], -v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]
; SI-DAG: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
; SI-DAG: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
; SI-DAG: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], [[FRC]], v{{\[}}[[UPLO]]:[[UPHI]]]
; SI-DAG: v_cmp_class_f64_e64 vcc, v{{\[}}[[LO]]:[[HI]]], 3
; SI: v_cndmask_b32_e32 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], vcc
; SI: v_cndmask_b32_e32 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], vcc
; SI: v_add_f64 [[SUB0:v\[[0-9]+:[0-9]+\]]], -v{{\[}}[[LO]]:[[HI]]{{\]}}, -v{{\[}}[[RESLO]]:[[RESHI]]{{\]}}
; SI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -v{{\[}}[[LO]]:[[HI]]{{\]}}, -[[SUB0]]

; CI: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; CI: v_floor_f64_e64 [[FLOORX:v\[[0-9]+:[0-9]+\]]], -[[X]]
; CI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -[[X]], -[[FLOORX]]

; GCN-UNSAFE: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; GCN-UNSAFE: v_fract_f64_e64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -[[X]]

; GCN: buffer_store_dwordx2 [[FRACT]]
define amdgpu_kernel void @fract_f64_neg(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
  %x = load double, double addrspace(1)* %src
  %neg.x = fsub double -0.0, %x
  %floor.neg.x = call double @llvm.floor.f64(double %neg.x)
  %fract = fsub double %neg.x, %floor.neg.x
  store double %fract, double addrspace(1)* %out
  ret void
}

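; Same pattern with fneg(fabs(x)): both modifiers should fold into the
; instruction sources as -|...|.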
; FUNC-LABEL: {{^}}fract_f64_neg_abs:
; SI-DAG: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], -|v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]|
; SI-DAG: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
; SI-DAG: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
; SI-DAG: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], [[FRC]], v{{\[}}[[UPLO]]:[[UPHI]]]
; SI-DAG: v_cmp_class_f64_e64 vcc, v{{\[}}[[LO]]:[[HI]]], 3
; SI: v_cndmask_b32_e32 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], vcc
; SI: v_cndmask_b32_e32 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], vcc
; SI: v_add_f64 [[SUB0:v\[[0-9]+:[0-9]+\]]], -|v{{\[}}[[LO]]:[[HI]]{{\]}}|, -v{{\[}}[[RESLO]]:[[RESHI]]{{\]}}
; SI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -|v{{\[}}[[LO]]:[[HI]]{{\]}}|, -[[SUB0]]

; CI: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; CI: v_floor_f64_e64 [[FLOORX:v\[[0-9]+:[0-9]+\]]], -|[[X]]|
; CI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -|[[X]]|, -[[FLOORX]]

; GCN-UNSAFE: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; GCN-UNSAFE: v_fract_f64_e64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -|[[X]]|

; GCN: buffer_store_dwordx2 [[FRACT]]
define amdgpu_kernel void @fract_f64_neg_abs(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
  %x = load double, double addrspace(1)* %src
  %abs.x = call double @llvm.fabs.f64(double %x)
  %neg.abs.x = fsub double -0.0, %abs.x
  %floor.neg.abs.x = call double @llvm.floor.f64(double %neg.abs.x)
  %fract = fsub double %neg.abs.x, %floor.neg.abs.x
  store double %fract, double addrspace(1)* %out
  ret void
}

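; When floor(x) has a second use, the unsafe-math VI lowering should still emit
; both v_floor_f64 and v_fract_f64 and store each result.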
; FUNC-LABEL: {{^}}multi_use_floor_fract_f64:
; VI-UNSAFE: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; VI-UNSAFE-DAG: v_floor_f64_e32 [[FLOOR:v\[[0-9]+:[0-9]+\]]], [[X]]
; VI-UNSAFE-DAG: v_fract_f64_e32 [[FRACT:v\[[0-9]+:[0-9]+\]]], [[X]]
; VI-UNSAFE: buffer_store_dwordx2 [[FLOOR]]
; VI-UNSAFE: buffer_store_dwordx2 [[FRACT]]
define amdgpu_kernel void @multi_use_floor_fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
  %x = load double, double addrspace(1)* %src
  %floor.x = call double @llvm.floor.f64(double %x)
  %fract = fsub double %x, %floor.x
  store volatile double %floor.x, double addrspace(1)* %out
  store volatile double %fract, double addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }