; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s

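; i16 -> f16: the checks expect SI (tahiti) to widen through f32
; (v_cvt_f32_u32 followed by v_cvt_f16_f32) since it has no direct
; u16 -> f16 conversion, while VI (fiji) converts with v_cvt_f16_u16.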
; GCN-LABEL: {{^}}uitofp_i16_to_f16
; GCN: buffer_load_ushort v[[A_I16:[0-9]+]]
; SI:  v_cvt_f32_u32_e32 v[[A_F32:[0-9]+]], v[[A_I16]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]

; VI:  v_cvt_f16_u16_e32 v[[R_F16:[0-9]+]], v[[A_I16]]

; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @uitofp_i16_to_f16(
    half addrspace(1)* %r,
    i16 addrspace(1)* %a) {
entry:
  %a.val = load i16, i16 addrspace(1)* %a
  %r.val = uitofp i16 %a.val to half
  store half %r.val, half addrspace(1)* %r
  ret void
}

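; i32 -> f16: neither target has a single u32 -> f16 instruction, so the
; checks expect both to convert to f32 first and then round to f16.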
; GCN-LABEL: {{^}}uitofp_i32_to_f16
; GCN: buffer_load_dword v[[A_I32:[0-9]+]]
; GCN: v_cvt_f32_u32_e32 v[[A_I16:[0-9]+]], v[[A_I32]]
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_I16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @uitofp_i32_to_f16(
    half addrspace(1)* %r,
    i32 addrspace(1)* %a) {
entry:
  %a.val = load i32, i32 addrspace(1)* %a
  %r.val = uitofp i32 %a.val to half
  store half %r.val, half addrspace(1)* %r
  ret void
}

; f16 = uitofp i64 is in uint_to_fp.i64.ll

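; v2i16 -> v2f16: on SI each element goes through f32 and the two halves are
; repacked with a shift and an OR; on VI the checks expect a plain
; v_cvt_f16_u16 for the low half and an SDWA form writing the high half,
; followed by the OR.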
; GCN-LABEL: {{^}}uitofp_v2i16_to_v2f16
; GCN:     buffer_load_dword

; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
; SI-DAG: v_lshlrev_b32_e32
; SI: v_or_b32_e32


; VI-DAG: v_cvt_f16_u16_e32
; VI-DAG: v_cvt_f16_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI:     v_or_b32_e32

; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @uitofp_v2i16_to_v2f16(
    <2 x half> addrspace(1)* %r,
    <2 x i16> addrspace(1)* %a) {
entry:
  %a.val = load <2 x i16>, <2 x i16> addrspace(1)* %a
  %r.val = uitofp <2 x i16> %a.val to <2 x half>
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

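; v2i32 -> v2f16: both targets convert each element u32 -> f32 -> f16; SI
; repacks with a shift and an OR, while VI writes the high half via the SDWA
; form of v_cvt_f16_f32 before the OR.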
; GCN-LABEL: {{^}}uitofp_v2i32_to_v2f16
; GCN:     buffer_load_dwordx2

; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
; SI-DAG: v_lshlrev_b32_e32
; SI: v_or_b32_e32

; VI-DAG: v_cvt_f32_u32_e32
; VI-DAG: v_cvt_f32_u32_e32
; VI-DAG: v_cvt_f16_f32_e32
; VI-DAG: v_cvt_f16_f32_sdwa
; VI:     v_or_b32_e32

; GCN:     buffer_store_dword
; GCN:     s_endpgm
define amdgpu_kernel void @uitofp_v2i32_to_v2f16(
    <2 x half> addrspace(1)* %r,
    <2 x i32> addrspace(1)* %a) {
entry:
  %a.val = load <2 x i32>, <2 x i32> addrspace(1)* %a
  %r.val = uitofp <2 x i32> %a.val to <2 x half>
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

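; i1 -> f16: the XOR of the two compares feeds a v_cndmask selecting between
; 0 and 1.0 in f32, which is then rounded to f16 before the store.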
; GCN-LABEL: {{^}}s_uint_to_fp_i1_to_f16:
; GCN-DAG: v_cmp_le_f32_e32 [[CMP0:vcc]], 1.0, {{v[0-9]+}}
; GCN-DAG: v_cmp_le_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 0, {{v[0-9]+}}
; GCN: s_xor_b64 [[R_CMP:s\[[0-9]+:[0-9]+\]]], [[CMP1]], [[CMP0]]
; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[R_CMP]]
; GCN-NEXT: v_cvt_f16_f32_e32 [[R_F16:v[0-9]+]], [[RESULT]]
; GCN: buffer_store_short
; GCN: s_endpgm
define amdgpu_kernel void @s_uint_to_fp_i1_to_f16(half addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
  %a = load float, float addrspace(1) * %in0
  %b = load float, float addrspace(1) * %in1
  %acmp = fcmp oge float %a, 0.000000e+00
  %bcmp = fcmp oge float %b, 1.000000e+00
  %result = xor i1 %acmp, %bcmp
  %fp = uitofp i1 %result to half
  store half %fp, half addrspace(1)* %out
  ret void
}