; Codegen test: verify that chains of llvm.minnum calls are combined into a
; single v_min3 instruction where the target supports it (and are NOT combined
; for v2f16, where no packed min3 exists).
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s

; minnum(minnum(a, b), c) on f32 should fold to a single v_min3_f32 on all
; generations.
; GCN-LABEL: {{^}}test_fmin3_olt_0_f32:
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_0_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  ; Volatile loads keep the three operands as distinct buffer loads.
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.minnum.f32(float %a, float %b)
  %f1 = call float @llvm.minnum.f32(float %f0, float %c)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}

; Commute operand of second fmin: minnum(c, minnum(a, b)) must still form
; a single v_min3_f32.
; GCN-LABEL: {{^}}test_fmin3_olt_1_f32:
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_1_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.minnum.f32(float %a, float %b)
  ; %f0 appears as the SECOND operand here (commuted vs. test_fmin3_olt_0_f32).
  %f1 = call float @llvm.minnum.f32(float %c, float %f0)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}

; f16 min3: SI has no f16 min, so it promotes to f32 (min3 + cvt back);
; VI has f16 min but no 3-operand form; GFX9 forms v_min3_f16 directly.
; GCN-LABEL: {{^}}test_fmin3_olt_0_f16:
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]

; SI: v_min3_f32 [[RESULT_F32:v[0-9]+]],
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]

; VI: v_min_f16_e32
; VI: v_min_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_min3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_0_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.minnum.f16(half %a, half %b)
  %f1 = call half @llvm.minnum.f16(half %f0, half %c)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}

; Commute operand of second fmin (f16 variant): minnum(c, minnum(a, b)).
; GCN-LABEL: {{^}}test_fmin3_olt_1_f16:
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]

; SI promotes each half to f32 before the combined min3, then converts back.
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], [[REGA]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], [[REGB]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], [[REGC]]
; SI: v_min3_f32 [[RESULT_F32:v[0-9]+]], [[CVT_C]], [[CVT_A]], [[CVT_B]]
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]

; VI: v_min_f16_e32
; VI: v_min_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_min3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGA]], [[REGB]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_1_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.minnum.f16(half %a, half %b)
  ; %f0 is the SECOND operand here (commuted vs. test_fmin3_olt_0_f16).
  %f1 = call half @llvm.minnum.f16(half %c, half %f0)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}

; Checks whether the test passes; performMinMaxCombine() should not optimize vector patterns of min3
; since there are no pack instructions for fmin3.
; GCN-LABEL: {{^}}no_fmin3_v2f16:

; SI: v_cvt_f16_f32_e32
; SI: v_min_f32_e32
; SI-NEXT: v_min_f32_e32
; SI-NEXT: v_min3_f32
; SI-NEXT: v_min3_f32

; VI lowers each half lane separately via SDWA + scalar f16 min, then repacks.
; VI: v_min_f16_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_min_f16_e32 v0, v0, v1
; VI: v_min_f16_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI: v_min_f16_e32 v0, v2, v0
; VI: v_min_f16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI: v_min_f16_e32 v0, v0, v3
; VI: v_or_b32_e32 v0, v0, v1

; GFX9 must emit three packed two-operand mins, never a (nonexistent) packed min3.
; GFX9: v_pk_min_f16
; GFX9: v_pk_min_f16
; GFX9: v_pk_min_f16
define <2 x half> @no_fmin3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) {
entry:
  %min = tail call fast <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b)
  %min1 = tail call fast <2 x half> @llvm.minnum.v2f16(<2 x half> %c, <2 x half> %min)
  %res = tail call fast <2 x half> @llvm.minnum.v2f16(<2 x half> %min1, <2 x half> %d)
  ret <2 x half> %res
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @llvm.minnum.f32(float, float) #1
declare half @llvm.minnum.f16(half, half) #1
declare <2 x half> @llvm.minnum.v2f16(<2 x half>, <2 x half>)

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }