; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=irtranslator %s -o - | FileCheck %s

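; An inreg float argument is passed in a scalar register; here the first
; argument arrives in $sgpr2 and is copied to a virtual s32 register.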
define amdgpu_vs void @test_f32_inreg(float inreg %arg0) {
  ; CHECK-LABEL: name: test_f32_inreg
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; CHECK:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), 32, 15, [[COPY]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), 0, 0
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg0, float undef, float undef, float undef, i1 false, i1 false) #0
  ret void
}

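; Without inreg, the same float argument is passed in a vector register
; ($vgpr0) instead.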
define amdgpu_vs void @test_f32(float %arg0) {
  ; CHECK-LABEL: name: test_f32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $vgpr0
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), 32, 15, [[COPY]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), 0, 0
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg0, float undef, float undef, float undef, i1 false, i1 false) #0
  ret void
}

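; A 64-bit pointer inreg argument occupies two consecutive SGPRs
; ($sgpr2/$sgpr3) and is reassembled into a p4 value with G_MERGE_VALUES.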
define amdgpu_vs void @test_ptr2_inreg(i32 addrspace(4)* inreg %arg0) {
  ; CHECK-LABEL: name: test_ptr2_inreg
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; CHECK:   [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (volatile load 4 from %ir.arg0, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %tmp0 = load volatile i32, i32 addrspace(4)* %arg0
  ret void
}

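; A pointer that follows a single 32-bit inreg argument starts at the next
; SGPR ($sgpr3/$sgpr4), i.e. it is not padded to an aligned register pair.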
define amdgpu_vs void @test_sgpr_alignment0(float inreg %arg0, i32 addrspace(4)* inreg %arg1) {
  ; CHECK-LABEL: name: test_sgpr_alignment0
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
  ; CHECK:   [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
  ; CHECK:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (volatile load 4 from %ir.arg1, addrspace 4)
  ; CHECK:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), 32, 15, [[COPY]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), 0, 0
  ; CHECK:   S_ENDPGM 0
  %tmp0 = load volatile i32, i32 addrspace(4)* %arg1
  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg0, float undef, float undef, float undef, i1 false, i1 false) #0
  ret void
}

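; inreg and non-inreg parameters are assigned from separate register pools,
; so interleaving them in the signature does not perturb either sequence:
; the inreg arguments take $sgpr2/$sgpr3 and the others take $vgpr0/$vgpr1.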
define amdgpu_vs void @test_order(float inreg %arg0, float inreg %arg1, float %arg2, float %arg3) {
  ; CHECK-LABEL: name: test_order
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), 32, 15, [[COPY2]](s32), [[COPY]](s32), [[COPY3]](s32), [[COPY1]](s32), 0, 0
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg2, float %arg0, float %arg3, float %arg1, i1 false, i1 false) #0
  ret void
}

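; A struct return is split into its scalar elements; each element is made
; uniform with llvm.amdgcn.readfirstlane and returned in $sgpr0/$sgpr1
; through SI_RETURN_TO_EPILOG.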
define amdgpu_vs <{ i32, i32 }> @ret_struct(i32 inreg %arg0, i32 inreg %arg1) {
  ; CHECK-LABEL: name: ret_struct
  ; CHECK: bb.1.main_body:
  ; CHECK:   liveins: $sgpr2, $sgpr3
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
  ; CHECK:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK:   [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.readfirstlane), [[COPY]](s32)
  ; CHECK:   $sgpr0 = COPY [[INT]](s32)
  ; CHECK:   [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.readfirstlane), [[COPY1]](s32)
  ; CHECK:   $sgpr1 = COPY [[INT1]](s32)
  ; CHECK:   SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
main_body:
  %tmp0 = insertvalue <{ i32, i32 }> undef, i32 %arg0, 0
  %tmp1 = insertvalue <{ i32, i32 }> %tmp0, i32 %arg1, 1
  ret <{ i32, i32 }> %tmp1
}

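; A plain i32 return value takes the same readfirstlane-and-copy path into
; $sgpr0.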
define amdgpu_vs i32 @non_void_ret() {
  ; CHECK-LABEL: name: non_void_ret
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.readfirstlane), [[C]](s32)
  ; CHECK:   $sgpr0 = COPY [[INT]](s32)
  ; CHECK:   SI_RETURN_TO_EPILOG implicit $sgpr0
  ret i32 0
}

declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0

attributes #0 = { nounwind }