; RUN: llc < %s -march=amdgcn -mcpu=SI -show-mc-encoding -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=GCN %s
; RUN: llc < %s -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs | FileCheck --check-prefix=VI --check-prefix=GCN %s

; SMRD load with an immediate offset.
; GCN-LABEL: {{^}}smrd0:
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4
define void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
  %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 1
  %1 = load i32, i32 addrspace(2)* %0
  store i32 %1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with the largest possible immediate offset.
; GCN-LABEL: {{^}}smrd1:
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
  %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 255
  %1 = load i32, i32 addrspace(2)* %0
  store i32 %1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with an offset greater than the largest possible immediate.
; GCN-LABEL: {{^}}smrd2:
; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
; GCN: s_endpgm
define void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
  %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 256
  %1 = load i32, i32 addrspace(2)* %0
  store i32 %1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with a 64-bit offset
; GCN-LABEL: {{^}}smrd3:
; FIXME: There are too many copies here because we don't fold immediates
;        through REG_SEQUENCE
; SI: s_mov_b32 s[[SLO:[0-9]+]], 0 ;
; SI: s_mov_b32 s[[SHI:[0-9]+]], 4
; SI: s_mov_b32 s[[SSLO:[0-9]+]], s[[SLO]]
; SI-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SSLO]]
; SI-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; FIXME: We should be able to use s_load_dword here
; SI: buffer_load_dword v{{[0-9]+}}, v{{\[}}[[VLO]]:[[VHI]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64
; TODO: Add VI checks
; GCN: s_endpgm
define void @smrd3(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
  %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 4294967296 ; 2 ^ 32
  %1 = load i32, i32 addrspace(2)* %0
  store i32 %1, i32 addrspace(1)* %out
  ret void
}

; SMRD load using the load.const intrinsic with an immediate offset
; GCN-LABEL: {{^}}smrd_load_const0:
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x10
define void @smrd_load_const0(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
  %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
  %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
  ret void
}

; SMRD load using the load.const intrinsic with the largest possible immediate
; offset.
; GCN-LABEL: {{^}}smrd_load_const1:
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define void @smrd_load_const1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
  %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
  %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1020)
  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
  ret void
}
; SMRD load using the load.const intrinsic with an offset greater than the
; largest possible immediate offset.
; GCN-LABEL: {{^}}smrd_load_const2:
; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
define void @smrd_load_const2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
  %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
  %21 = load <16 x i8>, <16 x i8> addrspace(2)* %20
  %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1024)
  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
  ret void
}

; Function Attrs: nounwind readnone
declare float @llvm.SI.load.const(<16 x i8>, i32) #1

declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)

attributes #0 = { "ShaderType"="0" }
attributes #1 = { nounwind readnone }
