; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s

@local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
@local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4

; Check that the LDS size is emitted correctly.
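; (The two 4 x i32 LDS objects take 32 bytes in total; the config words
; below are expected to encode that allocation for each target.)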
; EG: .long 166120
; EG-NEXT: .long 8
; GCN: .long 47180
; GCN-NEXT: .long 32900

; FUNC-LABEL: {{^}}local_memory_two_objects:

; We would like to check that the LDS writes use different addresses,
; but due to variations in the scheduler we can't do this consistently
; on Evergreen GPUs.
; EG: LDS_WRITE
; EG: LDS_WRITE

; GROUP_BARRIER must be the last instruction in a clause.
; EG: GROUP_BARRIER
; EG-NEXT: ALU clause

; Make sure the LDS reads are using different addresses, at different
; constant offsets.
; EG: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
; EG-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]

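; The writes index both LDS objects with the same shifted work item id:
; [[ADDRW]] is tid * 4 (v0 shifted left by 2). On CI the +16 byte base of
; the second object (each array is 16 bytes) is folded into the ds_write
; offset field; on SI it is materialized with a separate v_add_i32,
; checked further down.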
; GCN: v_lshlrev_b32_e32 [[ADDRW:v[0-9]+]], 2, v0
; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*}} offset:16
; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*$}}

; SI: v_add_i32_e32 [[ADDRW_OFF:v[0-9]+]], vcc, 16, [[ADDRW]]

; SI-DAG: ds_write_b32 [[ADDRW]],
; SI-DAG: ds_write_b32 [[ADDRW_OFF]],

; GCN: s_barrier

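; After the barrier each work item reads element 3 - tid from both arrays.
; Assuming local_mem0 is placed at LDS offset 0 and local_mem1 at offset 16
; (as the writes above suggest), those addresses are 12 - [[ADDRW]] and
; 28 - [[ADDRW]] bytes, which the two v_sub_i32 below compute.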
; SI-DAG: v_sub_i32_e32 [[SUB0:v[0-9]+]], vcc, 28, [[ADDRW]]
; SI-DAG: v_sub_i32_e32 [[SUB1:v[0-9]+]], vcc, 12, [[ADDRW]]

; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB0]]
; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB1]]

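; On CI the two reads should instead be merged into a single ds_read2_b32
; from the base 0 - [[ADDRW]], using dword offsets 3 and 7 (12 and 28 bytes).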
; CI: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, 0, [[ADDRW]]
; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, [[SUB]] offset0:3 offset1:7

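; Each work item stores its id into local_mem0[tid] and 2 * tid into
; local_mem1[tid], synchronizes with a local barrier, then loads both
; arrays at index 3 - tid and stores the results to out[tid] and
; out[tid + 4].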
define void @local_memory_two_objects(i32 addrspace(1)* %out) {
entry:
  %x.i = call i32 @llvm.r600.read.tidig.x() #0
  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %x.i
  store i32 %x.i, i32 addrspace(3)* %arrayidx, align 4
  %mul = shl nsw i32 %x.i, 1
  %arrayidx1 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem1, i32 0, i32 %x.i
  store i32 %mul, i32 addrspace(3)* %arrayidx1, align 4
  %sub = sub nsw i32 3, %x.i
  call void @llvm.AMDGPU.barrier.local()
  %arrayidx2 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %sub
  %0 = load i32, i32 addrspace(3)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %x.i
  store i32 %0, i32 addrspace(1)* %arrayidx3, align 4
  %arrayidx4 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem1, i32 0, i32 %sub
  %1 = load i32, i32 addrspace(3)* %arrayidx4, align 4
  %add = add nsw i32 %x.i, 4
  %arrayidx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %add
  store i32 %1, i32 addrspace(1)* %arrayidx5, align 4
  ret void
}

declare i32 @llvm.r600.read.tidig.x() #0
declare void @llvm.AMDGPU.barrier.local()

attributes #0 = { readnone }