; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; LDS scratch array: 576 doubles (4608 bytes), 16-byte aligned so the
; ds_write2_b64 paired stores formed below are legal.
@lds = internal addrspace(3) global [576 x double] undef, align 16

; Stores to the same address appear multiple places in the same
; block. When sorted by offset, the merges would fail. We should form
; two groupings of ds_write2_b64 on either side of the fence.
define amdgpu_kernel void @same_address_fence_merge_write2() #0 {
; GCN-LABEL: same_address_fence_merge_write2:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_mov_b32 s0, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
; GCN-NEXT:    s_mov_b32 s1, 0x40100000
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    v_add_u32_e32 v3, 0x840, v2
; GCN-NEXT:    v_add_u32_e32 v4, 0xc60, v2
; GCN-NEXT:    ds_write2_b64 v2, v[0:1], v[0:1] offset1:66
; GCN-NEXT:    ds_write2_b64 v2, v[0:1], v[0:1] offset0:132 offset1:198
; GCN-NEXT:    ds_write2_b64 v3, v[0:1], v[0:1] offset1:66
; GCN-NEXT:    ds_write2_b64 v4, v[0:1], v[0:1] offset1:66
; GCN-NEXT:    s_mov_b32 s1, 0x3ff00000
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_barrier
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    ds_write2_b64 v2, v[0:1], v[0:1] offset1:66
; GCN-NEXT:    ds_write2_b64 v2, v[0:1], v[0:1] offset0:132 offset1:198
; GCN-NEXT:    ds_write2_b64 v3, v[0:1], v[0:1] offset1:66
; GCN-NEXT:    ds_write2_b64 v4, v[0:1], v[0:1] offset1:66
; GCN-NEXT:    s_endpgm
bb:
  ; Eight stores of 4.0 at strides of 66 doubles from this lane's slot.
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
  %tmp1 = getelementptr inbounds [576 x double], [576 x double] addrspace(3)* @lds, i32 0, i32 %tmp
  store double 4.000000e+00, double addrspace(3)* %tmp1, align 8
  %tmp2 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 66
  store double 4.000000e+00, double addrspace(3)* %tmp2, align 8
  %tmp3 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 132
  store double 4.000000e+00, double addrspace(3)* %tmp3, align 8
  %tmp4 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 198
  store double 4.000000e+00, double addrspace(3)* %tmp4, align 8
  %tmp5 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 264
  store double 4.000000e+00, double addrspace(3)* %tmp5, align 8
  %tmp6 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 330
  store double 4.000000e+00, double addrspace(3)* %tmp6, align 8
  %tmp7 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 396
  store double 4.000000e+00, double addrspace(3)* %tmp7, align 8
  %tmp8 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 462
  store double 4.000000e+00, double addrspace(3)* %tmp8, align 8
  ; Release/barrier/acquire: a merge barrier — ds_write2 formation must not
  ; pair a store from before the fence with one from after it.
  fence syncscope("workgroup") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("workgroup") acquire
  ; Same eight addresses stored again (value 1.0) after the fence.
  store double 1.000000e+00, double addrspace(3)* %tmp1, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp2, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp3, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp4, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp5, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp6, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp7, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp8, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #0
declare void @llvm.amdgcn.s.barrier() #1

attributes #0 = { nounwind readnone speculatable }
attributes #1 = { convergent nounwind }

; Workitem id range [0, 1024) for the !range on the id.x call above.
!0 = !{i32 0, i32 1024}