; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
; RUN: llc -mtriple=amdgcn-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
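
; The three RUN lines cover the default subtarget, an HSA target (Kaveri),
; and VI (Tonga with flat-for-global disabled), so both the buffer and the
; flat store-selection paths are exercised.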
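
; A single f64 load from the constant address space should select to one
; scalar SMEM load (s_load_dwordx2); the result is stored with a buffer
; store on non-HSA targets and a flat store on HSA.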
; FUNC-LABEL: {{^}}constant_load_f64:
; GCN: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}]
; GCN-NOHSA: buffer_store_dwordx2
; GCN-HSA: flat_store_dwordx2
define amdgpu_kernel void @constant_load_f64(double addrspace(1)* %out, double addrspace(4)* %in) #0 {
  %ld = load double, double addrspace(4)* %in
  store double %ld, double addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }

; Tests whether a chain of eight 64-bit constant loads gets vectorized into a single wider load.
; FUNC-LABEL: {{^}}constant_load_2v4f64:
; GCN: s_load_dwordx16
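; Note: every load below is only dword-aligned (align 4). Scalar SMEM loads
; are assumed to need only dword alignment here, so merging them into a
; single s_load_dwordx16 should remain legal despite the low alignment.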
define amdgpu_kernel void @constant_load_2v4f64(double addrspace(4)* noalias nocapture readonly %weights, double addrspace(1)* noalias nocapture %out_ptr) {
entry:
  %out_ptr.promoted = load double, double addrspace(1)* %out_ptr, align 4
  %tmp = load double, double addrspace(4)* %weights, align 4
  %add = fadd double %tmp, %out_ptr.promoted
  %arrayidx.1 = getelementptr inbounds double, double addrspace(4)* %weights, i64 1
  %tmp1 = load double, double addrspace(4)* %arrayidx.1, align 4
  %add.1 = fadd double %tmp1, %add
  %arrayidx.2 = getelementptr inbounds double, double addrspace(4)* %weights, i64 2
  %tmp2 = load double, double addrspace(4)* %arrayidx.2, align 4
  %add.2 = fadd double %tmp2, %add.1
  %arrayidx.3 = getelementptr inbounds double, double addrspace(4)* %weights, i64 3
  %tmp3 = load double, double addrspace(4)* %arrayidx.3, align 4
  %add.3 = fadd double %tmp3, %add.2
  %arrayidx.4 = getelementptr inbounds double, double addrspace(4)* %weights, i64 4
  %tmp4 = load double, double addrspace(4)* %arrayidx.4, align 4
  %add.4 = fadd double %tmp4, %add.3
  %arrayidx.5 = getelementptr inbounds double, double addrspace(4)* %weights, i64 5
  %tmp5 = load double, double addrspace(4)* %arrayidx.5, align 4
  %add.5 = fadd double %tmp5, %add.4
  %arrayidx.6 = getelementptr inbounds double, double addrspace(4)* %weights, i64 6
  %tmp6 = load double, double addrspace(4)* %arrayidx.6, align 4
  %add.6 = fadd double %tmp6, %add.5
  %arrayidx.7 = getelementptr inbounds double, double addrspace(4)* %weights, i64 7
  %tmp7 = load double, double addrspace(4)* %arrayidx.7, align 4
  %add.7 = fadd double %tmp7, %add.6
  store double %add.7, double addrspace(1)* %out_ptr, align 4
  ret void
}