; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s

; Even though general vector types are not supported in PTX, we can still
; optimize loads/stores with pseudo-vector instructions of the form:
;
; ld.v2.f32 {%f0, %f1}, [%r0]
;
; which will load two floats at once into scalar registers.
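;
; The optimization covers stores as well; PTX provides matching vectorized
; store forms, e.g. st.v2.f32 [%r0], {%f0, %f1}, which the stores below
; should lower to (this test only checks the loads).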

define void @foo(<2 x float>* %a) {
; CHECK: .func foo
; CHECK: ld.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}
  %t1 = load <2 x float>* %a
  %t2 = fmul <2 x float> %t1, %t1
  store <2 x float> %t2, <2 x float>* %a
  ret void
}

define void @foo2(<4 x float>* %a) {
; CHECK: .func foo2
; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
  %t1 = load <4 x float>* %a
  %t2 = fmul <4 x float> %t1, %t1
  store <4 x float> %t2, <4 x float>* %a
  ret void
}

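; PTX vector operands are at most four elements wide (.v2 and .v4), so an
; <8 x float> access is legalized into two v4 operations, as the CHECK-NEXT
; below verifies.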
define void @foo3(<8 x float>* %a) {
; CHECK: .func foo3
; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
; CHECK-NEXT: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
  %t1 = load <8 x float>* %a
  %t2 = fmul <8 x float> %t1, %t1
  store <8 x float> %t2, <8 x float>* %a
  ret void
}

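; Integer vectors are vectorized the same way; i32 elements use the .u32
; load forms and land in %r registers.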
define void @foo4(<2 x i32>* %a) {
; CHECK: .func foo4
; CHECK: ld.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}
  %t1 = load <2 x i32>* %a
  %t2 = mul <2 x i32> %t1, %t1
  store <2 x i32> %t2, <2 x i32>* %a
  ret void
}

define void @foo5(<4 x i32>* %a) {
; CHECK: .func foo5
; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
  %t1 = load <4 x i32>* %a
  %t2 = mul <4 x i32> %t1, %t1
  store <4 x i32> %t2, <4 x i32>* %a
  ret void
}

define void @foo6(<8 x i32>* %a) {
; CHECK: .func foo6
; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
; CHECK-NEXT: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
  %t1 = load <8 x i32>* %a
  %t2 = mul <8 x i32> %t1, %t1
  store <8 x i32> %t2, <8 x i32>* %a
  ret void
}