; RUN: llc -O0 -mcpu=pwr7 <%s | FileCheck %s

; Test optimizations of build_vector for 6-bit immediates.
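; The vspltisb/vspltish/vspltisw instructions splat a 5-bit signed
; immediate, so a single splat only covers [-16, 15]. Values outside that
; range are built from two splats (see the CHECK lines below): an even
; value 2v is materialized as splat(v) + splat(v), a positive odd value v
; as splat(v-16) - splat(-16), and a negative odd value v as
; splat(v+16) + splat(-16).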

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

%v4i32 = type <4 x i32>
%v8i16 = type <8 x i16>
%v16i8 = type <16 x i8>

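; 18 is even and 18/2 = 9 fits in 5 bits: expect splat(9) added to itself.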
define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
        %p = load %v4i32* %P
        %r = add %v4i32 %p, < i32 18, i32 18, i32 18, i32 18 >
        store %v4i32 %r, %v4i32* %S
        ret void
}

; CHECK-LABEL: test_v4i32_pos_even:
; CHECK: vspltisw [[REG1:[0-9]+]], 9
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]

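; -28 is even and -28/2 = -14 fits in 5 bits: expect splat(-14) added to itself.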
define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
        %p = load %v4i32* %P
        %r = add %v4i32 %p, < i32 -28, i32 -28, i32 -28, i32 -28 >
        store %v4i32 %r, %v4i32* %S
        ret void
}

; CHECK-LABEL: test_v4i32_neg_even:
; CHECK: vspltisw [[REG1:[0-9]+]], -14
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]

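; 30 is even and 30/2 = 15 fits in 5 bits: expect splat(15) added to itself.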
define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
        %p = load %v8i16* %P
        %r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
        store %v8i16 %r, %v8i16* %S
        ret void
}

; CHECK-LABEL: test_v8i16_pos_even:
; CHECK: vspltish [[REG1:[0-9]+]], 15
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]

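; -32 is even and -32/2 = -16 fits in 5 bits: expect splat(-16) added to itself.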
define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
        %p = load %v8i16* %P
        %r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
        store %v8i16 %r, %v8i16* %S
        ret void
}

; CHECK-LABEL: test_v8i16_neg_even:
; CHECK: vspltish [[REG1:[0-9]+]], -16
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]

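; 16 is even and 16/2 = 8 fits in 5 bits: expect splat(8) added to itself.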
define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
        %p = load %v16i8* %P
        %r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16 >
        store %v16i8 %r, %v16i8* %S
        ret void
}

; CHECK-LABEL: test_v16i8_pos_even:
; CHECK: vspltisb [[REG1:[0-9]+]], 8
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]

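; -18 is even and -18/2 = -9 fits in 5 bits: expect splat(-9) added to itself.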
define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
        %p = load %v16i8* %P
        %r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18 >
        store %v16i8 %r, %v16i8* %S
        ret void
}

; CHECK-LABEL: test_v16i8_neg_even:
; CHECK: vspltisb [[REG1:[0-9]+]], -9
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]

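; 27 is odd: expect 27 = 11 - (-16), splat(11) minus splat(-16).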
define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
        %p = load %v4i32* %P
        %r = add %v4i32 %p, < i32 27, i32 27, i32 27, i32 27 >
        store %v4i32 %r, %v4i32* %S
        ret void
}

; CHECK-LABEL: test_v4i32_pos_odd:
; CHECK: vspltisw [[REG2:[0-9]+]], -16
; CHECK: vspltisw [[REG1:[0-9]+]], 11
; CHECK: vsubuwm {{[0-9]+}}, [[REG1]], [[REG2]]

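; -27 is odd: expect -27 = -11 + (-16), splat(-11) plus splat(-16).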
define void @test_v4i32_neg_odd(%v4i32* %P, %v4i32* %S) {
        %p = load %v4i32* %P
        %r = add %v4i32 %p, < i32 -27, i32 -27, i32 -27, i32 -27 >
        store %v4i32 %r, %v4i32* %S
        ret void
}

; CHECK-LABEL: test_v4i32_neg_odd:
; CHECK: vspltisw [[REG2:[0-9]+]], -16
; CHECK: vspltisw [[REG1:[0-9]+]], -11
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG2]]

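; 31 is odd: expect 31 = 15 - (-16), splat(15) minus splat(-16).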
define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
        %p = load %v8i16* %P
        %r = add %v8i16 %p, < i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31 >
        store %v8i16 %r, %v8i16* %S
        ret void
}

; CHECK-LABEL: test_v8i16_pos_odd:
; CHECK: vspltish [[REG2:[0-9]+]], -16
; CHECK: vspltish [[REG1:[0-9]+]], 15
; CHECK: vsubuhm {{[0-9]+}}, [[REG1]], [[REG2]]

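; -31 is odd: expect -31 = -15 + (-16), splat(-15) plus splat(-16).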
define void @test_v8i16_neg_odd(%v8i16* %P, %v8i16* %S) {
        %p = load %v8i16* %P
        %r = add %v8i16 %p, < i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31 >
        store %v8i16 %r, %v8i16* %S
        ret void
}

; CHECK-LABEL: test_v8i16_neg_odd:
; CHECK: vspltish [[REG2:[0-9]+]], -16
; CHECK: vspltish [[REG1:[0-9]+]], -15
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG2]]

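; 17 is odd: expect 17 = 1 - (-16), splat(1) minus splat(-16).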
define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
        %p = load %v16i8* %P
        %r = add %v16i8 %p, < i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17 >
        store %v16i8 %r, %v16i8* %S
        ret void
}

; CHECK-LABEL: test_v16i8_pos_odd:
; CHECK: vspltisb [[REG2:[0-9]+]], -16
; CHECK: vspltisb [[REG1:[0-9]+]], 1
; CHECK: vsububm {{[0-9]+}}, [[REG1]], [[REG2]]

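; -17 is odd: expect -17 = -1 + (-16), splat(-1) plus splat(-16).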
define void @test_v16i8_neg_odd(%v16i8* %P, %v16i8* %S) {
        %p = load %v16i8* %P
        %r = add %v16i8 %p, < i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17 >
        store %v16i8 %r, %v16i8* %S
        ret void
}

; CHECK-LABEL: test_v16i8_neg_odd:
; CHECK: vspltisb [[REG2:[0-9]+]], -16
; CHECK: vspltisb [[REG1:[0-9]+]], -1
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG2]]