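; InstCombine tests for folding the x86 SSSE3/AVX2 pshufb intrinsics into
; generic IR (a shufflevector, a constant, or the input vector itself).

; Verify that a pshufb whose mask selects each byte from its own position
; (an identity shuffle) folds to a plain return of the input vector.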
define <16 x i8> @identity_test(<16 x i8> %InVec) {
; CHECK-LABEL: @identity_test(
; CHECK-NEXT: ret <16 x i8> %InVec
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i…
  ret <16 x i8> %1
}

define <32 x i8> @identity_test_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @identity_test_avx2(
; CHECK-NEXT: ret <32 x i8> %InVec
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, …
  ret <32 x i8> %1
}
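
; Verify that a pshufb whose mask has the sign bit set in every byte (-128)
; folds to an all-zero vector, since a set bit 7 forces the output byte to zero.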
define <16 x i8> @fold_to_zero_vector(<16 x i8> %InVec) {
; CHECK-LABEL: @fold_to_zero_vector(
; CHECK-NEXT: ret <16 x i8> zeroinitializer
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i…
  ret <16 x i8> %1
}

define <32 x i8> @fold_to_zero_vector_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @fold_to_zero_vector_avx2(
; CHECK-NEXT: ret <32 x i8> zeroinitializer
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -12…
  ret <32 x i8> %1
}
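
; Verify that an all-zero shuffle mask becomes a splat of the first byte. The
; AVX2 variant splats byte 0 of each 128-bit lane, because vpshufb indexes
; within lanes, so its expected shuffle mask is not simply zeroinitializer.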
define <16 x i8> @splat_test(<16 x i8> %InVec) {
; CHECK-LABEL: @splat_test(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> zeroinitializer)
  ret <16 x i8> %1
}

define <32 x i8> @splat_test_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @splat_test_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> undef, <32 x i32> <i32 0,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> zeroinitializer)
  ret <32 x i8> %1
}
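
; Verify that masks mixing -128 entries with in-place byte indices fold to a
; single shufflevector that blends %InVec with a constant zero vector.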
define <16 x i8> @blend1(<16 x i8> %InVec) {
; CHECK-LABEL: @blend1(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 -128, i8 1, i8 -128, i8 3, i8 -128…
  ret <16 x i8> %1
}

define <16 x i8> @blend2(<16 x i8> %InVec) {
; CHECK-LABEL: @blend2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 -128, i8 -128, i8 2, i8 3, i8 -128…
  ret <16 x i8> %1
}

define <16 x i8> @blend3(<16 x i8> %InVec) {
; CHECK-LABEL: @blend3(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i…
  ret <16 x i8> %1
}

define <16 x i8> @blend4(<16 x i8> %InVec) {
; CHECK-LABEL: @blend4(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i…
  ret <16 x i8> %1
}

define <16 x i8> @blend5(<16 x i8> %InVec) {
; CHECK-LABEL: @blend5(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -128, i8 -…
  ret <16 x i8> %1
}

define <16 x i8> @blend6(<16 x i8> %InVec) {
; CHECK-LABEL: @blend6(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 0, i8 1, i8 -128, i8 -128, i8 -128…
  ret <16 x i8> %1
}
define <32 x i8> @blend1_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @blend1_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 -128, i8 1, i8 -128, i8 3, i8 -128, i8 …
  ret <32 x i8> %1
}

define <32 x i8> @blend2_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @blend2_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 -128, i8 -128, i8 2, i8 3, i8 -128, i8 …
  ret <32 x i8> %1
}

define <32 x i8> @blend3_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @blend3_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 4, …
  ret <32 x i8> %1
}

define <32 x i8> @blend4_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @blend4_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -12…
  ret <32 x i8> %1
}

define <32 x i8> @blend5_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @blend5_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -128, i8 -128, …
  ret <32 x i8> %1
}

define <32 x i8> @blend6_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @blend6_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 0, i8 1, i8 -128, i8 -128, i8 -128, i8 …
  ret <32 x i8> %1
}
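
; Verify that a mask keeping the low eight bytes in place and zeroing the rest
; (the movq idiom) is likewise folded into a blend with a constant zero vector.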
define <16 x i8> @movq_idiom(<16 x i8> %InVec) {
; CHECK-LABEL: @movq_idiom(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i…
  ret <16 x i8> %1
}

define <32 x i8> @movq_idiom_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @movq_idiom_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, …
  ret <32 x i8> %1
}
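
; Verify that masks without any -128 entries fold to a plain shufflevector
; permutation of %InVec, with no second source operand needed.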
define <16 x i8> @permute1(<16 x i8> %InVec) {
; CHECK-LABEL: @permute1(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> undef, <16 x i32> <i32 4,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 4, i8 5, i8 6, i8 7, i8 4, i8 5, i…
  ret <16 x i8> %1
}

define <16 x i8> @permute2(<16 x i8> %InVec) {
; CHECK-LABEL: @permute2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> undef, <16 x i32> <i32 0,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i…
  ret <16 x i8> %1
}

define <32 x i8> @permute1_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @permute1_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> undef, <32 x i32> <i32 4,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 4, i8 5, i8 6, i8 7, i8 4, i8 5, i8 6, …
  ret <32 x i8> %1
}

define <32 x i8> @permute2_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @permute2_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> undef, <32 x i32> <i32 0,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, …
  ret <32 x i8> %1
}
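
; The remaining identity, zero-fold, and permute tests repeat the patterns
; above with out-of-range mask bytes. pshufb only inspects bit 7 of each mask
; byte (which zeroes the output byte) and its low four bits, so indices wrap
; modulo 16 within each 128-bit lane and the same folds still apply.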
define <16 x i8> @identity_test2_2(<16 x i8> %InVec) {
; CHECK-LABEL: @identity_test2_2(
; CHECK-NEXT: ret <16 x i8> %InVec
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8…
  ret <16 x i8> %1
}

define <32 x i8> @identity_test_avx2_2(<32 x i8> %InVec) {
; CHECK-LABEL: @identity_test_avx2_2(
; CHECK-NEXT: ret <32 x i8> %InVec
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 16, i8 33, i8 66, i8 19, i8 36, i8 69, …
  ret <32 x i8> %1
}

define <16 x i8> @fold_to_zero_vector_2(<16 x i8> %InVec) {
; CHECK-LABEL: @fold_to_zero_vector_2(
; CHECK-NEXT: ret <16 x i8> zeroinitializer
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 -125, i8 -1, i8 -53, i8 -32, i8 -4…
  ret <16 x i8> %1
}

define <32 x i8> @fold_to_zero_vector_avx2_2(<32 x i8> %InVec) {
; CHECK-LABEL: @fold_to_zero_vector_avx2_2(
; CHECK-NEXT: ret <32 x i8> zeroinitializer
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 -127, i8 -1, i8 -53, i8 -32, i8 -4, i8 …
  ret <32 x i8> %1
}

define <16 x i8> @permute3(<16 x i8> %InVec) {
; CHECK-LABEL: @permute3(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> undef, <16 x i32> <i32 0,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 48, i8 17, i8 34, i8 51, i8 20, i8…
  ret <16 x i8> %1
}

define <32 x i8> @permute3_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @permute3_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> undef, <32 x i32> <i32 4,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 52, i8 21, i8 38, i8 55, i8 20, i8 37, …
  ret <32 x i8> %1
}
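
; Verify that masks containing undef elements still fold to a shufflevector,
; and that an entirely undef mask folds the whole call to undef.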
define <16 x i8> @fold_with_undef_elts(<16 x i8> %InVec) {
; CHECK-LABEL: @fold_with_undef_elts(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> %InVec, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <16 x i8> [[TMP1]]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> <i8 0, i8 -128, i8 undef, i8 -128, i8 …
  ret <16 x i8> %1
}

define <32 x i8> @fold_with_undef_elts_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @fold_with_undef_elts_avx2(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <32 x i8> %InVec, <32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef,…
; CHECK-NEXT: ret <32 x i8> [[TMP1]]
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> <i8 0, i8 -128, i8 undef, i8 -128, i8 1, i8…
  ret <32 x i8> %1
}

define <16 x i8> @fold_with_allundef_elts(<16 x i8> %InVec) {
; CHECK-LABEL: @fold_with_allundef_elts(
; CHECK-NEXT: ret <16 x i8> undef
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %InVec, <16 x i8> undef)
  ret <16 x i8> %1
}

define <32 x i8> @fold_with_allundef_elts_avx2(<32 x i8> %InVec) {
; CHECK-LABEL: @fold_with_allundef_elts_avx2(
; CHECK-NEXT: ret <32 x i8> undef
  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %InVec, <32 x i8> undef)
  ret <32 x i8> %1
}
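
; The SSSE3 and AVX2 byte-shuffle intrinsics exercised by the tests above.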
declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>)