; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s

; Make sure that we don't match this kind of shuffle with a single vpblendw YMM
; instruction. The vpblendw immediate encodes the same mask for both 128-bit halves
; of the YMM, so when the halves need different masks we must use two vpblendw
; instructions.

; CHECK: vpblendw_test1
; mask = 10010110 = 150
; CHECK: vpblendw  $150, %ymm
; CHECK: ret
define <16 x i16> @vpblendw_test1(<16 x i16> %a, <16 x i16> %b) nounwind alwaysinline {
  %t = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 18, i32 3,  i32 20, i32 5,  i32 6,  i32 23,
                                                               i32 8, i32 25, i32 26, i32 11, i32 28, i32 13, i32 14, i32 31>
  ret <16 x i16> %t
}
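
; Note on the $150 immediate above (an explanatory sketch, assuming the usual
; VPBLENDW encoding in which bit i of the immediate selects word i of each
; 128-bit half from the second source): both halves of this shuffle take
; elements 1, 2, 4, and 7 from %b, so the per-half mask is 10010110 = 150 and a
; single ymm vpblendw can cover the whole vector.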

; CHECK: vpblendw_test2
; mask1 = 00010110 = 22
; mask2 = 10000000 = 128
; CHECK: vpblendw  $128, %xmm
; CHECK: vpblendw  $22, %xmm
; CHECK: vinserti128
; CHECK: ret
define <16 x i16> @vpblendw_test2(<16 x i16> %a, <16 x i16> %b) nounwind alwaysinline {
  %t = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 18, i32 3, i32 20, i32 5, i32 6, i32 7,
                                                               i32 8, i32 9,  i32 10, i32 11, i32 12, i32 13, i32 14, i32 31>
  ret <16 x i16> %t
}
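
; Worked derivation of the two immediates above (explanatory only, same assumed
; per-half VPBLENDW encoding as for vpblendw_test1): the low half takes words 1,
; 2, and 4 from %b (00010110 = 22), while the high half takes only word 7
; (10000000 = 128). Because the halves disagree, no single ymm vpblendw applies,
; so two xmm blends are combined with vinserti128.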

; CHECK: blend_test1
; CHECK: vpblendd
; CHECK: ret
define <8 x i32> @blend_test1(<8 x i32> %a, <8 x i32> %b) nounwind alwaysinline {
  %t = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 12, i32 5, i32 6, i32 7>
  ret <8 x i32> %t
}

; CHECK: blend_test2
; CHECK: vpblendd
; CHECK: ret
define <8 x i32> @blend_test2(<8 x i32> %a, <8 x i32> %b) nounwind alwaysinline {
  %t = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 12, i32 5, i32 6, i32 7>
  ret <8 x i32> %t
}


; CHECK: blend_test3
; CHECK: vblendps
; CHECK: ret
define <8 x float> @blend_test3(<8 x float> %a, <8 x float> %b) nounwind alwaysinline {
  %t = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 12, i32 5, i32 6, i32 7>
  ret <8 x float> %t
}

; CHECK: blend_test4
; CHECK: vblendpd
; CHECK: ret
define <4 x i64> @blend_test4(<4 x i64> %a, <4 x i64> %b) nounwind alwaysinline {
  %t = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
  ret <4 x i64> %t
}

;; 2 tests for shufflevectors that optimize to blend + immediate
; CHECK-LABEL: @blend_test5
; CHECK: vpblendd $10, %xmm1, %xmm0, %xmm0
; CHECK: ret
define <4 x i32> @blend_test5(<4 x i32> %a, <4 x i32> %b) {
  %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x i32> %1
}

; CHECK-LABEL: @blend_test6
; CHECK: vpblendw $134, %ymm1, %ymm0, %ymm0
; CHECK: ret
define <16 x i16> @blend_test6(<16 x i16> %a, <16 x i16> %b) {
  %1 = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 18, i32  3, i32  4, i32  5, i32  6, i32 23,
                                                               i32 8, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 31>
  ret <16 x i16> %1
}
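
; How the immediates above fall out (illustrative only, assuming bit i of the
; blend immediate selects lane i from the second source): blend_test5 takes
; dword lanes 1 and 3 from %b, giving 1010 = 10; blend_test6 takes word lanes
; 1, 2, and 7 from %b in both halves of the ymm, giving 10000110 = 134 per half,
; so one ymm vpblendw suffices.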

; CHECK: vpshufhw $27, %ymm
define <16 x i16> @vpshufhw(<16 x i16> %src1) nounwind uwtable readnone ssp {
entry:
  %shuffle.i = shufflevector <16 x i16> %src1, <16 x i16> %src1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 13, i32 12>
  ret <16 x i16> %shuffle.i
}

; CHECK: vpshuflw $27, %ymm
define <16 x i16> @vpshuflw(<16 x i16> %src1) nounwind uwtable readnone ssp {
entry:
  %shuffle.i = shufflevector <16 x i16> %src1, <16 x i16> %src1, <16 x i32> <i32 3, i32 undef, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i16> %shuffle.i
}

; CHECK: vpshufb_test
; CHECK: vpshufb {{.*\(%r.*}}, %ymm
; CHECK: ret
define <32 x i8> @vpshufb_test(<32 x i8> %a) nounwind {
  %S = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15,
                                                                i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15,
                                                                i32 18, i32 19, i32 30, i32 16, i32 25, i32 23, i32 17, i32 25,
                                                                i32 20, i32 19, i32 31, i32 17, i32 23, i32 undef, i32 29, i32 18>
  ret <32 x i8> %S
}

; CHECK: vpshufb1_test
; CHECK: vpshufb {{.*\(%r.*}}, %ymm
; CHECK: ret
define <32 x i8> @vpshufb1_test(<32 x i8> %a) nounwind {
  %S = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15,
                                                                i32 1, i32 9, i32 36, i32 11, i32 5, i32 13, i32 7, i32 15,
                                                                i32 18, i32 49, i32 30, i32 16, i32 25, i32 23, i32 17, i32 25,
                                                                i32 20, i32 19, i32 31, i32 17, i32 23, i32 undef, i32 29, i32 18>
  ret <32 x i8> %S
}


; CHECK: vpshufb2_test
; CHECK: vpshufb {{.*\(%r.*}}, %ymm
; CHECK: ret
define <32 x i8> @vpshufb2_test(<32 x i8> %a) nounwind {
  %S = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15,
                                                                i32 1, i32 9, i32 36, i32 11, i32 5, i32 13, i32 7, i32 15,
                                                                i32 18, i32 49, i32 30, i32 16, i32 25, i32 23, i32 17, i32 25,
                                                                i32 20, i32 19, i32 31, i32 17, i32 23, i32 undef, i32 29, i32 18>
  ret <32 x i8> %S
}