; RUN: llc < %s -march=x86-64 -mattr=+ssse3 | FileCheck %s

; Test that the pshufb mask comment is correct.
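; Every mask byte here is in the range [0,15] with bit 7 clear, so each result
; byte simply selects xmm0[mask[i]] and the printed comment should echo the
; mask values verbatim.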

define <16 x i8> @test1(<16 x i8> %V) {
; CHECK-LABEL: test1:
; CHECK: pshufb {{.*}}# xmm0 = xmm0[1,0,0,0,0,2,0,0,0,0,3,0,0,0,0,4]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> <i8 1, i8 0, i8 0, i8 0, i8 0, i8 2, i8 0, i8 0, i8 0, i8 0, i8 3, i8 0, i8 0, i8 0, i8 0, i8 4>)
  ret <16 x i8> %1
}

; Test that indexes larger than the size of the vector are shown masked (bottom 4 bits).
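; Only the low 4 bits of each index are significant: 16 & 0xf = 0, 17 & 0xf = 1,
; and 50 & 0xf = 2, which is what the expected mask comment below reflects.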

define <16 x i8> @test2(<16 x i8> %V) {
; CHECK-LABEL: test2:
; CHECK: pshufb {{.*}}# xmm0 = xmm0[15,0,0,0,0,0,0,0,0,0,1,0,0,0,0,2]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> <i8 15, i8 0, i8 0, i8 0, i8 0, i8 16, i8 0, i8 0, i8 0, i8 0, i8 17, i8 0, i8 0, i8 0, i8 0, i8 50>)
  ret <16 x i8> %1
}

; Test that indexes with bit seven set are shown as zero.
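; 127 (0x7f) keeps bit 7 clear and indexes element 15, while 128 (0x80) and
; 255 (0xff) have bit 7 set, so those lanes zero the destination byte and are
; printed as "zero".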

define <16 x i8> @test3(<16 x i8> %V) {
; CHECK-LABEL: test3:
; CHECK: pshufb {{.*}}# xmm0 = xmm0[1,0,0,15,0,2,0,0],zero,xmm0[0,3,0,0],zero,xmm0[0,4]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> <i8 1, i8 0, i8 0, i8 127, i8 0, i8 2, i8 0, i8 0, i8 128, i8 0, i8 3, i8 0, i8 0, i8 255, i8 0, i8 4>)
  ret <16 x i8> %1
}

; Test that we won't crash when the constant was reused for another instruction.
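; The two i64 values stored below are 0x0f0e0d0c0b0a0908 and 0x0706050403020100,
; i.e. the same 16 bytes as the shuffle mask, so the store and the pshufb can
; end up sharing a single constant pool entry.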

define <16 x i8> @test4(<2 x i64>* %V) {
; CHECK-LABEL: test4
; CHECK: pshufb {{.*}}
  store <2 x i64> <i64 1084818905618843912, i64 506097522914230528>, <2 x i64>* %V, align 16
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> undef, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  ret <16 x i8> %1
}

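; Here the shuffle mask is loaded from memory rather than taken directly from a
; constant, so (presumably) the printer cannot decode it and only the pshufb
; mnemonic is checked; the test just needs to compile without crashing.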
define <16 x i8> @test5() {
; CHECK-LABEL: test5
; CHECK: pshufb {{.*}}
  store <2 x i64> <i64 1, i64 0>, <2 x i64>* undef, align 16
  %l = load <2 x i64>, <2 x i64>* undef, align 16
  %shuffle = shufflevector <2 x i64> %l, <2 x i64> undef, <2 x i32> zeroinitializer
  store <2 x i64> %shuffle, <2 x i64>* undef, align 16
  %1 = load <16 x i8>, <16 x i8>* undef, align 16
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> undef, <16 x i8> %1)
  ret <16 x i8> %2
}

declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind readnone