; NOTE: removed non-IR residue (HTML source-viewer navigation links) left over
; from web extraction of this test file.
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s

; Test instrumentation of vector shift instructions.

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; Shift intrinsics exercised below: MMX, SSE2 (vector and immediate-count
; forms), and AVX2 variable (per-element) shifts.
declare x86_mmx @llvm.x86.mmx.psll.d(x86_mmx, x86_mmx)
declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>)
declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>)
declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32)
define i64 @test_mmx(i64 %x.coerce, i64 %y.coerce) sanitize_memory {
entry:
  %0 = bitcast i64 %x.coerce to <2 x i32>
  %1 = bitcast <2 x i32> %0 to x86_mmx
  %2 = bitcast i64 %y.coerce to x86_mmx
  %3 = tail call x86_mmx @llvm.x86.mmx.psll.d(x86_mmx %1, x86_mmx %2)
  %4 = bitcast x86_mmx %3 to <2 x i32>
  %5 = bitcast <2 x i32> %4 to <1 x i64>
  %6 = extractelement <1 x i64> %5, i32 0
  ret i64 %6
}

; Expected shadow propagation: the shift amount's shadow is collapsed to a
; single bit (icmp ne 0), sign-extended across the whole result [[C]], and
; OR-ed with the shadow [[B]] obtained by shifting the operand's shadow [[A]].
; CHECK-LABEL: @test_mmx
; CHECK: = icmp ne i64 {{.*}}, 0
; CHECK: [[C:%.*]] = sext i1 {{.*}} to i64
; CHECK: [[A:%.*]] = call x86_mmx @llvm.x86.mmx.psll.d(
; CHECK: [[B:%.*]] = bitcast x86_mmx {{.*}}[[A]] to i64
; CHECK: = or i64 {{.*}}[[B]], {{.*}}[[C]]
; CHECK: call x86_mmx @llvm.x86.mmx.psll.d(
; CHECK: ret i64

; SSE2 shift with an i32 immediate-style count: the count's i32 shadow is
; collapsed to one bit, sign-extended to i128, and bitcast over the whole
; <8 x i16> result shadow.
define <8 x i16> @test_sse2_scalar(<8 x i16> %x, i32 %y) sanitize_memory {
entry:
  %0 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %x, i32 %y)
  ret <8 x i16> %0
}

; CHECK-LABEL: @test_sse2_scalar
; CHECK: = icmp ne i32 {{.*}}, 0
; CHECK: = sext i1 {{.*}} to i128
; CHECK: = bitcast i128 {{.*}} to <8 x i16>
; CHECK: = call <8 x i16> @llvm.x86.sse2.pslli.w(
; CHECK: = or <8 x i16>
; CHECK: call <8 x i16> @llvm.x86.sse2.pslli.w(
; CHECK: ret <8 x i16>

; SSE2 shift with a vector count: only the low 64 bits of the count are used
; by the intrinsic, so the count's shadow is bitcast to i128 and truncated to
; i64 before the ne-0 / sext collapse.
define <8 x i16> @test_sse2(<8 x i16> %x, <8 x i16> %y) sanitize_memory {
entry:
  %0 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %x, <8 x i16> %y)
  ret <8 x i16> %0
}

; CHECK-LABEL: @test_sse2
; CHECK: = bitcast <8 x i16> {{.*}} to i128
; CHECK: = trunc i128 {{.*}} to i64
; CHECK: = icmp ne i64 {{.*}}, 0
; CHECK: = sext i1 {{.*}} to i128
; CHECK: = bitcast i128 {{.*}} to <8 x i16>
; CHECK: = call <8 x i16> @llvm.x86.sse2.psrl.w(
; CHECK: = or <8 x i16>
; CHECK: call <8 x i16> @llvm.x86.sse2.psrl.w(
; CHECK: ret <8 x i16>

; Test variable shift (i.e. vector by vector).
; Here the count shadow is collapsed per element (vector icmp/sext) rather
; than to a single scalar bit.

define <4 x i32> @test_avx2(<4 x i32> %x, <4 x i32> %y) sanitize_memory {
entry:
  %0 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %0
}

; CHECK-LABEL: @test_avx2
; CHECK: = icmp ne <4 x i32> {{.*}}, zeroinitializer
; CHECK: = sext <4 x i1> {{.*}} to <4 x i32>
; CHECK: = call <4 x i32> @llvm.x86.avx2.psllv.d(
; CHECK: = or <4 x i32>
; CHECK: = tail call <4 x i32> @llvm.x86.avx2.psllv.d(
; CHECK: ret <4 x i32>

; 256-bit variant of the variable-shift test above: same per-element
; icmp/sext shadow collapse, at <8 x i32> width.
define <8 x i32> @test_avx2_256(<8 x i32> %x, <8 x i32> %y) sanitize_memory {
entry:
  %0 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %x, <8 x i32> %y)
  ret <8 x i32> %0
}

; CHECK-LABEL: @test_avx2_256
; CHECK: = icmp ne <8 x i32> {{.*}}, zeroinitializer
; CHECK: = sext <8 x i1> {{.*}} to <8 x i32>
; CHECK: = call <8 x i32> @llvm.x86.avx2.psllv.d.256(
; CHECK: = or <8 x i32>
; CHECK: = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(
; CHECK: ret <8 x i32>
