; NOTE(review): removed code-browser navigation residue ("Home", "Line#",
; "Scopes#", "Navigate", "Raw", "Download") left behind by web extraction;
; it is not part of the original LLVM test file and is not valid IR.
1; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck  \
2; RUN: %s
3; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
4; REQUIRES: x86-registered-target
5
6; Test instrumentation of vector shift instructions.
7
8target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
9target triple = "x86_64-unknown-linux-gnu"
10
11declare x86_mmx @llvm.x86.mmx.psll.d(x86_mmx, x86_mmx)
12declare <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32>, <16 x i32>)
13declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>)
14declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>)
15declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)
16declare <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16>, <8 x i16>)
17declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32)
18declare <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16>, i32)
19
; MMX case: both operands arrive as i64 and are bitcast to x86_mmx before
; the psll.d call; the result is bitcast back through <2 x i32>/<1 x i64>
; to an i64 return. The CHECK block below expects MSan to OR the shifted
; value shadow with a sext'ed "shift count is poisoned" flag.
20define i64 @test_mmx(i64 %x.coerce, i64 %y.coerce) sanitize_memory {
21entry:
22  %0 = bitcast i64 %x.coerce to <2 x i32>
23  %1 = bitcast <2 x i32> %0 to x86_mmx
24  %2 = bitcast i64 %y.coerce to x86_mmx
25  %3 = tail call x86_mmx @llvm.x86.mmx.psll.d(x86_mmx %1, x86_mmx %2)
26  %4 = bitcast x86_mmx %3 to <2 x i32>
27  %5 = bitcast <2 x i32> %4 to <1 x i64>
28  %6 = extractelement <1 x i64> %5, i32 0
29  ret i64 %6
30}
31
32; CHECK-LABEL: @test_mmx
33; CHECK: = icmp ne i64 {{.*}}, 0
34; CHECK: [[C:%.*]] = sext i1 {{.*}} to i64
35; CHECK: [[A:%.*]] = call x86_mmx @llvm.x86.mmx.psll.d(
36; CHECK: [[B:%.*]] = bitcast x86_mmx {{.*}}[[A]] to i64
37; CHECK: = or i64 {{.*}}[[B]], {{.*}}[[C]]
38; CHECK: call x86_mmx @llvm.x86.mmx.psll.d(
39; CHECK: ret i64
40
41
; SSE2 shift with a scalar (i32) shift amount. The CHECKs below expect the
; i32 count shadow to be tested against 0, sext'ed to i128, and bitcast to
; the <8 x i16> shadow type before being OR'ed into the result shadow.
42define <8 x i16> @test_sse2_scalar(<8 x i16> %x, i32 %y) sanitize_memory {
43entry:
44  %0 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %x, i32 %y)
45  ret <8 x i16> %0
46}
47
48; CHECK-LABEL: @test_sse2_scalar
49; CHECK: = icmp ne i32 {{.*}}, 0
50; CHECK: = sext i1 {{.*}} to i128
51; CHECK: = bitcast i128 {{.*}} to <8 x i16>
52; CHECK: = call <8 x i16> @llvm.x86.sse2.pslli.w(
53; CHECK: = or <8 x i16>
54; CHECK: call <8 x i16> @llvm.x86.sse2.pslli.w(
55; CHECK: ret <8 x i16>
56
57
; AVX-512 variant of the scalar-count shift: same instrumentation pattern
; as @test_sse2_scalar but with a 512-bit (<32 x i16>) value, so the count
; shadow is sext'ed to i512 per the CHECK block below.
58define <32 x i16> @test_avx512_scalar(<32 x i16> %x, i32 %y) sanitize_memory {
59entry:
60  %0 = tail call <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16> %x, i32 %y)
61  ret <32 x i16> %0
62}
63
64; CHECK-LABEL: @test_avx512_scalar
65; CHECK: = icmp ne i32 {{.*}}, 0
66; CHECK: = sext i1 {{.*}} to i512
67; CHECK: = bitcast i512 {{.*}} to <32 x i16>
68; CHECK: = call <32 x i16> @llvm.x86.avx512.pslli.w.512(
69; CHECK: = or <32 x i16>
70; CHECK: call <32 x i16> @llvm.x86.avx512.pslli.w.512(
71; CHECK: ret <32 x i16>
72
73
; SSE2 shift where the count is itself a <8 x i16> vector (whole-vector
; count, not per-element). Per the CHECKs, MSan bitcasts the count shadow
; to i128 and truncates to i64 — only the low 64 bits of the count operand
; are significant for these intrinsics — before the icmp/sext/or sequence.
74define <8 x i16> @test_sse2(<8 x i16> %x, <8 x i16> %y) sanitize_memory {
75entry:
76  %0 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %x, <8 x i16> %y)
77  ret <8 x i16> %0
78}
79
80; CHECK-LABEL: @test_sse2
81; CHECK: = bitcast <8 x i16> {{.*}} to i128
82; CHECK: = trunc i128 {{.*}} to i64
83; CHECK: = icmp ne i64 {{.*}}, 0
84; CHECK: = sext i1 {{.*}} to i128
85; CHECK: = bitcast i128 {{.*}} to <8 x i16>
86; CHECK: = call <8 x i16> @llvm.x86.sse2.psrl.w(
87; CHECK: = or <8 x i16>
88; CHECK: call <8 x i16> @llvm.x86.sse2.psrl.w(
89; CHECK: ret <8 x i16>
90
91
; AVX-512 shift of a 512-bit value by a 128-bit vector count: the count
; shadow is still reduced via i128 -> i64 (see CHECKs), but the poison
; flag is widened to i512 to match the <32 x i16> result shadow.
92define <32 x i16> @test_avx512(<32 x i16> %x, <8 x i16> %y) sanitize_memory {
93entry:
94  %0 = tail call <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16> %x, <8 x i16> %y)
95  ret <32 x i16> %0
96}
97
98; CHECK-LABEL: @test_avx512
99; CHECK: = bitcast <8 x i16> {{.*}} to i128
100; CHECK: = trunc i128 {{.*}} to i64
101; CHECK: = icmp ne i64 {{.*}}, 0
102; CHECK: = sext i1 {{.*}} to i512
103; CHECK: = bitcast i512 {{.*}} to <32 x i16>
104; CHECK: = call <32 x i16> @llvm.x86.avx512.psrl.w.512(
105; CHECK: = or <32 x i16>
106; CHECK: call <32 x i16> @llvm.x86.avx512.psrl.w.512(
107; CHECK: ret <32 x i16>
108
109
110; Test variable shift (i.e. vector by vector).
111
; AVX2 variable shift (per-element counts). Per the CHECKs, the count
; shadow is compared elementwise against zeroinitializer and sext'ed
; lane-by-lane (<4 x i1> -> <4 x i32>) — no scalar collapse as in the
; whole-vector-count tests above.
112define <4 x i32> @test_avx2(<4 x i32> %x, <4 x i32> %y) sanitize_memory {
113entry:
114  %0 = tail call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %x, <4 x i32> %y)
115  ret <4 x i32> %0
116}
117
118; CHECK-LABEL: @test_avx2
119; CHECK: = icmp ne <4 x i32> {{.*}}, zeroinitializer
120; CHECK: = sext <4 x i1> {{.*}} to <4 x i32>
121; CHECK: = call <4 x i32> @llvm.x86.avx2.psllv.d(
122; CHECK: = or <4 x i32>
123; CHECK: = tail call <4 x i32> @llvm.x86.avx2.psllv.d(
124; CHECK: ret <4 x i32>
125
; 256-bit AVX2 variable shift: same per-element instrumentation pattern as
; @test_avx2, widened to 8 lanes (<8 x i1> -> <8 x i32> in the CHECKs).
126define <8 x i32> @test_avx2_256(<8 x i32> %x, <8 x i32> %y) sanitize_memory {
127entry:
128  %0 = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %x, <8 x i32> %y)
129  ret <8 x i32> %0
130}
131
132; CHECK-LABEL: @test_avx2_256
133; CHECK: = icmp ne <8 x i32> {{.*}}, zeroinitializer
134; CHECK: = sext <8 x i1> {{.*}} to <8 x i32>
135; CHECK: = call <8 x i32> @llvm.x86.avx2.psllv.d.256(
136; CHECK: = or <8 x i32>
137; CHECK: = tail call <8 x i32> @llvm.x86.avx2.psllv.d.256(
138; CHECK: ret <8 x i32>
139
; 512-bit AVX-512 variable shift: same per-element instrumentation pattern,
; widened to 16 lanes (<16 x i1> -> <16 x i32> in the CHECKs).
140define <16 x i32> @test_avx512_512(<16 x i32> %x, <16 x i32> %y) sanitize_memory {
141entry:
142  %0 = tail call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %x, <16 x i32> %y)
143  ret <16 x i32> %0
144}
145
146; CHECK-LABEL: @test_avx512_512
147; CHECK: = icmp ne <16 x i32> {{.*}}, zeroinitializer
148; CHECK: = sext <16 x i1> {{.*}} to <16 x i32>
149; CHECK: = call <16 x i32> @llvm.x86.avx512.psllv.d.512(
150; CHECK: = or <16 x i32>
151; CHECK: = tail call <16 x i32> @llvm.x86.avx512.psllv.d.512(
152; CHECK: ret <16 x i32>
153