; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -S -msan-check-access-address=0 -passes=msan 2>&1 | FileCheck %s
; RUN: opt %s -S -msan-check-access-address=0 -msan | FileCheck %s
; RUN: opt %s -S -msan-check-access-address=0 -msan-track-origins=2 -passes=msan 2>&1 | FileCheck %s --check-prefixes=CHECK,ORIGIN
; RUN: opt %s -S -msan-check-access-address=0 -msan-track-origins=2 -msan | FileCheck %s --check-prefixes=CHECK,ORIGIN

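; Test that MSan handles the llvm.abs.* and llvm.fabs.* intrinsics: the
; operand shadow is bitcast to the intrinsic's element type (for llvm.abs)
; or forwarded as-is (for llvm.fabs), and with -msan-track-origins the
; operand's origin is propagated to the return value.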
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

define <4 x i64> @test_mm256_abs_epi8(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK:         call void @llvm.donothing()
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <32 x i8> [[TMP2]] to <4 x i64>
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <32 x i8> [[TMP4]] to <4 x i64>
; CHECK-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
; ORIGIN-NEXT:   store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
; CHECK:         ret <4 x i64> [[TMP6]]
;
entry:
  %0 = bitcast <4 x i64> %a to <32 x i8>
  %1 = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %0, i1 false)
  %2 = bitcast <32 x i8> %1 to <4 x i64>
  ret <4 x i64> %2
}

define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK:         call void @llvm.donothing()
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <16 x i16> [[TMP2]] to <4 x i64>
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <16 x i16> [[TMP4]] to <4 x i64>
; CHECK-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
; ORIGIN-NEXT:   store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
; CHECK:         ret <4 x i64> [[TMP6]]
;
entry:
  %0 = bitcast <4 x i64> %a to <16 x i16>
  %1 = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %0, i1 false)
  %2 = bitcast <16 x i16> %1 to <4 x i64>
  ret <4 x i64> %2
}

define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK:         call void @llvm.donothing()
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i32> [[TMP4]] to <4 x i64>
; CHECK-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
; ORIGIN-NEXT:   store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
; CHECK:         ret <4 x i64> [[TMP6]]
;
entry:
  %0 = bitcast <4 x i64> %a to <8 x i32>
  %1 = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %0, i1 false)
  %2 = bitcast <8 x i32> %1 to <4 x i64>
  ret <4 x i64> %2
}

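; The shadow of <4 x double> is <4 x i64>, which already matches the return
; shadow type, so the operand shadow and origin pass through with no bitcasts.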
define <4 x double> @test_fabs(<4 x double> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_fabs(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK:         call void @llvm.donothing()
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> [[A:%.*]])
; CHECK-NEXT:    store <4 x i64> [[TMP0]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
; ORIGIN-NEXT:   store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
; CHECK:         ret <4 x double> [[TMP2]]
;
entry:
  %0 = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> %a)
  ret <4 x double> %0
}

declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1 immarg) #1
declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1 immarg) #1
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1 immarg) #1
declare <4 x double> @llvm.fabs.v4f64(<4 x double>) #1

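; sanitize_memory on #0 is what opts the test functions into MSan instrumentation.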
attributes #0 = { nounwind readnone sanitize_memory }
attributes #1 = { nounwind readnone speculatable willreturn }

!llvm.module.flags = !{!0}
!llvm.ident = !{!1}

!0 = !{i32 1, !"wchar_size", i32 4}
!1 = !{!"clang version 12.0.0"}