; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -slp-vectorizer -S -mtriple=x86_64-unknown-linux -mcpu=corei7-avx -slp-threshold=-999 < %s | FileCheck %s


; S[0] = %v1 + %v2
; S[1] = %v2 + %v1
; S[2] = %v2 + %v1
; S[3] = %v1 + %v2
;
; We broadcast %v1 and %v2
;

define void @bcast_vals(i64 *%A, i64 *%B, i64 *%S) {
; CHECK-LABEL: @bcast_vals(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A0:%.*]] = load i64, i64* [[A:%.*]], align 8
; CHECK-NEXT:    [[B0:%.*]] = load i64, i64* [[B:%.*]], align 8
; CHECK-NEXT:    [[V1:%.*]] = sub i64 [[A0]], 1
; CHECK-NEXT:    [[V2:%.*]] = sub i64 [[B0]], 1
; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x i64> undef, i64 [[V1]], i32 0
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i64> [[TMP0]], i64 [[V1]], i32 1
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i64> [[TMP1]], i64 [[V1]], i32 2
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i64> [[TMP2]], i64 [[V1]], i32 3
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i64> undef, i64 [[V2]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i64> [[TMP4]], i64 [[V2]], i32 1
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i64> [[TMP5]], i64 [[V2]], i32 2
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i64> [[TMP6]], i64 [[V2]], i32 3
; CHECK-NEXT:    [[TMP8:%.*]] = add <4 x i64> [[TMP3]], [[TMP7]]
; CHECK-NEXT:    [[IDXS0:%.*]] = getelementptr inbounds i64, i64* [[S:%.*]], i64 0
; CHECK-NEXT:    [[IDXS1:%.*]] = getelementptr inbounds i64, i64* [[S]], i64 1
; CHECK-NEXT:    [[IDXS2:%.*]] = getelementptr inbounds i64, i64* [[S]], i64 2
; CHECK-NEXT:    [[IDXS3:%.*]] = getelementptr inbounds i64, i64* [[S]], i64 3
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i64* [[IDXS0]] to <4 x i64>*
; CHECK-NEXT:    store <4 x i64> [[TMP8]], <4 x i64>* [[TMP9]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %A0 = load i64, i64 *%A, align 8
  %B0 = load i64, i64 *%B, align 8

  %v1 = sub i64 %A0, 1
  %v2 = sub i64 %B0, 1

  %Add0 = add i64 %v1, %v2
  %Add1 = add i64 %v2, %v1
  %Add2 = add i64 %v2, %v1
  %Add3 = add i64 %v1, %v2

  %idxS0 = getelementptr inbounds i64, i64* %S, i64 0
  %idxS1 = getelementptr inbounds i64, i64* %S, i64 1
  %idxS2 = getelementptr inbounds i64, i64* %S, i64 2
  %idxS3 = getelementptr inbounds i64, i64* %S, i64 3

  store i64 %Add0, i64 *%idxS0, align 8
  store i64 %Add1, i64 *%idxS1, align 8
  store i64 %Add2, i64 *%idxS2, align 8
  store i64 %Add3, i64 *%idxS3, align 8
  ret void
}

; S[0] = %v1 + %v2
; S[1] = %v3 + %v1
; S[2] = %v5 + %v1
; S[3] = %v1 + %v4
;
; We broadcast %v1.

;
define void @bcast_vals2(i16 *%A, i16 *%B, i16 *%C, i16 *%D, i16 *%E, i32 *%S) {
; CHECK-LABEL: @bcast_vals2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A0:%.*]] = load i16, i16* [[A:%.*]], align 8
; CHECK-NEXT:    [[B0:%.*]] = load i16, i16* [[B:%.*]], align 8
; CHECK-NEXT:    [[C0:%.*]] = load i16, i16* [[C:%.*]], align 8
; CHECK-NEXT:    [[D0:%.*]] = load i16, i16* [[D:%.*]], align 8
; CHECK-NEXT:    [[E0:%.*]] = load i16, i16* [[E:%.*]], align 8
; CHECK-NEXT:    [[V1:%.*]] = sext i16 [[A0]] to i32
; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x i16> undef, i16 [[B0]], i32 0
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i16> [[TMP0]], i16 [[C0]], i32 1
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[E0]], i32 2
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x i16> [[TMP2]], i16 [[D0]], i32 3
; CHECK-NEXT:    [[TMP4:%.*]] = sext <4 x i16> [[TMP3]] to <4 x i32>
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> undef, i32 [[V1]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[V1]], i32 1
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[V1]], i32 2
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <4 x i32> [[TMP7]], i32 [[V1]], i32 3
; CHECK-NEXT:    [[TMP9:%.*]] = add <4 x i32> [[TMP8]], [[TMP4]]
; CHECK-NEXT:    [[IDXS0:%.*]] = getelementptr inbounds i32, i32* [[S:%.*]], i64 0
; CHECK-NEXT:    [[IDXS1:%.*]] = getelementptr inbounds i32, i32* [[S]], i64 1
; CHECK-NEXT:    [[IDXS2:%.*]] = getelementptr inbounds i32, i32* [[S]], i64 2
; CHECK-NEXT:    [[IDXS3:%.*]] = getelementptr inbounds i32, i32* [[S]], i64 3
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[IDXS0]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* [[TMP10]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %A0 = load i16, i16 *%A, align 8
  %B0 = load i16, i16 *%B, align 8
  %C0 = load i16, i16 *%C, align 8
  %D0 = load i16, i16 *%D, align 8
  %E0 = load i16, i16 *%E, align 8

  %v1 = sext i16 %A0 to i32
  %v2 = sext i16 %B0 to i32
  %v3 = sext i16 %C0 to i32
  %v4 = sext i16 %D0 to i32
  %v5 = sext i16 %E0 to i32

  %Add0 = add i32 %v1, %v2
  %Add1 = add i32 %v3, %v1
  %Add2 = add i32 %v5, %v1
  %Add3 = add i32 %v1, %v4

  %idxS0 = getelementptr inbounds i32, i32* %S, i64 0
  %idxS1 = getelementptr inbounds i32, i32* %S, i64 1
  %idxS2 = getelementptr inbounds i32, i32* %S, i64 2
  %idxS3 = getelementptr inbounds i32, i32* %S, i64 3

  store i32 %Add0, i32 *%idxS0, align 8
  store i32 %Add1, i32 *%idxS1, align 8
  store i32 %Add2, i32 *%idxS2, align 8
  store i32 %Add3, i32 *%idxS3, align 8
  ret void
}