• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64

; Reassociation should combine the two constant operands:
; <1,-2,3,-4> + <-1,2,-3,4> sums to zero, so both constant adds fold away
; and only the single paddd of %a0 + %a1 remains.
define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: add_4i32:
; X86:       # %bb.0:
; X86-NEXT:    paddd %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: add_4i32:
; X64:       # %bb.0:
; X64-NEXT:    paddd %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = add <4 x i32> %a0, <i32  1, i32 -2, i32  3, i32 -4>
  %2 = add <4 x i32> %a1, <i32 -1, i32  2, i32 -3, i32  4>
  %3 = add <4 x i32> %1, %2
  ret <4 x i32> %3
}
20
; Same as add_4i32 but with the constant vectors as the first (LHS) operand
; of each add; the cancelling constants must still fold to a single paddd.
define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: add_4i32_commute:
; X86:       # %bb.0:
; X86-NEXT:    paddd %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: add_4i32_commute:
; X64:       # %bb.0:
; X64-NEXT:    paddd %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = add <4 x i32> <i32  1, i32 -2, i32  3, i32 -4>, %a0
  %2 = add <4 x i32> <i32 -1, i32  2, i32 -3, i32  4>, %a1
  %3 = add <4 x i32> %1, %2
  ret <4 x i32> %3
}
36
; Reassociation merges the two constant multiplicands into one constant
; <1*4, 2*3, 3*2, 4*1> = <4,6,6,4>, so codegen emits one variable pmulld
; plus one pmulld against a constant-pool entry.
define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32:
; X86:       # %bb.0:
; X86-NEXT:    pmulld %xmm1, %xmm0
; X86-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: mul_4i32:
; X64:       # %bb.0:
; X64-NEXT:    pmulld %xmm1, %xmm0
; X64-NEXT:    pmulld {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 4>
  %2 = mul <4 x i32> %a1, <i32 4, i32 3, i32 2, i32 1>
  %3 = mul <4 x i32> %1, %2
  ret <4 x i32> %3
}
54
; Same as mul_4i32 but with the constant vectors as the first (LHS) operand
; of each mul; the constants must still fold into one constant-pool pmulld.
define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32_commute:
; X86:       # %bb.0:
; X86-NEXT:    pmulld %xmm1, %xmm0
; X86-NEXT:    pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: mul_4i32_commute:
; X64:       # %bb.0:
; X64-NEXT:    pmulld %xmm1, %xmm0
; X64-NEXT:    pmulld {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %1 = mul <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %a0
  %2 = mul <4 x i32> <i32 4, i32 3, i32 2, i32 1>, %a1
  %3 = mul <4 x i32> %1, %2
  ret <4 x i32> %3
}
72
; Reassociation merges the two constant masks into one constant
; <-2,-2,3,3> & <-1,-1,1,1> = <-2,-2,1,1>, so codegen emits one variable
; andps plus one andps against a constant-pool entry.
define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: and_4i32:
; X86:       # %bb.0:
; X86-NEXT:    andps %xmm1, %xmm0
; X86-NEXT:    andps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: and_4i32:
; X64:       # %bb.0:
; X64-NEXT:    andps %xmm1, %xmm0
; X64-NEXT:    andps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -2, i32 -2, i32  3, i32  3>
  %2 = and <4 x i32> %a1, <i32 -1, i32 -1, i32  1, i32  1>
  %3 = and <4 x i32> %1, %2
  ret <4 x i32> %3
}
90
; Same as and_4i32 but with the constant masks as the first (LHS) operand
; of each and; the constants must still fold into one constant-pool andps.
define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: and_4i32_commute:
; X86:       # %bb.0:
; X86-NEXT:    andps %xmm1, %xmm0
; X86-NEXT:    andps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: and_4i32_commute:
; X64:       # %bb.0:
; X64-NEXT:    andps %xmm1, %xmm0
; X64-NEXT:    andps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> <i32 -2, i32 -2, i32  3, i32  3>, %a0
  %2 = and <4 x i32> <i32 -1, i32 -1, i32  1, i32  1>, %a1
  %3 = and <4 x i32> %1, %2
  ret <4 x i32> %3
}
108
; Reassociation merges the two constant operands into one constant
; <-2,-2,3,3> | <-1,-1,1,1> = <-1,-1,3,3>, so codegen emits one variable
; orps plus one orps against a constant-pool entry.
define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: or_4i32:
; X86:       # %bb.0:
; X86-NEXT:    orps %xmm1, %xmm0
; X86-NEXT:    orps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: or_4i32:
; X64:       # %bb.0:
; X64-NEXT:    orps %xmm1, %xmm0
; X64-NEXT:    orps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %1 = or <4 x i32> %a0, <i32 -2, i32 -2, i32  3, i32  3>
  %2 = or <4 x i32> %a1, <i32 -1, i32 -1, i32  1, i32  1>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}
126
; Same as or_4i32 but with the constant vectors as the first (LHS) operand
; of each or; the constants must still fold into one constant-pool orps.
define <4 x i32> @or_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: or_4i32_commute:
; X86:       # %bb.0:
; X86-NEXT:    orps %xmm1, %xmm0
; X86-NEXT:    orps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: or_4i32_commute:
; X64:       # %bb.0:
; X64-NEXT:    orps %xmm1, %xmm0
; X64-NEXT:    orps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %1 = or <4 x i32> <i32 -2, i32 -2, i32  3, i32  3>, %a0
  %2 = or <4 x i32> <i32 -1, i32 -1, i32  1, i32  1>, %a1
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}
144
; Reassociation merges the two constant operands into one constant
; <-2,-2,3,3> ^ <-1,-1,1,1> = <1,1,2,2>, so codegen emits one variable
; xorps plus one xorps against a constant-pool entry.
define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: xor_4i32:
; X86:       # %bb.0:
; X86-NEXT:    xorps %xmm1, %xmm0
; X86-NEXT:    xorps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: xor_4i32:
; X64:       # %bb.0:
; X64-NEXT:    xorps %xmm1, %xmm0
; X64-NEXT:    xorps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %1 = xor <4 x i32> %a0, <i32 -2, i32 -2, i32  3, i32  3>
  %2 = xor <4 x i32> %a1, <i32 -1, i32 -1, i32  1, i32  1>
  %3 = xor <4 x i32> %1, %2
  ret <4 x i32> %3
}
162
; Same as xor_4i32 but with the constant vectors as the first (LHS) operand
; of each xor; the constants must still fold into one constant-pool xorps.
define <4 x i32> @xor_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: xor_4i32_commute:
; X86:       # %bb.0:
; X86-NEXT:    xorps %xmm1, %xmm0
; X86-NEXT:    xorps {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: xor_4i32_commute:
; X64:       # %bb.0:
; X64-NEXT:    xorps %xmm1, %xmm0
; X64-NEXT:    xorps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %1 = xor <4 x i32> <i32 -2, i32 -2, i32  3, i32  3>, %a0
  %2 = xor <4 x i32> <i32 -1, i32 -1, i32  1, i32  1>, %a1
  %3 = xor <4 x i32> %1, %2
  ret <4 x i32> %3
}
180