# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=SSE2
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx  -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX2
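# Checks legalization of 256-bit vector G_ADD. As the CHECK lines below show,
# on SSE2 and AVX1 the 256-bit add is narrowed (G_UNMERGE_VALUES) into two
# 128-bit G_ADDs whose results are recombined with G_CONCAT_VECTORS, while on
# AVX2 the single 256-bit G_ADD is already legal and is left intact.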

--- |
  define void @test_add_v32i8() {
    %ret = add <32 x i8> undef, undef
    ret void
  }

  define void @test_add_v16i16() {
    %ret = add <16 x i16> undef, undef
    ret void
  }

  define void @test_add_v8i32() {
    %ret = add <8 x i32> undef, undef
    ret void
  }

  define void @test_add_v4i64() {
    %ret = add <4 x i64> undef, undef
    ret void
  }

...
---
name:            test_add_v32i8
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1
    ; SSE2-LABEL: name: test_add_v32i8
    ; SSE2: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE2: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; SSE2: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
    ; SSE2: RET 0
    ; AVX1-LABEL: name: test_add_v32i8
    ; AVX1: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX1: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; AVX1: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
    ; AVX1: RET 0
    ; AVX2-LABEL: name: test_add_v32i8
    ; AVX2: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX2: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX2: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<32 x s8>)
    ; AVX2: RET 0
    %0(<32 x s8>) = IMPLICIT_DEF
    %1(<32 x s8>) = IMPLICIT_DEF
    %2(<32 x s8>) = G_ADD %0, %1
    $ymm0 = COPY %2
    RET 0

...
---
name:            test_add_v16i16
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1
    ; SSE2-LABEL: name: test_add_v16i16
    ; SSE2: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE2: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
    ; SSE2: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
    ; SSE2: RET 0
    ; AVX1-LABEL: name: test_add_v16i16
    ; AVX1: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX1: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
    ; AVX1: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
    ; AVX1: RET 0
    ; AVX2-LABEL: name: test_add_v16i16
    ; AVX2: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX2: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX2: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<16 x s16>)
    ; AVX2: RET 0
    %0(<16 x s16>) = IMPLICIT_DEF
    %1(<16 x s16>) = IMPLICIT_DEF
    %2(<16 x s16>) = G_ADD %0, %1
    $ymm0 = COPY %2
    RET 0

...
---
name:            test_add_v8i32
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1
    ; SSE2-LABEL: name: test_add_v8i32
    ; SSE2: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE2: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
    ; SSE2: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
    ; SSE2: RET 0
    ; AVX1-LABEL: name: test_add_v8i32
    ; AVX1: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX1: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
    ; AVX1: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
    ; AVX1: RET 0
    ; AVX2-LABEL: name: test_add_v8i32
    ; AVX2: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX2: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX2: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<8 x s32>)
    ; AVX2: RET 0
    %0(<8 x s32>) = IMPLICIT_DEF
    %1(<8 x s32>) = IMPLICIT_DEF
    %2(<8 x s32>) = G_ADD %0, %1
    $ymm0 = COPY %2
    RET 0

...
---
name:            test_add_v4i64
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1
    ; SSE2-LABEL: name: test_add_v4i64
    ; SSE2: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE2: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
    ; SSE2: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
    ; SSE2: RET 0
    ; AVX1-LABEL: name: test_add_v4i64
    ; AVX1: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX1: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
    ; AVX1: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
    ; AVX1: RET 0
    ; AVX2-LABEL: name: test_add_v4i64
    ; AVX2: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX2: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX2: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<4 x s64>)
    ; AVX2: RET 0
    %0(<4 x s64>) = IMPLICIT_DEF
    %1(<4 x s64>) = IMPLICIT_DEF
    %2(<4 x s64>) = G_ADD %0, %1
    $ymm0 = COPY %2
    RET 0

...