# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOT_AVX2 --check-prefix=SSE2
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx  -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOT_AVX2 --check-prefix=AVX1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
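#
# Without AVX2, 256-bit vector G_ADD is not legal, so the legalizer splits each
# operand into two 128-bit halves with G_UNMERGE_VALUES, adds the halves, and
# rebuilds the 256-bit result with G_MERGE_VALUES; with AVX2 the 256-bit G_ADD
# is legal as-is. The per-prefix check lines below encode exactly this split.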

--- |
  define void @test_add_v32i8() {
    %ret = add <32 x i8> undef, undef
    ret void
  }

  define void @test_add_v16i16() {
    %ret = add <16 x i16> undef, undef
    ret void
  }

  define void @test_add_v8i32() {
    %ret = add <8 x i32> undef, undef
    ret void
  }

  define void @test_add_v4i64() {
    %ret = add <4 x i64> undef, undef
    ret void
  }

...
---
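# <32 x s8>: SSE2/AVX1 split into two <16 x s8> G_ADDs; AVX2 keeps one <32 x s8> G_ADD.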
name:            test_add_v32i8
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_add_v32i8
    ; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; SSE2: $ymm0 = COPY [[MV]](<32 x s8>)
    ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<32 x s8>)
    ; ALL: RET 0
    %0(<32 x s8>) = IMPLICIT_DEF
    %1(<32 x s8>) = IMPLICIT_DEF
    %2(<32 x s8>) = G_ADD %0, %1
    $ymm0 = COPY %2
    RET 0

...
---
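# <16 x s16>: SSE2/AVX1 split into two <8 x s16> G_ADDs; AVX2 keeps one <16 x s16> G_ADD.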
name:            test_add_v16i16
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_add_v16i16
    ; ALL: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
    ; AVX1: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
    ; SSE2: $ymm0 = COPY [[MV]](<16 x s16>)
    ; AVX1: $ymm0 = COPY [[MV]](<16 x s16>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<16 x s16>)
    ; ALL: RET 0
    %0(<16 x s16>) = IMPLICIT_DEF
    %1(<16 x s16>) = IMPLICIT_DEF
    %2(<16 x s16>) = G_ADD %0, %1
    $ymm0 = COPY %2
    RET 0

...
---
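# <8 x s32>: SSE2/AVX1 split into two <4 x s32> G_ADDs; AVX2 keeps one <8 x s32> G_ADD.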
name:            test_add_v8i32
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_add_v8i32
    ; ALL: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
    ; SSE2: $ymm0 = COPY [[MV]](<8 x s32>)
    ; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
    ; AVX1: $ymm0 = COPY [[MV]](<8 x s32>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<8 x s32>)
    ; ALL: RET 0
    %0(<8 x s32>) = IMPLICIT_DEF
    %1(<8 x s32>) = IMPLICIT_DEF
    %2(<8 x s32>) = G_ADD %0, %1
    $ymm0 = COPY %2
    RET 0

...
---
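# <4 x s64>: SSE2/AVX1 split into two <2 x s64> G_ADDs; AVX2 keeps one <4 x s64> G_ADD.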
name:            test_add_v4i64
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_add_v4i64
    ; ALL: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
    ; AVX1: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
    ; SSE2: $ymm0 = COPY [[MV]](<4 x s64>)
    ; AVX1: $ymm0 = COPY [[MV]](<4 x s64>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<4 x s64>)
    ; ALL: RET 0
    %0(<4 x s64>) = IMPLICIT_DEF
    %1(<4 x s64>) = IMPLICIT_DEF
    %2(<4 x s64>) = G_ADD %0, %1
    $ymm0 = COPY %2
    RET 0

...