; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f -mattr=+avx512vl | FileCheck %s

declare void @func_f32(float)
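; The fadd result is spilled across the call; the reload should be folded into
; a single load-and-splat vbroadcastss from the spill slot.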
define <8 x float> @_256_broadcast_ss_spill(float %x) {
; CHECK-LABEL: _256_broadcast_ss_spill:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    vaddss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    callq func_f32
; CHECK-NEXT:    vbroadcastss (%rsp), %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  %a = fadd float %x, %x
  call void @func_f32(float %a)
  %b = insertelement <8 x float> undef, float %a, i32 0
  %c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
  ret <8 x float> %c
}

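; Same pattern with a 128-bit destination: the folded reload uses the xmm form
; of vbroadcastss.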
define <4 x float> @_128_broadcast_ss_spill(float %x) {
; CHECK-LABEL: _128_broadcast_ss_spill:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    vaddss %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    callq func_f32
; CHECK-NEXT:    vbroadcastss (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  %a = fadd float %x, %x
  call void @func_f32(float %a)
  %b = insertelement <4 x float> undef, float %a, i32 0
  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
  ret <4 x float> %c
}

declare void @func_f64(double)
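; Double-precision variant: the folded spill reload should select vbroadcastsd.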
define <4 x double> @_256_broadcast_sd_spill(double %x) {
; CHECK-LABEL: _256_broadcast_sd_spill:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    vaddsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovapd %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    callq func_f64
; CHECK-NEXT:    vbroadcastsd (%rsp), %ymm0 # 16-byte Folded Reload
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  %a = fadd double %x, %x
  call void @func_f64(double %a)
  %b = insertelement <4 x double> undef, double %a, i32 0
  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
  ret <4 x double> %c
}

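; A plain scalar-to-vector splat should lower to a register-to-register
; vbroadcastss.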
define <8 x float> @_inreg8xfloat(float %a) {
; CHECK-LABEL: _inreg8xfloat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vbroadcastss %xmm0, %ymm0
; CHECK-NEXT:    retq
  %b = insertelement <8 x float> undef, float %a, i32 0
  %c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
  ret <8 x float> %c
}

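; Merge-masked splat: vptestmd materializes the mask in %k1 and vbroadcastss
; writes only the selected lanes of %ymm0.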
define <8 x float> @_ss8xfloat_mask(<8 x float> %i, float %a, <8 x i32> %mask1) {
; CHECK-LABEL: _ss8xfloat_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vptestmd %ymm2, %ymm2, %k1
; CHECK-NEXT:    vbroadcastss %xmm1, %ymm0 {%k1}
; CHECK-NEXT:    retq
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %b = insertelement <8 x float> undef, float %a, i32 0
  %c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
  %r = select <8 x i1> %mask, <8 x float> %c, <8 x float> %i
  ret <8 x float> %r
}

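; Zero-masked splat: lanes not selected by %k1 are zeroed ({z}).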
define <8 x float> @_ss8xfloat_maskz(float %a, <8 x i32> %mask1) {
; CHECK-LABEL: _ss8xfloat_maskz:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vptestmd %ymm1, %ymm1, %k1
; CHECK-NEXT:    vbroadcastss %xmm0, %ymm0 {%k1} {z}
; CHECK-NEXT:    retq
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %b = insertelement <8 x float> undef, float %a, i32 0
  %c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
  %r = select <8 x i1> %mask, <8 x float> %c, <8 x float> zeroinitializer
  ret <8 x float> %r
}

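; 128-bit register-to-register splat.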
define <4 x float> @_inreg4xfloat(float %a) {
; CHECK-LABEL: _inreg4xfloat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vbroadcastss %xmm0, %xmm0
; CHECK-NEXT:    retq
  %b = insertelement <4 x float> undef, float %a, i32 0
  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
  ret <4 x float> %c
}

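; Merge-masked 128-bit splat.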
define <4 x float> @_ss4xfloat_mask(<4 x float> %i, float %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xfloat_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vptestmd %xmm2, %xmm2, %k1
; CHECK-NEXT:    vbroadcastss %xmm1, %xmm0 {%k1}
; CHECK-NEXT:    retq
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %b = insertelement <4 x float> undef, float %a, i32 0
  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
  %r = select <4 x i1> %mask, <4 x float> %c, <4 x float> %i
  ret <4 x float> %r
}

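; Zero-masked 128-bit splat.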
define <4 x float> @_ss4xfloat_maskz(float %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xfloat_maskz:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vptestmd %xmm1, %xmm1, %k1
; CHECK-NEXT:    vbroadcastss %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %b = insertelement <4 x float> undef, float %a, i32 0
  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
  %r = select <4 x i1> %mask, <4 x float> %c, <4 x float> zeroinitializer
  ret <4 x float> %r
}

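; Scalar double splat to a ymm register should lower to vbroadcastsd.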
define <4 x double> @_inreg4xdouble(double %a) {
; CHECK-LABEL: _inreg4xdouble:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT:    retq
  %b = insertelement <4 x double> undef, double %a, i32 0
  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
  ret <4 x double> %c
}

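; Merge-masked double splat.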
define <4 x double> @_ss4xdouble_mask(<4 x double> %i, double %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xdouble_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vptestmd %xmm2, %xmm2, %k1
; CHECK-NEXT:    vbroadcastsd %xmm1, %ymm0 {%k1}
; CHECK-NEXT:    retq
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %b = insertelement <4 x double> undef, double %a, i32 0
  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
  %r = select <4 x i1> %mask, <4 x double> %c, <4 x double> %i
  ret <4 x double> %r
}

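; Zero-masked double splat.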
define <4 x double> @_ss4xdouble_maskz(double %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xdouble_maskz:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vptestmd %xmm1, %xmm1, %k1
; CHECK-NEXT:    vbroadcastsd %xmm0, %ymm0 {%k1} {z}
; CHECK-NEXT:    retq
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %b = insertelement <4 x double> undef, double %a, i32 0
  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
  %r = select <4 x i1> %mask, <4 x double> %c, <4 x double> zeroinitializer
  ret <4 x double> %r
}

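; A splat of a loaded vector's low element feeding an fadd should fold into an
; embedded-broadcast memory operand ({1to2}) instead of a separate load and
; shuffle.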
define <2 x double> @test_v2f64_broadcast_fold(<2 x double> *%a0, <2 x double> %a1) {
; CHECK-LABEL: test_v2f64_broadcast_fold:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddpd (%rdi){1to2}, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %1 = load <2 x double>, <2 x double> *%a0, align 16
  %2 = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
  %3 = fadd <2 x double> %2, %a1
  ret <2 x double> %3
}

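; Masked variant of the embedded-broadcast fold: the fadd result is merged
; into %a2 under %k1.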
define <2 x double> @test_v2f64_broadcast_fold_mask(<2 x double> *%a0, <2 x double> %a1, <2 x i64> %mask1, <2 x double> %a2) {
; CHECK-LABEL: test_v2f64_broadcast_fold_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vptestmq %xmm1, %xmm1, %k1
; CHECK-NEXT:    vaddpd (%rdi){1to2}, %xmm0, %xmm2 {%k1}
; CHECK-NEXT:    vmovapd %xmm2, %xmm0
; CHECK-NEXT:    retq
  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
  %1 = load <2 x double>, <2 x double> *%a0, align 16
  %2 = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
  %3 = fadd <2 x double> %2, %a1
  %4 = select <2 x i1> %mask, <2 x double> %3, <2 x double> %a2
  ret <2 x double> %4
}