Lines Matching refs:i16

12 define <4 x i16> @vmuli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
14 ;CHECK: vmul.i16
15 %tmp1 = load <4 x i16>* %A
16 %tmp2 = load <4 x i16>* %B
17 %tmp3 = mul <4 x i16> %tmp1, %tmp2
18 ret <4 x i16> %tmp3
57 define <8 x i16> @vmulQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
59 ;CHECK: vmul.i16
60 %tmp1 = load <8 x i16>* %A
61 %tmp2 = load <8 x i16>* %B
62 %tmp3 = mul <8 x i16> %tmp1, %tmp2
63 ret <8 x i16> %tmp3
105 define arm_aapcs_vfpcc <4 x i16> @test_vmul_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
108 ; CHECK: vmul.i16 d0, d0, d1[1]
109 %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
110 %1 = mul <4 x i16> %0, %arg0_int16x4_t ; <<4 x i16>> [#uses=1]
111 ret <4 x i16> %1
132 define arm_aapcs_vfpcc <8 x i16> @test_vmulQ_lanes16(<8 x i16> %arg0_int16x8_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
135 ; CHECK: vmul.i16 q0, q0, d2[1]
136 %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> ; <<8 x i16>> [#uses=1]
137 %1 = mul <8 x i16> %0, %arg0_int16x8_t ; <<8 x i16>> [#uses=1]
138 ret <8 x i16> %1
150 define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
155 %tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
156 %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
157 %tmp5 = mul <8 x i16> %tmp3, %tmp4
158 ret <8 x i16> %tmp5
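The <8 x i8> loads feeding the two sext lines of @vmulls8 do not mention i16, so they are absent from this match list. By analogy with @vmuli16 above, the full test presumably reads as follows (the load lines and the vmull.s8 CHECK are assumed, not taken from the listing):

    define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
    ;CHECK: vmull.s8
        %tmp1 = load <8 x i8>* %A                  ; assumed: narrow loads of both operands
        %tmp2 = load <8 x i8>* %B
        %tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
        %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
        %tmp5 = mul <8 x i16> %tmp3, %tmp4         ; sext + sext + mul is expected to select vmull.s8
        ret <8 x i16> %tmp5
    }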
161 define <8 x i16> @vmulls8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
166 %tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
167 ret <8 x i16> %tmp3
170 define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
173 %tmp1 = load <4 x i16>* %A
174 %tmp2 = load <4 x i16>* %B
175 %tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
176 %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
181 define <4 x i32> @vmulls16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
184 %tmp1 = load <4 x i16>* %A
185 %tmp2 = load <4 x i16>* %B
186 %tmp3 = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
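Only the i16-typed lines of @vmulls16 and @vmulls16_int match the filter, so their i32-typed endings are missing above. Following the pattern of the 8-bit tests, they presumably finish with something like this (an assumed completion; the unsigned @vmullu16 and @vmullu16_int pairs below would end the same way, checked against vmull.u16):

        %tmp5 = mul <4 x i32> %tmp3, %tmp4         ; widening multiply, expected to select vmull.s16
        ret <4 x i32> %tmp5

and, in the intrinsic version, simply:

        ret <4 x i32> %tmp3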
210 define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
215 %tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
216 %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
217 %tmp5 = mul <8 x i16> %tmp3, %tmp4
218 ret <8 x i16> %tmp5
221 define <8 x i16> @vmullu8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
226 %tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
227 ret <8 x i16> %tmp3
230 define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
233 %tmp1 = load <4 x i16>* %A
234 %tmp2 = load <4 x i16>* %B
235 %tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
236 %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
241 define <4 x i32> @vmullu16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
244 %tmp1 = load <4 x i16>* %A
245 %tmp2 = load <4 x i16>* %B
246 %tmp3 = call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
270 define <8 x i16> @vmullp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
275 %tmp3 = call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
276 ret <8 x i16> %tmp3
279 define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
283 %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
284 %1 = sext <4 x i16> %arg0_int16x4_t to <4 x i32>
285 %2 = sext <4 x i16> %0 to <4 x i32>
290 define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanes16_int(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
294 %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
295 %1 = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %arg0_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
319 define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
323 %0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
324 %1 = zext <4 x i16> %arg0_uint16x4_t to <4 x i32>
325 %2 = zext <4 x i16> %0 to <4 x i32>
330 define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16_int(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
334 %0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
335 %1 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %arg0_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
359 declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
360 declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
363 declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
364 declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
367 declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
373 define <8 x i16> @vmull_extvec_s8(<8 x i8> %arg) nounwind {
376 %tmp3 = sext <8 x i8> %arg to <8 x i16>
377 %tmp4 = mul <8 x i16> %tmp3, <i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12>
378 ret <8 x i16> %tmp4
381 define <8 x i16> @vmull_extvec_u8(<8 x i8> %arg) nounwind {
384 %tmp3 = zext <8 x i8> %arg to <8 x i16>
385 %tmp4 = mul <8 x i16> %tmp3, <i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12>
386 ret <8 x i16> %tmp4
389 define <8 x i16> @vmull_noextvec_s8(<8 x i8> %arg) nounwind {
393 ; CHECK: vmul.i16
394 %tmp3 = sext <8 x i8> %arg to <8 x i16>
395 %tmp4 = mul <8 x i16> %tmp3, <i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999>
396 ret <8 x i16> %tmp4
399 define <8 x i16> @vmull_noextvec_u8(<8 x i8> %arg) nounwind {
403 ; CHECK: vmul.i16
404 %tmp3 = zext <8 x i8> %arg to <8 x i16>
405 %tmp4 = mul <8 x i16> %tmp3, <i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999>
406 ret <8 x i16> %tmp4
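The four extvec/noextvec tests above contrast splat constants that do and do not fit in the narrow source type: -12 and 12 are representable as a sign- or zero-extended i8, so the widening multiply can still be formed, whereas -999 and 999 are not, which is why the noextvec variants CHECK for a plain vmul.i16. The CHECK lines filtered out of this listing are presumably of the form:

    ; CHECK: vmull.s8       (in @vmull_extvec_s8)
    ; CHECK: vmull.u8       (in @vmull_extvec_u8)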
409 define <4 x i32> @vmull_extvec_s16(<4 x i16> %arg) nounwind {
412 %tmp3 = sext <4 x i16> %arg to <4 x i32>
417 define <4 x i32> @vmull_extvec_u16(<4 x i16> %arg) nounwind {
420 %tmp3 = zext <4 x i16> %arg to <4 x i32>
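The 16-to-32-bit variants show only their define and ext lines here because the constant multiply and return are i32-typed. A complete version presumably mirrors the 8-bit test, along these lines (the -12 splat and the vmull.s16 CHECK are assumptions carried over from @vmull_extvec_s8):

    define <4 x i32> @vmull_extvec_s16(<4 x i16> %arg) nounwind {
    ; CHECK: vmull.s16
        %tmp3 = sext <4 x i16> %arg to <4 x i32>
        %tmp4 = mul <4 x i32> %tmp3, <i32 -12, i32 -12, i32 -12, i32 -12>   ; constant fits in i16, so vmull.s16 applies
        ret <4 x i32> %tmp4
    }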
442 define void @distribute(i16* %dst, i8* %src, i32 %mul) nounwind {
454 %7 = zext <8 x i8> %6 to <8 x i16>
455 %8 = zext <8 x i8> %2 to <8 x i16>
458 %11 = zext <8 x i8> %10 to <8 x i16>
459 %12 = add <8 x i16> %7, %11
460 %13 = mul <8 x i16> %12, %8
461 %14 = bitcast i16* %dst to i8*
462 tail call void @llvm.arm.neon.vst1.v8i16(i8* %14, <8 x i16> %13, i32 2)
468 declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
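In @distribute, a sum of two zero-extended <8 x i8> values is multiplied by a third: %13 = (%7 + %11) * %8. Distributing the multiply over the add keeps every product in widening form, so the expected codegen is a vmull.u8 followed by a vmlal.u8 rather than a full-width multiply; the test's CHECK lines (not matched by the i16 filter) are presumably:

    ; CHECK: vmull.u8
    ; CHECK: vmlal.u8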
520 define i16 @vmullWithInconsistentExtensions(<8 x i8> %vec) {
523 %1 = sext <8 x i8> %vec to <8 x i16>
524 %2 = mul <8 x i16> %1, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
525 %3 = extractelement <8 x i16> %2, i32 0
526 ret i16 %3
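In @vmullWithInconsistentExtensions the sign-extended operand is multiplied by a splat of 255, a value only reachable by zero-extending an i8. With one operand effectively sign-extended and the other zero-extended, neither vmull.s8 nor vmull.u8 is a correct match, so the test presumably guards against the widening form, e.g.:

    ; CHECK: vmullWithInconsistentExtensions
    ; CHECK-NOT: vmull.s8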
555 %vmovl.i249 = zext <8 x i8> %1 to <8 x i16>
560 %vmovl.i237 = zext <8 x i8> undef to <8 x i16>
563 %vmovl.i225 = zext <8 x i8> undef to <8 x i16>
564 %mul.i223 = mul <8 x i16> %vmovl.i249, %vmovl.i249
565 %vshl_n = shl <8 x i16> %mul.i223, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
566 …ail call <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16> <i16 256, i16 256, i16 256, i16 256, i16
567 %mul.i209 = mul <8 x i16> undef, <i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80>
568 %vshr_n130 = lshr <8 x i16> undef, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
569 %vshr_n134 = lshr <8 x i16> %mul.i209, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
570 …%sub.i205 = sub <8 x i16> <i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80>, %vshr_…
571 …%sub.i203 = sub <8 x i16> <i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80, i16 80>, %vshr_…
572 %add.i200 = add <8 x i16> %sub.i205, <i16 96, i16 96, i16 96, i16 96, i16 96, i16 96, i16 96, i16 96>
573 %add.i198 = add <8 x i16> %add.i200, %sub.i203
574 %mul.i194 = mul <8 x i16> %add.i198, %vmovl.i237
575 %mul.i191 = mul <8 x i16> %vshr_n130, undef
576 %add.i192 = add <8 x i16> %mul.i191, %mul.i194
577 %mul.i187 = mul <8 x i16> %vshr_n134, undef
578 %add.i188 = add <8 x i16> %mul.i187, %add.i192
579 %mul.i185 = mul <8 x i16> undef, undef
580 %add.i186 = add <8 x i16> %mul.i185, undef
581 …il call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i188, <8 x i16> <i16 -8, i16 -8, i1…
582 …il call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i186, <8 x i16> <i16 -8, i16 -8, i1…
583 %mul.i184 = mul <8 x i16> undef, %vrshr_n160
584 %mul.i181 = mul <8 x i16> undef, %vmovl.i225
585 %add.i182 = add <8 x i16> %mul.i181, %mul.i184
586 …il call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %add.i182, <8 x i16> <i16 -7, i16 -7, i1…
587 %vqmovn1.i180 = tail call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %vrshr_n170) nounwind
599 declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
600 declare <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
601 declare <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16>) nounwind readnone