/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | sve-intrinsics-fp-arith.ll |
    11  define <vscale x 8 x half> @fabd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x hal…
    15  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1> %pg,
    16  <vscale x 8 x half> %a,
    17  <vscale x 8 x half> %b)
    18  ret <vscale x 8 x half> %out
    21  define <vscale x 4 x float> @fabd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x f…
    25  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fabd.nxv4f32(<vscale x 4 x i1> %pg,
    26  <vscale x 4 x float> %a,
    27  <vscale x 4 x float> %b)
    28  ret <vscale x 4 x float> %out
    [all …]
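The previews above are truncated mid-line, so for orientation here is a minimal, self-contained sketch of the shape these predicated floating-point tests take. Only the intrinsic call and its types come from the preview; the RUN line, CHECK lines and declaration are reconstructed by hand and should be read as assumptions, not text copied from the file.

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

    ; The predicated FABD intrinsic is expected to select the destructive
    ; predicated form; the exact registers depend on the calling convention.
    define <vscale x 8 x half> @fabd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
    ; CHECK-LABEL: fabd_h:
    ; CHECK: fabd z0.h, p0/m, z0.h, z1.h
      %out = call <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
      ret <vscale x 8 x half> %out
    }

    declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)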
|
D | sve-intrinsics-int-compares-with-imm.ll |
    15  define <vscale x 16 x i1> @ir_cmpeq_b(<vscale x 16 x i8> %a) {
    19  %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
    20  …%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zero…
    21  %out = icmp eq <vscale x 16 x i8> %a, %splat
    22  ret <vscale x 16 x i1> %out
    25  define <vscale x 16 x i1> @int_cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
    29  %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
    30  …%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zero…
    31  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %pg,
    32  <vscale x 16 x i8> %a,
    [all …]
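As the file name suggests, these tests come in pairs: a generic-IR version (icmp against a splatted constant) and an intrinsic version, both of which should select the immediate form of the compare. Below is a self-contained sketch of the generic-IR variant; the shufflevector mask is spelled out as zeroinitializer (the usual splat idiom) where the preview truncates it, and the RUN/CHECK lines are assumptions.

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

    ; Splat the constant 4 with the insertelement/shufflevector idiom, then
    ; compare; the backend should fold this into CMPEQ with an immediate.
    define <vscale x 16 x i1> @ir_cmpeq_b(<vscale x 16 x i8> %a) {
    ; CHECK-LABEL: ir_cmpeq_b:
    ; CHECK: cmpeq p0.b, p{{[0-9]+}}/z, z0.b, #4
      %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
      %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
      %out   = icmp eq <vscale x 16 x i8> %a, %splat
      ret <vscale x 16 x i1> %out
    }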
|
D | sve2-mla-unpredicated.ll |
    10  define <vscale x 8 x i16> @smlalb_i16(<vscale x 8 x i16> %a,
    11  <vscale x 16 x i8> %b,
    12  <vscale x 16 x i8> %c) {
    16  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.smlalb.nxv8i16(<vscale x 8 x i16> %a,
    17  <vscale x 16 x i8> %b,
    18  <vscale x 16 x i8> %c)
    19  ret <vscale x 8 x i16> %res
    22  define <vscale x 4 x i32> @smlalb_i32(<vscale x 4 x i32> %a,
    23  <vscale x 8 x i16> %b,
    24  <vscale x 8 x i16> %c) {
    [all …]
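These are SVE2 widening multiply-accumulate intrinsics: the accumulator has elements twice as wide as the two multiplicand vectors, and the "b" suffix selects the bottom (even) source elements. A reconstructed, self-contained sketch of the first function follows; only the call itself comes from the preview, the rest is assumed.

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

    ; Widening MLA: even i8 elements of %b and %c are multiplied and
    ; accumulated into the i16 accumulator %a.
    define <vscale x 8 x i16> @smlalb_i16(<vscale x 8 x i16> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
    ; CHECK-LABEL: smlalb_i16:
    ; CHECK: smlalb z0.h, z1.b, z2.b
      %res = call <vscale x 8 x i16> @llvm.aarch64.sve.smlalb.nxv8i16(<vscale x 8 x i16> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
      ret <vscale x 8 x i16> %res
    }

    declare <vscale x 8 x i16> @llvm.aarch64.sve.smlalb.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)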
|
D | sve2-intrinsics-uniform-dsp.ll |
    11  define <vscale x 16 x i8> @saba_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8>…
    15  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.saba.nxv16i8(<vscale x 16 x i8> %a,
    16  <vscale x 16 x i8> %b,
    17  <vscale x 16 x i8> %c)
    18  ret <vscale x 16 x i8> %out
    21  define <vscale x 8 x i16> @saba_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16…
    25  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.saba.nxv8i16(<vscale x 8 x i16> %a,
    26  <vscale x 8 x i16> %b,
    27  <vscale x 8 x i16> %c)
    28  ret <vscale x 8 x i16> %out
    [all …]
|
D | sve-int-arith-pred.ll |
     7  define <vscale x 16 x i8> @add_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8>…
    11  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg,
    12  <vscale x 16 x i8> %a,
    13  <vscale x 16 x i8> %b)
    14  ret <vscale x 16 x i8> %out
    17  define <vscale x 8 x i16> @add_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16>…
    21  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %pg,
    22  <vscale x 8 x i16> %a,
    23  <vscale x 8 x i16> %b)
    24  ret <vscale x 8 x i16> %out
    [all …]
|
D | sve2-bitwise-ternary.ll |
    10  define <vscale x 16 x i8> @eor3_i8(<vscale x 16 x i8> %a,
    11  <vscale x 16 x i8> %b,
    12  <vscale x 16 x i8> %c) {
    16  …%res = call <vscale x 16 x i8> @llvm.aarch64.sve.eor3.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 …
    17  ret <vscale x 16 x i8> %res
    20  define <vscale x 8 x i16> @eor3_i16(<vscale x 8 x i16> %a,
    21  <vscale x 8 x i16> %b,
    22  <vscale x 8 x i16> %c) {
    26  …%res = call <vscale x 8 x i16> @llvm.aarch64.sve.eor3.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x…
    27  ret <vscale x 8 x i16> %res
    [all …]
|
D | sve2-intrinsics-non-widening-pairwise-arith.ll |
    11  define <vscale x 16 x i8> @addp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8…
    15  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.addp.nxv16i8(<vscale x 16 x i1> %pg,
    16  <vscale x 16 x i8> %a,
    17  <vscale x 16 x i8> %b)
    18  ret <vscale x 16 x i8> %out
    21  define <vscale x 8 x i16> @addp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16…
    25  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.addp.nxv8i16(<vscale x 8 x i1> %pg,
    26  <vscale x 8 x i16> %a,
    27  <vscale x 8 x i16> %b)
    28  ret <vscale x 8 x i16> %out
    [all …]
|
D | sve2-intrinsics-widening-dsp.ll |
    11  define <vscale x 8 x i16> @sabalb_b(<vscale x 8 x i16> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8…
    15  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(<vscale x 8 x i16> %a,
    16  <vscale x 16 x i8> %b,
    17  <vscale x 16 x i8> %c)
    18  ret <vscale x 8 x i16> %out
    21  define <vscale x 4 x i32> @sabalb_h(<vscale x 4 x i32> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16…
    25  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalb.nxv4i32(<vscale x 4 x i32> %a,
    26  <vscale x 8 x i16> %b,
    27  <vscale x 8 x i16> %c)
    28  ret <vscale x 4 x i32> %out
    [all …]
|
D | sve-int-mad-pred.ll |
     7  define <vscale x 16 x i8> @mad_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8>…
    11  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.mad.nxv16i8(<vscale x 16 x i1> %pg,
    12  <vscale x 16 x i8> %a,
    13  <vscale x 16 x i8> %b,
    14  <vscale x 16 x i8> %c)
    15  ret <vscale x 16 x i8> %out
    18  define <vscale x 8 x i16> @mad_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16>…
    22  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mad.nxv8i16(<vscale x 8 x i1> %pg,
    23  <vscale x 8 x i16> %a,
    24  <vscale x 8 x i16> %b,
    [all …]
|
D | sve-intrinsics-fp-converts.ll |
    11  define <vscale x 8 x half> @fcvt_f16_f32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4…
    15  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half> %a,
    16  <vscale x 4 x i1> %pg,
    17  <vscale x 4 x float> %b)
    18  ret <vscale x 8 x half> %out
    21  define <vscale x 8 x half> @fcvt_f16_f64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2…
    25  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half> %a,
    26  <vscale x 2 x i1> %pg,
    27  <vscale x 2 x double> %b)
    28  ret <vscale x 8 x half> %out
    [all …]
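Note the operand order in these conversion intrinsics: the passthru value comes first, then a predicate whose element count matches the wider source type, then the source vector. A minimal sketch of the f32-to-f16 case is below; everything outside the call (RUN line, CHECK lines, declaration) is reconstructed and assumed.

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

    ; %a supplies the inactive lanes, %pg is a 4-element predicate governing
    ; the <vscale x 4 x float> source %b.
    define <vscale x 8 x half> @fcvt_f16_f32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
    ; CHECK-LABEL: fcvt_f16_f32:
    ; CHECK: fcvt z0.h, p0/m, z1.s
      %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
      ret <vscale x 8 x half> %out
    }

    declare <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x float>)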
|
D | sve-intrinsics-int-arith.ll |
    11  define <vscale x 16 x i8> @abs_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8>…
    15  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %a,
    16  <vscale x 16 x i1> %pg,
    17  <vscale x 16 x i8> %b)
    18  ret <vscale x 16 x i8> %out
    21  define <vscale x 8 x i16> @abs_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16>…
    25  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> %a,
    26  <vscale x 8 x i1> %pg,
    27  <vscale x 8 x i16> %b)
    28  ret <vscale x 8 x i16> %out
    [all …]
|
D | sve-intrinsics-shifts.ll |
    11  define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8>…
    15  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg,
    16  <vscale x 16 x i8> %a,
    17  <vscale x 16 x i8> %b)
    18  ret <vscale x 16 x i8> %out
    21  define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16>…
    25  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg,
    26  <vscale x 8 x i16> %a,
    27  <vscale x 8 x i16> %b)
    28  ret <vscale x 8 x i16> %out
    [all …]
|
D | sve-intrinsics-fp-arith-merging.ll |
    11  define <vscale x 8 x half> @fadd_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 …
    16  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
    17  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
    18  <vscale x 8 x half> %a_z,
    19  <vscale x 8 x half> %b)
    20  ret <vscale x 8 x half> %out
    23  define <vscale x 4 x float> @fadd_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x …
    28  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
    29  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
    30  <vscale x 4 x float> %a_z,
    [all …]
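The "_zero" tests wrap the first operand in a select against zeroinitializer before calling the ordinary predicated intrinsic; the backend is expected to recognise the idiom and emit a zeroing MOVPRFX before the arithmetic instruction. A sketch under that assumption (the CHECK lines are my guess at the expected output, not copied from the file):

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

    ; The select zeroes the inactive lanes of %a before the predicated add.
    define <vscale x 8 x half> @fadd_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
    ; CHECK-LABEL: fadd_h_zero:
    ; CHECK: movprfx z0.h, p0/z, z0.h
    ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
      %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
      %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a_z, <vscale x 8 x half> %b)
      ret <vscale x 8 x half> %out
    }

    declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)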
|
D | sve-int-imm.ll |
    12  define <vscale x 16 x i8> @add_i8_low(<vscale x 16 x i8> %a) {
    16  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
    17  …%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zero…
    18  %res = add <vscale x 16 x i8> %a, %splat
    19  ret <vscale x 16 x i8> %res
    22  define <vscale x 8 x i16> @add_i16_low(<vscale x 8 x i16> %a) {
    26  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
    27  …%splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroi…
    28  %res = add <vscale x 8 x i16> %a, %splat
    29  ret <vscale x 8 x i16> %res
    [all …]
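These functions add a splatted constant using plain IR rather than intrinsics; the point of the test is presumably that the constant folds into the immediate form of the instruction. A self-contained sketch of the i8 case, with zeroinitializer spelled out where the preview truncates it and the RUN/CHECK lines assumed:

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

    ; A generic IR 'add' of a splatted 30 should select the unpredicated
    ; immediate form of ADD.
    define <vscale x 16 x i8> @add_i8_low(<vscale x 16 x i8> %a) {
    ; CHECK-LABEL: add_i8_low:
    ; CHECK: add z0.b, z0.b, #30
      %elt   = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
      %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
      %res   = add <vscale x 16 x i8> %a, %splat
      ret <vscale x 16 x i8> %res
    }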
|
D | sve-pred-log.ll |
     7  define <vscale x 16 x i1> @vselect_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 …
    11  %res = select <vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd
    12  ret <vscale x 16 x i1> %res;
    15  define <vscale x 8 x i1> @vselect_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1>…
    19  %res = select <vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd
    20  ret <vscale x 8 x i1> %res;
    23  define <vscale x 4 x i1> @vselect_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1>…
    27  %res = select <vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd
    28  ret <vscale x 4 x i1> %res;
    31  define <vscale x 2 x i1> @vselect_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1>…
    [all …]
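Here the select operates directly on predicate (i1) vectors, which should lower to the SEL instruction on predicate registers. A sketch of the 16-lane case under that assumption; the RUN and CHECK lines are reconstructed, only the select itself appears in the preview.

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

    ; A select between two predicates, governed by a third, maps onto a
    ; predicate-register SEL.
    define <vscale x 16 x i1> @vselect_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
    ; CHECK-LABEL: vselect_16:
    ; CHECK: sel p0.b, p0, p1.b, p2.b
      %res = select <vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd
      ret <vscale x 16 x i1> %res
    }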
|
D | sve2-mla-indexed.ll |
    10  define <vscale x 4 x i32> @smlalb_i32(<vscale x 4 x i32> %a,
    11  <vscale x 8 x i16> %b,
    12  <vscale x 8 x i16> %c) {
    16  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.lane.nxv4i32(<vscale x 4 x i32> %a,
    17  <vscale x 8 x i16> %b,
    18  <vscale x 8 x i16> %c,
    20  ret <vscale x 4 x i32> %res
    23  define <vscale x 4 x i32> @smlalb_i32_2(<vscale x 4 x i32> %a,
    24  <vscale x 8 x i16> %b,
    25  <vscale x 8 x i16> %c) {
    [all …]
|
D | sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll |
    12  define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
    21  %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
    22  %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %base_load,
    24  <vscale x 2 x i1> %mask,
    25  <vscale x 2 x i64> undef)
    26  %base_store = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64> * %base, i64 -9
    27  call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %data,
    28  <vscale x 2 x i64>* %base_store,
    30  <vscale x 2 x i1> %mask)
    36  define void @test_masked_ldst_sv2i8(<vscale x 2 x i8> * %base, <vscale x 2 x i1> %mask) nounwind {
    [all …]
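The first function deliberately indexes the base pointer by whole-vector offsets (8 and -9) that fall outside the small signed immediate range of the scalar-plus-immediate contiguous load/store forms, so the address must be computed separately. The sketch below fills in the pieces the preview elides: the alignment operand of the masked load/store is assumed to be i32 8, the intrinsic names follow the unmangled spelling shown in the preview (newer LLVM releases mangle the pointer type into the name), and the RUN line and declarations are reconstructed.

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

    ; Offsets 8 and -9 vectors from %base are outside the immediate range of
    ; the scalar-plus-immediate addressing mode, forcing separate address
    ; computation.
    define void @imm_out_of_range(<vscale x 2 x i64>* %base, <vscale x 2 x i1> %mask) nounwind {
      %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
      %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %base_load,
                                                                i32 8,
                                                                <vscale x 2 x i1> %mask,
                                                                <vscale x 2 x i64> undef)
      %base_store = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 -9
      call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %data,
                                           <vscale x 2 x i64>* %base_store,
                                           i32 8,
                                           <vscale x 2 x i1> %mask)
      ret void
    }

    declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
    declare void @llvm.masked.store.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32, <vscale x 2 x i1>)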
|
D | sve-intrinsics-int-arith-imm.ll |
    10  define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a) {
    15  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
    16  %elt = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
    17  …%splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zero…
    18  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg,
    19  <vscale x 16 x i8> %a,
    20  <vscale x 16 x i8> %splat)
    21  ret <vscale x 16 x i8> %out
    24  define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a) {
    29  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
    [all …]
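These tests build their own all-true predicate with ptrue (pattern 31, the "all" pattern) and splat the constant before calling the predicated intrinsic, so the backend can fold the splat into the immediate form of the instruction. A reconstructed sketch of the i8 case, with the truncated splat line spelled out and the RUN/CHECK lines assumed:

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

    ; ptrue(all) + splat of -128 + predicated smax should fold into the
    ; unpredicated immediate form of SMAX.
    define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a) {
    ; CHECK-LABEL: smax_i8:
    ; CHECK: smax z0.b, z0.b, #-128
      %pg    = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
      %elt   = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
      %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
      %out   = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
      ret <vscale x 16 x i8> %out
    }

    declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)
    declare <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)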
|
D | sve2-intrinsics-binary-narrowing-add-sub.ll |
     9  define <vscale x 16 x i8> @addhnb_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
    13  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16> %a,
    14  <vscale x 8 x i16> %b)
    15  ret <vscale x 16 x i8> %out
    18  define <vscale x 8 x i16> @addhnb_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
    22  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnb.nxv4i32(<vscale x 4 x i32> %a,
    23  <vscale x 4 x i32> %b)
    24  ret <vscale x 8 x i16> %out
    27  define <vscale x 4 x i32> @addhnb_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
    31  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnb.nxv2i64(<vscale x 2 x i64> %a,
    [all …]
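These are SVE2 narrowing intrinsics: addhnb adds two wide vectors and keeps the high half of each sum, producing a result with twice as many, half-as-wide elements. A minimal reconstructed sketch of the first function (RUN line, CHECK lines and declaration are assumptions):

    ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

    ; Add-high-narrow (bottom): two nxv8i16 inputs yield an nxv16i8 result
    ; with the high halves of the sums in the even byte lanes.
    define <vscale x 16 x i8> @addhnb_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
    ; CHECK-LABEL: addhnb_h:
    ; CHECK: addhnb z0.b, z0.h, z1.h
      %out = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
      ret <vscale x 16 x i8> %out
    }

    declare <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)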
|
D | sve-intrinsics-shifts-merging.ll |
    11  define <vscale x 16 x i8> @asr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 …
    16  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
    17  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg,
    18  <vscale x 16 x i8> %a_z,
    19  <vscale x 16 x i8> %b)
    20  ret <vscale x 16 x i8> %out
    23  define <vscale x 8 x i16> @asr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x…
    28  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
    29  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg,
    30  <vscale x 8 x i16> %a_z,
    [all …]
|
D | sve-intrinsics-fp-compares.ll |
    11  define <vscale x 8 x i1> @facge_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half…
    15  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.facge.nxv8f16(<vscale x 8 x i1> %pg,
    16  <vscale x 8 x half> %a,
    17  <vscale x 8 x half> %b)
    18  ret <vscale x 8 x i1> %out
    21  define <vscale x 4 x i1> @facge_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x flo…
    25  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.facge.nxv4f32(<vscale x 4 x i1> %pg,
    26  <vscale x 4 x float> %a,
    27  <vscale x 4 x float> %b)
    28  ret <vscale x 4 x i1> %out
    [all …]
|
D | sve-intrinsics-counting-bits.ll |
    11  define <vscale x 16 x i8> @cls_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8>…
    15  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> %a,
    16  <vscale x 16 x i1> %pg,
    17  <vscale x 16 x i8> %b)
    18  ret <vscale x 16 x i8> %out
    21  define <vscale x 8 x i16> @cls_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16>…
    25  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> %a,
    26  <vscale x 8 x i1> %pg,
    27  <vscale x 8 x i16> %b)
    28  ret <vscale x 8 x i16> %out
    [all …]
|
D | sve-int-log-pred.ll |
     7  define <vscale x 16 x i8> @and_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 …
    11  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv2i8(<vscale x 16 x i1> %pg,
    12  <vscale x 16 x i8> %a,
    13  <vscale x 16 x i8> %b)
    14  ret <vscale x 16 x i8> %out
    17  define <vscale x 8 x i16> @and_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x…
    21  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv2i16(<vscale x 8 x i1> %pg,
    22  <vscale x 8 x i16> %a,
    23  <vscale x 8 x i16> %b)
    24  ret <vscale x 8 x i16> %out
    [all …]
|
D | sve-intrinsics-perm-select-matmul-fp64.ll |
    11  define <vscale x 16 x i8> @trn1_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) nounwind {
    15  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.trn1q.nxv16i8(<vscale x 16 x i8> %a,
    16  <vscale x 16 x i8> %b)
    17  ret <vscale x 16 x i8> %out
    20  define <vscale x 8 x i16> @trn1_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) nounwind {
    24  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.trn1q.nxv8i16(<vscale x 8 x i16> %a,
    25  <vscale x 8 x i16> %b)
    26  ret <vscale x 8 x i16> %out
    29  define <vscale x 4 x i32> @trn1_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) nounwind {
    33  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.trn1q.nxv4i32(<vscale x 4 x i32> %a,
    [all …]
|
/external/llvm-project/llvm/test/Other/ |
D | scalable-vectors-core-ir.ll |
     5  ;; Check supported instructions are accepted without dropping 'vscale'.
    13  define <vscale x 2 x double> @fneg(<vscale x 2 x double> %val) {
    15  ; CHECK: %r = fneg <vscale x 2 x double> %val
    16  ; CHECK-NEXT: ret <vscale x 2 x double> %r
    17  %r = fneg <vscale x 2 x double> %val
    18  ret <vscale x 2 x double> %r
    25  define <vscale x 8 x i16> @add(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
    27  ; CHECK: %r = add <vscale x 8 x i16> %a, %b
    28  ; CHECK-NEXT: ret <vscale x 8 x i16> %r
    29  %r = add <vscale x 8 x i16> %a, %b
    30  ret <vscale x 8 x i16> %r
    [all …]
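Unlike the CodeGen tests above, this one only checks that core IR instructions on scalable vectors survive a parse/print (and verify) round trip without dropping the vscale quantity, so its CHECK lines match IR rather than assembly. A complete version of the add case might look like the sketch below; the RUN line is not visible in the preview and is an assumption.

    ; RUN: opt -S -verify < %s | FileCheck %s

    ;; The scalable add must round-trip through the parser and printer
    ;; unchanged.
    define <vscale x 8 x i16> @add(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
    ; CHECK-LABEL: @add
    ; CHECK: %r = add <vscale x 8 x i16> %a, %b
    ; CHECK-NEXT: ret <vscale x 8 x i16> %r
      %r = add <vscale x 8 x i16> %a, %b
      ret <vscale x 8 x i16> %r
    }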
|