; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

;
; SMAX
;

define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: smax_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.b, z0.b, #-128
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: smax_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.h, z0.h, #127
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 127, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @smax_i16_out_of_range(<vscale x 8 x i16> %a) {
; CHECK-LABEL: smax_i16_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #129
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    mov z1.h, w8
; CHECK-NEXT:    smax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 129, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: smax_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.s, z0.s, #-128
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 -128, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @smax_i32_out_of_range(<vscale x 4 x i32> %a) {
; CHECK-LABEL: smax_i32_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-129
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    mov z1.s, w8
; CHECK-NEXT:    smax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 -129, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: smax_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.d, z0.d, #127
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 127, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @smax_i64_out_of_range(<vscale x 2 x i64> %a) {
; CHECK-LABEL: smax_i64_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #65535
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    smax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

;
; SMIN
;

define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: smin_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.b, z0.b, #127
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 127, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: smin_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.h, z0.h, #-128
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 -128, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @smin_i16_out_of_range(<vscale x 8 x i16> %a) {
; CHECK-LABEL: smin_i16_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-129
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    mov z1.h, w8
; CHECK-NEXT:    smin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 -129, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: smin_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.s, z0.s, #127
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 127, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @smin_i32_out_of_range(<vscale x 4 x i32> %a) {
; CHECK-LABEL: smin_i32_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #257
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    mov z1.s, w8
; CHECK-NEXT:    smin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: smin_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.d, z0.d, #-128
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 -128, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @smin_i64_out_of_range(<vscale x 2 x i64> %a) {
; CHECK-LABEL: smin_i64_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z1.d, #-256 // =0xffffffffffffff00
; CHECK-NEXT:    smin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 -256, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

;
; UMAX
;

define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: umax_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.b, z0.b, #0
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 0, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: umax_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.h, z0.h, #255
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @umax_i16_out_of_range(<vscale x 8 x i16> %a) {
; CHECK-LABEL: umax_i16_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #257
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    mov z1.h, w8
; CHECK-NEXT:    umax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: umax_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.s, z0.s, #0
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 0, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @umax_i32_out_of_range(<vscale x 4 x i32> %a) {
; CHECK-LABEL: umax_i32_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #257
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    mov z1.s, w8
; CHECK-NEXT:    umax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: umax_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.d, z0.d, #255
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @umax_i64_out_of_range(<vscale x 2 x i64> %a) {
; CHECK-LABEL: umax_i64_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #65535
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    umax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

;
; UMIN
;

define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: umin_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.b, z0.b, #255
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 255, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: umin_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.h, z0.h, #0
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @umin_i16_out_of_range(<vscale x 8 x i16> %a) {
; CHECK-LABEL: umin_i16_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #257
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    mov z1.h, w8
; CHECK-NEXT:    umin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 257, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: umin_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.s, z0.s, #255
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @umin_i32_out_of_range(<vscale x 4 x i32> %a) {
; CHECK-LABEL: umin_i32_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #257
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    mov z1.s, w8
; CHECK-NEXT:    umin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 257, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: umin_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.d, z0.d, #0
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 0, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @umin_i64_out_of_range(<vscale x 2 x i64> %a) {
; CHECK-LABEL: umin_i64_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #65535
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 65535, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

;
; SQADD
;

define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: sqadd_b_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.b, z0.b, #27 // =0x1b
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqadd_h_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.h, z0.h, #43 // =0x2b
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqadd_h_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.h, z0.h, #2048 // =0x800
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqadd_s_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqadd_s_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.s, z0.s, #8192 // =0x2000
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqadd_d_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.d, z0.d, #255 // =0xff
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqadd_d_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.d, z0.d, #65280 // =0xff00
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

;
; SQSUB
;

define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: sqsub_b_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.b, z0.b, #27 // =0x1b
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqsub_h_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.h, z0.h, #43 // =0x2b
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqsub_h_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.h, z0.h, #2048 // =0x800
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqsub_s_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqsub_s_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.s, z0.s, #8192 // =0x2000
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqsub_d_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.d, z0.d, #255 // =0xff
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqsub_d_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.d, z0.d, #65280 // =0xff00
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

;
; UQADD
;

define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: uqadd_b_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.b, z0.b, #27 // =0x1b
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqadd_h_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.h, z0.h, #43 // =0x2b
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqadd_h_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.h, z0.h, #2048 // =0x800
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqadd_s_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

;
; UQSUB
;

define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: uqsub_b_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.b, z0.b, #27 // =0x1b
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqsub_h_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.h, z0.h, #43 // =0x2b
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqsub_h_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.h, z0.h, #2048 // =0x800
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqsub_s_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqsub_s_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.s, z0.s, #8192 // =0x2000
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqsub_d_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.d, z0.d, #255 // =0xff
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqsub_d_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.d, z0.d, #65280 // =0xff00
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqadd_s_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.s, z0.s, #8192 // =0x2000
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqadd_d_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.d, z0.d, #255 // =0xff
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqadd_d_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.d, z0.d, #65280 // =0xff00
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

;
; ASR
;

define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: asr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, #8
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 16 x i8> undef, i8 9, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 16 x i8> @asr_i8_all_active(<vscale x 16 x i8> %a) {
; CHECK-LABEL: asr_i8_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.b, z0.b, #8
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

; Ensure we don't match a right shift by zero to the immediate form.
define <vscale x 16 x i8> @asr_i8_too_small(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: asr_i8_too_small:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.b, #0 // =0x0
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: asr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, #16
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 17, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @asr_i16_all_active(<vscale x 8 x i16> %a) {
; CHECK-LABEL: asr_i16_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.h, z0.h, #16
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

; Ensure we don't match a right shift by zero to the immediate form.
define <vscale x 8 x i16> @asr_i16_too_small(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: asr_i16_too_small:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.h, #0 // =0x0
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: asr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, #32
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 33, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @asr_i32_all_active(<vscale x 4 x i32> %a) {
; CHECK-LABEL: asr_i32_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.s, z0.s, #32
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

; Ensure we don't match a right shift by zero to the immediate form.
define <vscale x 4 x i32> @asr_i32_too_small(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: asr_i32_too_small:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.s, #0 // =0x0
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: asr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.d, p0/m, z0.d, #64
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 65, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @asr_i64_all_active(<vscale x 2 x i64> %a) {
; CHECK-LABEL: asr_i64_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.d, z0.d, #64
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

; Ensure we don't match a right shift by zero to the immediate form.
define <vscale x 2 x i64> @asr_i64_too_small(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: asr_i64_too_small:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.d, #0 // =0x0
; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i64> %out
}

;
; LSL
;

define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsl_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, #7
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 16 x i8> @lsl_i8_all_active(<vscale x 16 x i8> %a) {
; CHECK-LABEL: lsl_i8_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.b, z0.b, #7
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

; Ensure we don't match a left shift bigger than its bitwidth to the immediate form.
define <vscale x 16 x i8> @lsl_i8_too_big(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsl_i8_too_big:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.b, #8 // =0x8
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 16 x i8> @lsl_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsl_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, #0
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsl_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, #15
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @lsl_i16_all_active(<vscale x 8 x i16> %a) {
; CHECK-LABEL: lsl_i16_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.h, z0.h, #15
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

; Ensure we don't match a left shift bigger than its bitwidth to the immediate form.
define <vscale x 8 x i16> @lsl_i16_too_big(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsl_i16_too_big:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.h, #16 // =0x10
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @lsl_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsl_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, #0
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsl_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, #31
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @lsl_i32_all_active(<vscale x 4 x i32> %a) {
; CHECK-LABEL: lsl_i32_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.s, z0.s, #31
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

; Ensure we don't match a left shift bigger than its bitwidth to the immediate form.
define <vscale x 4 x i32> @lsl_i32_too_big(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsl_i32_too_big:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.s, #32 // =0x20
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @lsl_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsl_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, #0
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsl_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, #63
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 63, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @lsl_i64_all_active(<vscale x 2 x i64> %a) {
; CHECK-LABEL: lsl_i64_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.d, z0.d, #63
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 63, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

; Ensure we don't match a left shift bigger than its bitwidth to the immediate form.
define <vscale x 2 x i64> @lsl_i64_too_big(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsl_i64_too_big:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.d, #64 // =0x40
; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @lsl_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsl_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, #0
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i64> %out
}

;
; LSR
;

define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, #8
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 16 x i8> undef, i8 9, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 16 x i8> @lsr_i8_all_active(<vscale x 16 x i8> %a) {
; CHECK-LABEL: lsr_i8_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.b, z0.b, #8
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

; Ensure we don't match a right shift by zero to the immediate form.
define <vscale x 16 x i8> @lsr_i8_too_small(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsr_i8_too_small:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.b, #0 // =0x0
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, #16
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 8 x i16> undef, i16 17, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @lsr_i16_all_active(<vscale x 8 x i16> %a) {
; CHECK-LABEL: lsr_i16_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.h, z0.h, #16
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %elt = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
  ret <vscale x 8 x i16> %out
}

; Ensure we don't match a right shift by zero to the immediate form.
define <vscale x 8 x i16> @lsr_i16_too_small(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsr_i16_too_small:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.h, #0 // =0x0
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, #32
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 4 x i32> undef, i32 33, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @lsr_i32_all_active(<vscale x 4 x i32> %a) {
; CHECK-LABEL: lsr_i32_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.s, z0.s, #32
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %elt = insertelement <vscale x 4 x i32> undef, i32 32, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
  ret <vscale x 4 x i32> %out
}

; Ensure we don't match a right shift by zero to the immediate form.
define <vscale x 4 x i32> @lsr_i32_too_small(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsr_i32_too_small:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.s, #0 // =0x0
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, #64
; CHECK-NEXT:    ret
  %elt = insertelement <vscale x 2 x i64> undef, i64 65, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @lsr_i64_all_active(<vscale x 2 x i64> %a) {
; CHECK-LABEL: lsr_i64_all_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.d, z0.d, #64
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %elt = insertelement <vscale x 2 x i64> undef, i64 64, i64 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 2 x i64> %out
}

; Ensure we don't match a right shift by zero to the immediate form.
define <vscale x 2 x i64> @lsr_i64_too_small(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsr_i64_too_small:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.d, #0 // =0x0
; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 %pattern)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 %pattern)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 %pattern)