; RUN: llc < %s -mtriple aarch64-none-linux-gnu -mattr=+sve -stop-after=finalize-isel | FileCheck %s --check-prefix=CHECK

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-none-linux-gnu"

; These tests verify how SVE inline-asm operand constraints are lowered to
; register classes after instruction selection, by inspecting the COPYs
; feeding each INLINEASM in the finalize-isel MIR output:
;   'w'   -> zpr     (copied straight from $z0/$z1 below)
;   'x'   -> zpr_4b  (tests using "=w,w,x" expect a zpr_4b COPY)
;   'y'   -> zpr_3b  (tests using "=w,w,y" expect a zpr_3b COPY)
;   'Upl' -> ppr_3b, 'Upa' -> ppr  (predicate-register constraints)

; 'y' constraint on %Zm: expect the operand to be copied into zpr_3b.
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_3b = COPY [[ARG1]]
; CHECK: INLINEASM {{.*}} [[ARG4]]
define <vscale x 16 x i8> @test_svadd_i8(<vscale x 16 x i8> %Zn, <vscale x 16 x i8> %Zm) {
  %1 = tail call <vscale x 16 x i8> asm "add $0.b, $1.b, $2.b", "=w,w,y"(<vscale x 16 x i8> %Zn, <vscale x 16 x i8> %Zm)
  ret <vscale x 16 x i8> %1
}

; 'x' constraint on %Zm: expect the operand to be copied into zpr_4b.
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_4b = COPY [[ARG1]]
; CHECK: INLINEASM {{.*}} [[ARG4]]
define <vscale x 2 x i64> @test_svsub_i64(<vscale x 2 x i64> %Zn, <vscale x 2 x i64> %Zm) {
  %1 = tail call <vscale x 2 x i64> asm "sub $0.d, $1.d, $2.d", "=w,w,x"(<vscale x 2 x i64> %Zn, <vscale x 2 x i64> %Zm)
  ret <vscale x 2 x i64> %1
}

; Same 'y' -> zpr_3b lowering, with a floating-point element type.
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_3b = COPY [[ARG1]]
; CHECK: INLINEASM {{.*}} [[ARG4]]
define <vscale x 8 x half> @test_svfmul_f16(<vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm) {
  %1 = tail call <vscale x 8 x half> asm "fmul $0.h, $1.h, $2.h", "=w,w,y"(<vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm)
  ret <vscale x 8 x half> %1
}

; Same 'x' -> zpr_4b lowering, with a floating-point element type.
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_4b = COPY [[ARG1]]
; CHECK: INLINEASM {{.*}} [[ARG4]]
define <vscale x 4 x float> @test_svfmul_f(<vscale x 4 x float> %Zn, <vscale x 4 x float> %Zm) {
  %1 = tail call <vscale x 4 x float> asm "fmul $0.s, $1.s, $2.s", "=w,w,x"(<vscale x 4 x float> %Zn, <vscale x 4 x float> %Zm)
  ret <vscale x 4 x float> %1
}

; 'Upl' predicate constraint on %Pg: expect a COPY into ppr_3b.
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:ppr = COPY $p0
; CHECK: [[ARG4:%[0-9]+]]:ppr_3b = COPY [[ARG3]]
; CHECK: INLINEASM {{.*}} [[ARG4]]
define <vscale x 8 x half> @test_svfadd_f16(<vscale x 16 x i1> %Pg, <vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm) {
  %1 = tail call <vscale x 8 x half> asm "fadd $0.h, $1/m, $2.h, $3.h", "=w,@3Upl,w,w"(<vscale x 16 x i1> %Pg, <vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm)
  ret <vscale x 8 x half> %1
}

; 'Upa' predicate constraint on %Pg: expect the full ppr class (no narrowing).
; Also uses a "0" tied-operand constraint for %Zn.
; Function Attrs: nounwind readnone
; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG2:%[0-9]+]]:ppr = COPY $p0
; CHECK: [[ARG3:%[0-9]+]]:ppr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr = COPY [[ARG1]]
; CHECK: INLINEASM {{.*}} [[ARG3]]
define <vscale x 4 x i32> @test_incp(<vscale x 16 x i1> %Pg, <vscale x 4 x i32> %Zn) {
  %1 = tail call <vscale x 4 x i32> asm "incp $0.s, $1", "=w,@3Upa,0"(<vscale x 16 x i1> %Pg, <vscale x 4 x i32> %Zn)
  ret <vscale x 4 x i32> %1
}