; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
; Tests ARM NEON codegen: integer/fp negation (sub/fsub from zero -> vneg)
; and the saturating-negate intrinsics (llvm.arm.neon.vqneg.* -> vqneg),
; for both 64-bit (D-register) and 128-bit (Q-register) vector types.

define <8 x i8> @vnegs8(<8 x i8>* %A) nounwind {
;CHECK: vnegs8:
;CHECK: vneg.s8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = sub <8 x i8> zeroinitializer, %tmp1
	ret <8 x i8> %tmp2
}

define <4 x i16> @vnegs16(<4 x i16>* %A) nounwind {
;CHECK: vnegs16:
;CHECK: vneg.s16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = sub <4 x i16> zeroinitializer, %tmp1
	ret <4 x i16> %tmp2
}

define <2 x i32> @vnegs32(<2 x i32>* %A) nounwind {
;CHECK: vnegs32:
;CHECK: vneg.s32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = sub <2 x i32> zeroinitializer, %tmp1
	ret <2 x i32> %tmp2
}

define <2 x float> @vnegf32(<2 x float>* %A) nounwind {
;CHECK: vnegf32:
;CHECK: vneg.f32
	%tmp1 = load <2 x float>* %A
	%tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
	ret <2 x float> %tmp2
}

define <16 x i8> @vnegQs8(<16 x i8>* %A) nounwind {
;CHECK: vnegQs8:
;CHECK: vneg.s8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = sub <16 x i8> zeroinitializer, %tmp1
	ret <16 x i8> %tmp2
}

define <8 x i16> @vnegQs16(<8 x i16>* %A) nounwind {
;CHECK: vnegQs16:
;CHECK: vneg.s16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = sub <8 x i16> zeroinitializer, %tmp1
	ret <8 x i16> %tmp2
}

define <4 x i32> @vnegQs32(<4 x i32>* %A) nounwind {
;CHECK: vnegQs32:
;CHECK: vneg.s32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = sub <4 x i32> zeroinitializer, %tmp1
	ret <4 x i32> %tmp2
}

define <4 x float> @vnegQf32(<4 x float>* %A) nounwind {
;CHECK: vnegQf32:
;CHECK: vneg.f32
	%tmp1 = load <4 x float>* %A
	%tmp2 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1
	ret <4 x float> %tmp2
}

define <8 x i8> @vqnegs8(<8 x i8>* %A) nounwind {
;CHECK: vqnegs8:
;CHECK: vqneg.s8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = call <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8> %tmp1)
	ret <8 x i8> %tmp2
}

define <4 x i16> @vqnegs16(<4 x i16>* %A) nounwind {
;CHECK: vqnegs16:
;CHECK: vqneg.s16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = call <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16> %tmp1)
	ret <4 x i16> %tmp2
}

define <2 x i32> @vqnegs32(<2 x i32>* %A) nounwind {
;CHECK: vqnegs32:
;CHECK: vqneg.s32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = call <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32> %tmp1)
	ret <2 x i32> %tmp2
}

define <16 x i8> @vqnegQs8(<16 x i8>* %A) nounwind {
;CHECK: vqnegQs8:
;CHECK: vqneg.s8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = call <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8> %tmp1)
	ret <16 x i8> %tmp2
}

define <8 x i16> @vqnegQs16(<8 x i16>* %A) nounwind {
;CHECK: vqnegQs16:
;CHECK: vqneg.s16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = call <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16> %tmp1)
	ret <8 x i16> %tmp2
}

define <4 x i32> @vqnegQs32(<4 x i32>* %A) nounwind {
;CHECK: vqnegQs32:
;CHECK: vqneg.s32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = call <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32> %tmp1)
	ret <4 x i32> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32>) nounwind readnone