; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s

define <8 x i8> @vabds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vabds8:
;CHECK: vabd.s8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i8> %tmp3
}

define <4 x i16> @vabds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vabds16:
;CHECK: vabd.s16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i16> %tmp3
}

define <2 x i32> @vabds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vabds32:
;CHECK: vabd.s32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i32> %tmp3
}

define <8 x i8> @vabdu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vabdu8:
;CHECK: vabd.u8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i8> %tmp3
}

define <4 x i16> @vabdu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vabdu16:
;CHECK: vabd.u16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i16> %tmp3
}

define <2 x i32> @vabdu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vabdu32:
;CHECK: vabd.u32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i32> %tmp3
}

define <2 x float> @vabdf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK: vabdf32:
;CHECK: vabd.f32
	%tmp1 = load <2 x float>* %A
	%tmp2 = load <2 x float>* %B
	%tmp3 = call <2 x float> @llvm.arm.neon.vabds.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
	ret <2 x float> %tmp3
}

define <16 x i8> @vabdQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: vabdQs8:
;CHECK: vabd.s8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
	ret <16 x i8> %tmp3
}

define <8 x i16> @vabdQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: vabdQs16:
;CHECK: vabd.s16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i16> %tmp3
}

define <4 x i32> @vabdQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK: vabdQs32:
;CHECK: vabd.s32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i32> %tmp3
}

define <16 x i8> @vabdQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: vabdQu8:
;CHECK: vabd.u8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
	ret <16 x i8> %tmp3
}

define <8 x i16> @vabdQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: vabdQu16:
;CHECK: vabd.u16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i16> %tmp3
}

define <4 x i32> @vabdQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK: vabdQu32:
;CHECK: vabd.u32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i32> %tmp3
}

define <4 x float> @vabdQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK: vabdQf32:
;CHECK: vabd.f32
	%tmp1 = load <4 x float>* %A
	%tmp2 = load <4 x float>* %B
	%tmp3 = call <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
	ret <4 x float> %tmp3
}

declare <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

declare <2 x float> @llvm.arm.neon.vabds.v2f32(<2 x float>, <2 x float>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32>, <4 x i32>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone

declare <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float>, <4 x float>) nounwind readnone

define <8 x i16> @vabdls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vabdls8:
;CHECK: vabdl.s8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	%tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
	ret <8 x i16> %tmp4
}

define <4 x i32> @vabdls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vabdls16:
;CHECK: vabdl.s16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	%tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
	ret <4 x i32> %tmp4
}

define <2 x i64> @vabdls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vabdls32:
;CHECK: vabdl.s32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	%tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
	ret <2 x i64> %tmp4
}

define <8 x i16> @vabdlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vabdlu8:
;CHECK: vabdl.u8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	%tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
	ret <8 x i16> %tmp4
}

define <4 x i32> @vabdlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vabdlu16:
;CHECK: vabdl.u16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	%tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
	ret <4 x i32> %tmp4
}

define <2 x i64> @vabdlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vabdlu32:
;CHECK: vabdl.u32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	%tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
	ret <2 x i64> %tmp4
}