; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s

; Codegen tests for lowering <8 x i16>/<8 x half> masked gathers on MVE.
; Patterns the backend recognises collapse to a single vldrb/vldrh gather
; with a vector of per-lane offsets (e.g. "vldrb.u16 q0, [r0, q1]");
; patterns it does not recognise are scalarised into eight individual
; ldrb/ldrh loads plus lane inserts.
; Assertions are exact-match and regenerated by the script above — do not
; hand-edit the expected-output lines.

; i8 offsets... wait: <8 x i16> offsets loaded and zero-extended, i8 data
; gathered and zero-extended to i16. Supported: one offset load + one
; zero-extending byte gather.
define arm_aapcs_vfpcc <8 x i16> @zext_unscaled_i8_i16(i8* %base, <8 x i16>* %offptr) {
; CHECK-LABEL: zext_unscaled_i8_i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrh.u16 q1, [r1]
; CHECK-NEXT:    vldrb.u16 q0, [r0, q1]
; CHECK-NEXT:    bx lr
entry:
  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
  %offs.zext = zext <8 x i16> %offs to <8 x i32>
  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
  %gather.zext = zext <8 x i8> %gather to <8 x i16>
  ret <8 x i16> %gather.zext
}

; As above, but the <8 x i8> offsets feed the GEP directly with no explicit
; extension in the IR.  Not matched to a gather instruction: the offsets are
; widened with sign-extending loads (vldrb.s32 — GEP indices are
; sign-extended) and the gather is scalarised into eight ldrb loads.
define arm_aapcs_vfpcc <8 x i16> @zext_unscaled_i8_i16_noext(i8* %base, <8 x i8>* %offptr) {
; CHECK-LABEL: zext_unscaled_i8_i16_noext:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    vldrb.s32 q0, [r1]
; CHECK-NEXT:    vldrb.s32 q1, [r1, #4]
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    vmov r2, s3
; CHECK-NEXT:    vmov r3, s1
; CHECK-NEXT:    vmov r5, s0
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r1, s5
; CHECK-NEXT:    vmov r4, s7
; CHECK-NEXT:    ldrb.w r12, [r2]
; CHECK-NEXT:    vmov r2, s2
; CHECK-NEXT:    ldrb.w lr, [r3]
; CHECK-NEXT:    vmov r3, s6
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    ldrb r0, [r0]
; CHECK-NEXT:    vmov.16 q0[0], r5
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.16 q0[1], lr
; CHECK-NEXT:    ldrb r4, [r4]
; CHECK-NEXT:    ldrb r2, [r2]
; CHECK-NEXT:    ldrb r3, [r3]
; CHECK-NEXT:    vmov.16 q0[2], r2
; CHECK-NEXT:    vmov.16 q0[3], r12
; CHECK-NEXT:    vmov.16 q0[4], r0
; CHECK-NEXT:    vmov.16 q0[5], r1
; CHECK-NEXT:    vmov.16 q0[6], r3
; CHECK-NEXT:    vmov.16 q0[7], r4
; CHECK-NEXT:    vmovlb.u8 q0, q0
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
  %offs = load <8 x i8>, <8 x i8>* %offptr, align 2
  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %offs
  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
  %gather.zext = zext <8 x i8> %gather to <8 x i16>
  ret <8 x i16> %gather.zext
}

; i16 gather with sign-extended i8 offsets scaled by the element size.
; Not matched: offsets are shifted left by 1 (x2 element scale), added to
; the base, and the loads are scalarised into eight ldrh.
define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_sext(i16* %base, <8 x i8>* %offptr) {
; CHECK-LABEL: scaled_v8i16_sext:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    vldrb.s32 q0, [r1]
; CHECK-NEXT:    vldrb.s32 q1, [r1, #4]
; CHECK-NEXT:    vshl.i32 q0, q0, #1
; CHECK-NEXT:    vshl.i32 q1, q1, #1
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    vmov r2, s2
; CHECK-NEXT:    vmov r3, s3
; CHECK-NEXT:    vmov r5, s1
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r1, s5
; CHECK-NEXT:    vmov r4, s7
; CHECK-NEXT:    ldrh.w r12, [r2]
; CHECK-NEXT:    vmov r2, s0
; CHECK-NEXT:    ldrh.w lr, [r3]
; CHECK-NEXT:    vmov r3, s6
; CHECK-NEXT:    ldrh r5, [r5]
; CHECK-NEXT:    ldrh r0, [r0]
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    ldrh r4, [r4]
; CHECK-NEXT:    ldrh r2, [r2]
; CHECK-NEXT:    ldrh r3, [r3]
; CHECK-NEXT:    vmov.16 q0[0], r2
; CHECK-NEXT:    vmov.16 q0[1], r5
; CHECK-NEXT:    vmov.16 q0[2], r12
; CHECK-NEXT:    vmov.16 q0[3], lr
; CHECK-NEXT:    vmov.16 q0[4], r0
; CHECK-NEXT:    vmov.16 q0[5], r1
; CHECK-NEXT:    vmov.16 q0[6], r3
; CHECK-NEXT:    vmov.16 q0[7], r4
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
  %offs = load <8 x i8>, <8 x i8>* %offptr, align 2
  %offs.sext = sext <8 x i8> %offs to <8 x i16>
  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %offs.sext
  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
  ret <8 x i16> %gather
}

; Same scaled case but with zero-extended offsets; identical scalarised
; shape apart from the unsigned offset widening (vldrb.u32).
define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_zext(i16* %base, <8 x i8>* %offptr) {
; CHECK-LABEL: scaled_v8i16_zext:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    vldrb.u32 q0, [r1]
; CHECK-NEXT:    vldrb.u32 q1, [r1, #4]
; CHECK-NEXT:    vshl.i32 q0, q0, #1
; CHECK-NEXT:    vshl.i32 q1, q1, #1
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    vmov r2, s2
; CHECK-NEXT:    vmov r3, s3
; CHECK-NEXT:    vmov r5, s1
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r1, s5
; CHECK-NEXT:    vmov r4, s7
; CHECK-NEXT:    ldrh.w r12, [r2]
; CHECK-NEXT:    vmov r2, s0
; CHECK-NEXT:    ldrh.w lr, [r3]
; CHECK-NEXT:    vmov r3, s6
; CHECK-NEXT:    ldrh r5, [r5]
; CHECK-NEXT:    ldrh r0, [r0]
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    ldrh r4, [r4]
; CHECK-NEXT:    ldrh r2, [r2]
; CHECK-NEXT:    ldrh r3, [r3]
; CHECK-NEXT:    vmov.16 q0[0], r2
; CHECK-NEXT:    vmov.16 q0[1], r5
; CHECK-NEXT:    vmov.16 q0[2], r12
; CHECK-NEXT:    vmov.16 q0[3], lr
; CHECK-NEXT:    vmov.16 q0[4], r0
; CHECK-NEXT:    vmov.16 q0[5], r1
; CHECK-NEXT:    vmov.16 q0[6], r3
; CHECK-NEXT:    vmov.16 q0[7], r4
; CHECK-NEXT:    pop {r4, r5, r7, pc}
entry:
  %offs = load <8 x i8>, <8 x i8>* %offptr, align 2
  %offs.zext = zext <8 x i8> %offs to <8 x i16>
  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %offs.zext
  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
  ret <8 x i16> %gather
}

; As zext_unscaled_i8_i16 but the gathered bytes are sign-extended:
; selects the signed gather form vldrb.s16.
define arm_aapcs_vfpcc <8 x i16> @sext_unscaled_i8_i16(i8* %base, <8 x i16>* %offptr) {
; CHECK-LABEL: sext_unscaled_i8_i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrh.u16 q1, [r1]
; CHECK-NEXT:    vldrb.s16 q0, [r0, q1]
; CHECK-NEXT:    bx lr
entry:
  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
  %offs.zext = zext <8 x i16> %offs to <8 x i32>
  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
  %gather.sext = sext <8 x i8> %gather to <8 x i16>
  ret <8 x i16> %gather.sext
}

; Full i16 loads through byte-offset (unscaled) pointers, built via a
; pointer bitcast: supported as a single vldrh.u16 gather.
define arm_aapcs_vfpcc <8 x i16> @unscaled_i16_i16(i8* %base, <8 x i16>* %offptr) {
; CHECK-LABEL: unscaled_i16_i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrh.u16 q1, [r1]
; CHECK-NEXT:    vldrh.u16 q0, [r0, q1]
; CHECK-NEXT:    bx lr
entry:
  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
  %offs.zext = zext <8 x i16> %offs to <8 x i32>
  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
  ret <8 x i16> %gather
}

; Half-precision float variant of the unscaled i16 gather; the same
; vldrh.u16 instruction is used for the f16 payload.
define arm_aapcs_vfpcc <8 x half> @unscaled_f16_i16(i8* %base, <8 x i16>* %offptr) {
; CHECK-LABEL: unscaled_f16_i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrh.u16 q1, [r1]
; CHECK-NEXT:    vldrh.u16 q0, [r0, q1]
; CHECK-NEXT:    bx lr
entry:
  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
  %offs.zext = zext <8 x i16> %offs to <8 x i32>
  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x half*>
  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
  ret <8 x half> %gather
}

; i8 offsets explicitly zero-extended in the IR: both the offset load and
; the gather itself fold into zero-extending vldrb.u16 instructions.
define arm_aapcs_vfpcc <8 x i16> @zext_unsigned_unscaled_i8_i8(i8* %base, <8 x i8>* %offptr) {
; CHECK-LABEL: zext_unsigned_unscaled_i8_i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrb.u16 q1, [r1]
; CHECK-NEXT:    vldrb.u16 q0, [r0, q1]
; CHECK-NEXT:    bx lr
entry:
  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
  %offs.zext = zext <8 x i8> %offs to <8 x i32>
  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
  %gather.zext = zext <8 x i8> %gather to <8 x i16>
  ret <8 x i16> %gather.zext
}

; Same zero-extended i8 offsets, but the gathered result is sign-extended:
; unsigned offset load plus signed gather (vldrb.s16).
define arm_aapcs_vfpcc <8 x i16> @sext_unsigned_unscaled_i8_i8(i8* %base, <8 x i8>* %offptr) {
; CHECK-LABEL: sext_unsigned_unscaled_i8_i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrb.u16 q1, [r1]
; CHECK-NEXT:    vldrb.s16 q0, [r0, q1]
; CHECK-NEXT:    bx lr
entry:
  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
  %offs.zext = zext <8 x i8> %offs to <8 x i32>
  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
  %gather.sext = sext <8 x i8> %gather to <8 x i16>
  ret <8 x i16> %gather.sext
}

; i16 loads through byte pointers offset by zero-extended i8 offsets:
; vldrb.u16 offset load + vldrh.u16 gather.
define arm_aapcs_vfpcc <8 x i16> @unsigned_unscaled_i16_i8(i8* %base, <8 x i8>* %offptr) {
; CHECK-LABEL: unsigned_unscaled_i16_i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrb.u16 q1, [r1]
; CHECK-NEXT:    vldrh.u16 q0, [r0, q1]
; CHECK-NEXT:    bx lr
entry:
  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
  %offs.zext = zext <8 x i8> %offs to <8 x i32>
  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
  ret <8 x i16> %gather
}

; f16 variant of the previous case; same instruction selection.
define arm_aapcs_vfpcc <8 x half> @unsigned_unscaled_f16_i8(i8* %base, <8 x i8>* %offptr) {
; CHECK-LABEL: unsigned_unscaled_f16_i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrb.u16 q1, [r1]
; CHECK-NEXT:    vldrh.u16 q0, [r0, q1]
; CHECK-NEXT:    bx lr
entry:
  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
  %offs.zext = zext <8 x i8> %offs to <8 x i32>
  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x half*>
  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
  ret <8 x half> %gather
}

; Masked-gather intrinsic declarations used by the tests above.
declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>) #1
declare <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>) #1
declare <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*>, i32, <8 x i1>, <8 x half>) #1