; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs -o - %s | FileCheck %s

define arm_aapcs_vfpcc <4 x float> @foo_v4i16(<4 x i16>* nocapture readonly %pSrc, <4 x i16> %a) {
; CHECK-LABEL: foo_v4i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vpt.s32 lt, q0, zr
; CHECK-NEXT:    vldrht.u32 q0, [r0]
; CHECK-NEXT:    vcvt.f32.u32 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %active.lane.mask = icmp slt <4 x i16> %a, zeroinitializer
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %pSrc, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
  %0 = uitofp <4 x i16> %wide.masked.load to <4 x float>
  ret <4 x float> %0
}

define arm_aapcs_vfpcc <8 x half> @foo_v8i8(<8 x i8>* nocapture readonly %pSrc, i32 %blockSize, <8 x i8> %a) {
; CHECK-LABEL: foo_v8i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s8 q0, q0
; CHECK-NEXT:    vpt.s16 lt, q0, zr
; CHECK-NEXT:    vldrbt.u16 q0, [r0]
; CHECK-NEXT:    vcvt.f16.u16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %active.lane.mask = icmp slt <8 x i8> %a, zeroinitializer
  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %pSrc, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
  %0 = uitofp <8 x i8> %wide.masked.load to <8 x half>
  ret <8 x half> %0
}

define arm_aapcs_vfpcc <4 x float> @foo_v4i8(<4 x i8>* nocapture readonly %pSrc, i32 %blockSize, <4 x i8> %a) {
; CHECK-LABEL: foo_v4i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s8 q0, q0
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vpt.s32 lt, q0, zr
; CHECK-NEXT:    vldrbt.u32 q0, [r0]
; CHECK-NEXT:    vcvt.f32.u32 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %active.lane.mask = icmp slt <4 x i8> %a, zeroinitializer
  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %pSrc, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
  %0 = uitofp <4 x i8> %wide.masked.load to <4 x float>
  ret <4 x float> %0
}

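; For v4i32 -> v4f64 there is no MVE conversion instruction, so the checks
; below expect the predicated vldrwt.u32 to be followed by a per-lane
; zero-extend to i64 (vand with 0xffffffff) and four __aeabi_ul2d libcalls.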
define arm_aapcs_vfpcc <4 x double> @foo_v4i32(<4 x i32>* nocapture readonly %pSrc, i32 %blockSize, <4 x i32> %a) {
; CHECK-LABEL: foo_v4i32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    vpt.s32 lt, q0, zr
; CHECK-NEXT:    vldrwt.u32 q4, [r0]
; CHECK-NEXT:    vmov.f64 d0, d8
; CHECK-NEXT:    vmov.i64 q5, #0xffffffff
; CHECK-NEXT:    vmov.f32 s2, s17
; CHECK-NEXT:    vand q6, q0, q5
; CHECK-NEXT:    vmov r0, s24
; CHECK-NEXT:    vmov r1, s25
; CHECK-NEXT:    bl __aeabi_ul2d
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r5, r1
; CHECK-NEXT:    vmov r0, s26
; CHECK-NEXT:    vmov r1, s27
; CHECK-NEXT:    bl __aeabi_ul2d
; CHECK-NEXT:    vmov.f64 d0, d9
; CHECK-NEXT:    vmov.f32 s2, s19
; CHECK-NEXT:    vand q0, q0, q5
; CHECK-NEXT:    vmov d9, r0, r1
; CHECK-NEXT:    vmov r2, s2
; CHECK-NEXT:    vmov r3, s3
; CHECK-NEXT:    vmov r6, s0
; CHECK-NEXT:    vmov r7, s1
; CHECK-NEXT:    vmov d8, r4, r5
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    mov r1, r3
; CHECK-NEXT:    bl __aeabi_ul2d
; CHECK-NEXT:    vmov d11, r0, r1
; CHECK-NEXT:    mov r0, r6
; CHECK-NEXT:    mov r1, r7
; CHECK-NEXT:    bl __aeabi_ul2d
; CHECK-NEXT:    vmov d10, r0, r1
; CHECK-NEXT:    vmov q0, q4
; CHECK-NEXT:    vmov q1, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
entry:
  %active.lane.mask = icmp slt <4 x i32> %a, zeroinitializer
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %pSrc, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %0 = uitofp <4 x i32> %wide.masked.load to <4 x double>
  ret <4 x double> %0
}

declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)

declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)

declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)

declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)