; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs -tail-predication=enabled -o - %s | FileCheck %s

; Struct layout: { frame_size, ps_size, frame (half*), inbuf (half*) }.
%struct.SpeexPreprocessState_ = type { i32, i32, half*, half* }

; Two back-to-back vctp16-predicated do/while loops (a masked f16 copy, then a
; masked s16->f16 convert+scale by 2^-9 = 0xH1800). Both should be converted to
; hardware tail-predicated loops (dlstp.16 / letp) rather than dls/le with
; explicit vctp predication.
define void @foo(%struct.SpeexPreprocessState_* nocapture readonly %st, i16* %x) {
; CHECK-LABEL: foo:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    ldrd r12, r4, [r0]
; CHECK-NEXT:    ldrd r2, r3, [r0, #8]
; CHECK-NEXT:    rsb r12, r12, r4, lsl #1
; CHECK-NEXT:    mov r4, r12
; CHECK-NEXT:    dlstp.16 lr, r12
; CHECK-NEXT:  .LBB0_1: @ %do.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrh.u16 q0, [r3], #16
; CHECK-NEXT:    vstrh.16 q0, [r2], #16
; CHECK-NEXT:    letp lr, .LBB0_1
; CHECK-NEXT:  @ %bb.2: @ %do.end
; CHECK-NEXT:    ldr r2, [r0]
; CHECK-NEXT:    ldr r0, [r0, #8]
; CHECK-NEXT:    vmov.i16 q0, #0x1800
; CHECK-NEXT:    add.w r0, r0, r12, lsl #1
; CHECK-NEXT:    dlstp.16 lr, r2
; CHECK-NEXT:  .LBB0_3: @ %do.body6
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrh.u16 q1, [r1], #16
; CHECK-NEXT:    vcvt.f16.s16 q1, q1
; CHECK-NEXT:    vmul.f16 q1, q1, q0
; CHECK-NEXT:    vstrh.16 q1, [r0], #16
; CHECK-NEXT:    letp lr, .LBB0_3
; CHECK-NEXT:  @ %bb.4: @ %do.end13
; CHECK-NEXT:    pop {r4, pc}
entry:
  ; sub = 2*ps_size - frame_size: element count for the first (copy) loop.
  %ps_size = getelementptr inbounds %struct.SpeexPreprocessState_, %struct.SpeexPreprocessState_* %st, i32 0, i32 1
  %0 = load i32, i32* %ps_size, align 4
  %mul = shl nsw i32 %0, 1
  %frame_size = getelementptr inbounds %struct.SpeexPreprocessState_, %struct.SpeexPreprocessState_* %st, i32 0, i32 0
  %1 = load i32, i32* %frame_size, align 4
  %sub = sub nsw i32 %mul, %1
  %inbuf = getelementptr inbounds %struct.SpeexPreprocessState_, %struct.SpeexPreprocessState_* %st, i32 0, i32 3
  %2 = load half*, half** %inbuf, align 4
  %frame = getelementptr inbounds %struct.SpeexPreprocessState_, %struct.SpeexPreprocessState_* %st, i32 0, i32 2
  %3 = load half*, half** %frame, align 4
  br label %do.body

do.body:                                          ; preds = %do.body, %entry
  ; Loop 1: masked copy of %sub f16 elements, 8 lanes per iteration,
  ; lanes predicated by vctp16 on the remaining count.
  %pinbuff16.0 = phi half* [ %2, %entry ], [ %add.ptr, %do.body ]
  %blkCnt.0 = phi i32 [ %sub, %entry ], [ %sub2, %do.body ]
  %pframef16.0 = phi half* [ %3, %entry ], [ %add.ptr1, %do.body ]
  %4 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %blkCnt.0)
  %5 = bitcast half* %pinbuff16.0 to <8 x half>*
  %6 = tail call fast <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %5, i32 2, <8 x i1> %4, <8 x half> zeroinitializer)
  %7 = bitcast half* %pframef16.0 to <8 x half>*
  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %6, <8 x half>* %7, i32 2, <8 x i1> %4)
  %add.ptr = getelementptr inbounds half, half* %pinbuff16.0, i32 8
  %add.ptr1 = getelementptr inbounds half, half* %pframef16.0, i32 8
  %sub2 = add nsw i32 %blkCnt.0, -8
  %cmp = icmp sgt i32 %blkCnt.0, 8
  br i1 %cmp, label %do.body, label %do.end

do.end:                                           ; preds = %do.body
  ; Second loop writes frame+sub for frame_size elements.
  %8 = load half*, half** %frame, align 4
  %add.ptr4 = getelementptr inbounds half, half* %8, i32 %sub
  %9 = load i32, i32* %frame_size, align 4
  br label %do.body6

do.body6:                                         ; preds = %do.body6, %do.end
  ; Loop 2: masked s16 load, predicated convert to f16, scale by 0xH1800,
  ; masked store — all under the same vctp16 predicate.
  %px.0 = phi i16* [ %x, %do.end ], [ %add.ptr8, %do.body6 ]
  %blkCnt.1 = phi i32 [ %9, %do.end ], [ %sub10, %do.body6 ]
  %pframef16.1 = phi half* [ %add.ptr4, %do.end ], [ %add.ptr9, %do.body6 ]
  %10 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %blkCnt.1)
  %11 = bitcast i16* %px.0 to <8 x i16>*
  %12 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %11, i32 2, <8 x i1> %10, <8 x i16> zeroinitializer)
  %13 = tail call fast <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> %12, i32 0, <8 x i1> %10, <8 x half> undef)
  %14 = tail call fast <8 x half> @llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half> %13, <8 x half> <half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800>, <8 x i1> %10, <8 x half> undef)
  %15 = bitcast half* %pframef16.1 to <8 x half>*
  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %14, <8 x half>* %15, i32 2, <8 x i1> %10)
  %add.ptr8 = getelementptr inbounds i16, i16* %px.0, i32 8
  %add.ptr9 = getelementptr inbounds half, half* %pframef16.1, i32 8
  %sub10 = add nsw i32 %blkCnt.1, -8
  %cmp12 = icmp sgt i32 %blkCnt.1, 8
  br i1 %cmp12, label %do.body6, label %do.end13

do.end13:                                         ; preds = %do.body6
  ret void
}

declare <8 x i1> @llvm.arm.mve.vctp16(i32)

declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32 immarg, <8 x i1>, <8 x half>)

declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>)

declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)

declare <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16>, i32, <8 x i1>, <8 x half>)

declare <8 x half> @llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>)