; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-none-eabi -mattr=-neon | FileCheck %s --check-prefix=CHECK

; Checks soft-float legalization of vector fmul reductions on ARM without NEON:
; each reduction is expanded to a chain of __aeabi_fmul/__aeabi_dmul/__multf3
; libcalls (with __aeabi_h2f/__aeabi_f2h conversions for half).

declare half @llvm.vector.reduce.fmul.f16.v4f16(half, <4 x half>)
declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>)
declare double @llvm.vector.reduce.fmul.f64.v2f64(double, <2 x double>)
declare fp128 @llvm.vector.reduce.fmul.f128.v2f128(fp128, <2 x fp128>)

define half @test_v4f16(<4 x half> %a) nounwind {
; CHECK-LABEL: test_v4f16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    push {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    mov r4, #255
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    orr r4, r4, #65280
; CHECK-NEXT:    mov r5, r2
; CHECK-NEXT:    and r0, r3, r4
; CHECK-NEXT:    mov r6, r1
; CHECK-NEXT:    bl __aeabi_h2f
; CHECK-NEXT:    mov r8, r0
; CHECK-NEXT:    and r0, r5, r4
; CHECK-NEXT:    bl __aeabi_h2f
; CHECK-NEXT:    mov r5, r0
; CHECK-NEXT:    and r0, r7, r4
; CHECK-NEXT:    bl __aeabi_h2f
; CHECK-NEXT:    mov r7, r0
; CHECK-NEXT:    and r0, r6, r4
; CHECK-NEXT:    bl __aeabi_h2f
; CHECK-NEXT:    mov r1, r0
; CHECK-NEXT:    mov r0, r7
; CHECK-NEXT:    bl __aeabi_fmul
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_fmul
; CHECK-NEXT:    mov r1, r8
; CHECK-NEXT:    bl __aeabi_fmul
; CHECK-NEXT:    bl __aeabi_f2h
; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    mov pc, lr
  %b = call fast half @llvm.vector.reduce.fmul.f16.v4f16(half 1.0, <4 x half> %a)
  ret half %b
}

define float @test_v4f32(<4 x float> %a) nounwind {
; CHECK-LABEL: test_v4f32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r11, lr}
; CHECK-NEXT:    push {r4, r5, r11, lr}
; CHECK-NEXT:    mov r4, r3
; CHECK-NEXT:    mov r5, r2
; CHECK-NEXT:    bl __aeabi_fmul
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_fmul
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_fmul
; CHECK-NEXT:    pop {r4, r5, r11, lr}
; CHECK-NEXT:    mov pc, lr
  %b = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
  ret float %b
}

define float @test_v4f32_strict(<4 x float> %a) nounwind {
; CHECK-LABEL: test_v4f32_strict:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r11, lr}
; CHECK-NEXT:    push {r4, r5, r11, lr}
; CHECK-NEXT:    mov r4, r3
; CHECK-NEXT:    mov r5, r2
; CHECK-NEXT:    bl __aeabi_fmul
; CHECK-NEXT:    mov r1, r5
; CHECK-NEXT:    bl __aeabi_fmul
; CHECK-NEXT:    mov r1, r4
; CHECK-NEXT:    bl __aeabi_fmul
; CHECK-NEXT:    pop {r4, r5, r11, lr}
; CHECK-NEXT:    mov pc, lr
  %b = call float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
  ret float %b
}

define double @test_v2f64(<2 x double> %a) nounwind {
; CHECK-LABEL: test_v2f64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r11, lr}
; CHECK-NEXT:    push {r11, lr}
; CHECK-NEXT:    bl __aeabi_dmul
; CHECK-NEXT:    pop {r11, lr}
; CHECK-NEXT:    mov pc, lr
  %b = call fast double @llvm.vector.reduce.fmul.f64.v2f64(double 1.0, <2 x double> %a)
  ret double %b
}

define double @test_v2f64_strict(<2 x double> %a) nounwind {
; CHECK-LABEL: test_v2f64_strict:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r11, lr}
; CHECK-NEXT:    push {r11, lr}
; CHECK-NEXT:    bl __aeabi_dmul
; CHECK-NEXT:    pop {r11, lr}
; CHECK-NEXT:    mov pc, lr
  %b = call double @llvm.vector.reduce.fmul.f64.v2f64(double 1.0, <2 x double> %a)
  ret double %b
}

define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
; CHECK-LABEL: test_v2f128:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r11, lr}
; CHECK-NEXT:    push {r11, lr}
; CHECK-NEXT:    .pad #16
; CHECK-NEXT:    sub sp, sp, #16
; CHECK-NEXT:    ldr r12, [sp, #36]
; CHECK-NEXT:    str r12, [sp, #12]
; CHECK-NEXT:    ldr r12, [sp, #32]
; CHECK-NEXT:    str r12, [sp, #8]
; CHECK-NEXT:    ldr r12, [sp, #28]
; CHECK-NEXT:    str r12, [sp, #4]
; CHECK-NEXT:    ldr r12, [sp, #24]
; CHECK-NEXT:    str r12, [sp]
; CHECK-NEXT:    bl __multf3
; CHECK-NEXT:    add sp, sp, #16
; CHECK-NEXT:    pop {r11, lr}
; CHECK-NEXT:    mov pc, lr
  %b = call fast fp128 @llvm.vector.reduce.fmul.f128.v2f128(fp128 0xL00000000000000003fff00000000000000, <2 x fp128> %a)
  ret fp128 %b
}

define fp128 @test_v2f128_strict(<2 x fp128> %a) nounwind {
; CHECK-LABEL: test_v2f128_strict:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r11, lr}
; CHECK-NEXT:    push {r11, lr}
; CHECK-NEXT:    .pad #16
; CHECK-NEXT:    sub sp, sp, #16
; CHECK-NEXT:    ldr r12, [sp, #36]
; CHECK-NEXT:    str r12, [sp, #12]
; CHECK-NEXT:    ldr r12, [sp, #32]
; CHECK-NEXT:    str r12, [sp, #8]
; CHECK-NEXT:    ldr r12, [sp, #28]
; CHECK-NEXT:    str r12, [sp, #4]
; CHECK-NEXT:    ldr r12, [sp, #24]
; CHECK-NEXT:    str r12, [sp]
; CHECK-NEXT:    bl __multf3
; CHECK-NEXT:    add sp, sp, #16
; CHECK-NEXT:    pop {r11, lr}
; CHECK-NEXT:    mov pc, lr
  %b = call fp128 @llvm.vector.reduce.fmul.f128.v2f128(fp128 0xL00000000000000003fff00000000000000, <2 x fp128> %a)
  ret fp128 %b
}