; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK

declare half @llvm.vector.reduce.fadd.f16.v1f16(half, <1 x half>)
declare float @llvm.vector.reduce.fadd.f32.v1f32(float, <1 x float>)
declare double @llvm.vector.reduce.fadd.f64.v1f64(double, <1 x double>)
declare fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128, <1 x fp128>)

declare float @llvm.vector.reduce.fadd.f32.v3f32(float, <3 x float>)
declare float @llvm.vector.reduce.fadd.f32.v5f32(float, <5 x float>)
declare fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128, <2 x fp128>)
declare float @llvm.vector.reduce.fadd.f32.v16f32(float, <16 x float>)

define half @test_v1f16(<1 x half> %a) nounwind {
; CHECK-LABEL: test_v1f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %b = call reassoc half @llvm.vector.reduce.fadd.f16.v1f16(half -0.0, <1 x half> %a)
  ret half %b
}

define float @test_v1f32(<1 x float> %a) nounwind {
; CHECK-LABEL: test_v1f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT:    ret
  %b = call reassoc float @llvm.vector.reduce.fadd.f32.v1f32(float -0.0, <1 x float> %a)
  ret float %b
}

define double @test_v1f64(<1 x double> %a) nounwind {
; CHECK-LABEL: test_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %b = call reassoc double @llvm.vector.reduce.fadd.f64.v1f64(double -0.0, <1 x double> %a)
  ret double %b
}

define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
; CHECK-LABEL: test_v1f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %b = call reassoc fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 0xL00000000000000008000000000000000, <1 x fp128> %a)
  ret fp128 %b
}

define float @test_v3f32(<3 x float> %a) nounwind {
; CHECK-LABEL: test_v3f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-2147483648
; CHECK-NEXT:    fmov s1, w8
; CHECK-NEXT:    mov v0.s[3], v1.s[0]
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    fadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    faddp s0, v0.2s
; CHECK-NEXT:    ret
  %b = call reassoc float @llvm.vector.reduce.fadd.f32.v3f32(float -0.0, <3 x float> %a)
  ret float %b
}

define float @test_v5f32(<5 x float> %a) nounwind {
; CHECK-LABEL: test_v5f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT:    // kill: def $s1 killed $s1 def $q1
; CHECK-NEXT:    // kill: def $s2 killed $s2 def $q2
; CHECK-NEXT:    movi v5.4s, #128, lsl #24
; CHECK-NEXT:    mov v0.s[1], v1.s[0]
; CHECK-NEXT:    mov v0.s[2], v2.s[0]
; CHECK-NEXT:    // kill: def $s4 killed $s4 def $q4
; CHECK-NEXT:    // kill: def $s3 killed $s3 def $q3
; CHECK-NEXT:    mov v0.s[3], v3.s[0]
; CHECK-NEXT:    mov v5.s[0], v4.s[0]
; CHECK-NEXT:    fadd v0.4s, v0.4s, v5.4s
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    fadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    faddp s0, v0.2s
; CHECK-NEXT:    ret
  %b = call reassoc float @llvm.vector.reduce.fadd.f32.v5f32(float -0.0, <5 x float> %a)
  ret float %b
}

define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
; CHECK-LABEL: test_v2f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    bl __addtf3
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %b = call reassoc fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 0xL00000000000000008000000000000000, <2 x fp128> %a)
  ret fp128 %b
}

define float @test_v16f32(<16 x float> %a) nounwind {
; CHECK-LABEL: test_v16f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v1.4s, v1.4s, v3.4s
; CHECK-NEXT:    fadd v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    fadd v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    fadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    faddp s0, v0.2s
; CHECK-NEXT:    ret
  %b = call reassoc float @llvm.vector.reduce.fadd.f32.v16f32(float -0.0, <16 x float> %a)
  ret float %b
}