; RUN: llc -mtriple=arm-eabihf -mattr=+vfp2 %s -o - \
; RUN:   | FileCheck %s -check-prefix=VFP2

; RUN: llc -mtriple=arm-eabihf -mattr=+vfp3 %s -o - \
; RUN:   | FileCheck %s -check-prefix=VFP3

; RUN: llc -mtriple=arm-eabihf -mattr=+neon %s -o - \
; RUN:   | FileCheck %s -check-prefix=NEON

; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \
; RUN:   | FileCheck %s -check-prefix=A8

; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 -regalloc=basic %s -o - \
; RUN:   | FileCheck %s -check-prefix=A8

; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \
; RUN:   | FileCheck %s -check-prefix=A8U

; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \
; RUN:   | FileCheck %s -check-prefix=A8U

define float @t1(float %acc, float %a, float %b) nounwind {
entry:
; VFP2-LABEL: t1:
; VFP2: vnmla.f32

; VFP3-LABEL: t1:
; VFP3: vnmla.f32

; NEON-LABEL: t1:
; NEON: vnmla.f32

; A8U-LABEL: t1:
; A8U: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}

; A8-LABEL: t1:
; A8: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
  %0 = fmul float %a, %b
  %1 = fsub float -0.0, %0
  %2 = fsub float %1, %acc
  ret float %2
}

define float @t2(float %acc, float %a, float %b) nounwind {
entry:
; VFP2-LABEL: t2:
; VFP2: vnmla.f32

; VFP3-LABEL: t2:
; VFP3: vnmla.f32

; NEON-LABEL: t2:
; NEON: vnmla.f32

; A8U-LABEL: t2:
; A8U: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}

; A8-LABEL: t2:
; A8: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
  %0 = fmul float %a, %b
  %1 = fmul float -1.0, %0
  %2 = fsub float %1, %acc
  ret float %2
}

define double @t3(double %acc, double %a, double %b) nounwind {
entry:
; VFP2-LABEL: t3:
; VFP2: vnmla.f64

; VFP3-LABEL: t3:
; VFP3: vnmla.f64

; NEON-LABEL: t3:
; NEON: vnmla.f64

; A8U-LABEL: t3:
; A8U: vnmul.f64 d
; A8U: vsub.f64 d

; A8-LABEL: t3:
; A8: vnmul.f64 d
; A8: vsub.f64 d
  %0 = fmul double %a, %b
  %1 = fsub double -0.0, %0
  %2 = fsub double %1, %acc
  ret double %2
}

define double @t4(double %acc, double %a, double %b) nounwind {
entry:
; VFP2-LABEL: t4:
; VFP2: vnmla.f64

; VFP3-LABEL: t4:
; VFP3: vnmla.f64

; NEON-LABEL: t4:
; NEON: vnmla.f64

; A8U-LABEL: t4:
; A8U: vnmul.f64 d
; A8U: vsub.f64 d

; A8-LABEL: t4:
; A8: vnmul.f64 d
; A8: vsub.f64 d
  %0 = fmul double %a, %b
  %1 = fmul double -1.0, %0
  %2 = fsub double %1, %acc
  ret double %2
}

define double @t5(double %acc, double %a, double %b) nounwind {
entry:
; VFP2-LABEL: t5:
; VFP2: vnmla.f64

; VFP3-LABEL: t5:
; VFP3: vnmla.f64

; NEON-LABEL: t5:
; NEON: vnmla.f64

; A8U-LABEL: t5:
; A8U: vmul.f64 d
; A8U: vsub.f64 d

; A8-LABEL: t5:
; A8: vmul.f64 d
; A8: vsub.f64 d

  %0 = fsub double -0.0, %acc
  %1 = fmul double %a, %b
  %2 = fsub double %0, %1
  ret double %2
}

define float @t6(float %acc, float %a, float %b) nounwind {
entry:
; VFP2-LABEL: t6:
; VFP2: vnmla.f32

; VFP3-LABEL: t6:
; VFP3: vnmla.f32

; NEON-LABEL: t6:
; NEON: vnmla.f32

; A8U-LABEL: t6:
; A8U: vmul.f32 d
; A8U: vsub.f32 d

; A8-LABEL: t6:
; A8: vmul.f32 s
; A8: vsub.f32 s

  %0 = fsub float -0.0, %acc
  %1 = fmul float %a, %b
  %2 = fsub float %0, %1
  ret float %2
}