; RUN: llc < %s -mtriple=thumbv7-none-eabi   -mcpu=cortex-m3                    | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=NONE
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4                    | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7                    | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP  -check-prefix=FP-ARMv8
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7                    | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57                   | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8

declare double     @llvm.sqrt.f64(double %Val)
; sqrt lowers to a libcall without double-precision HW FP, and to vsqrt.f64 with it.
define double @sqrt_d(double %a) {
; CHECK-LABEL: sqrt_d:
; SOFT: {{(bl|b)}} sqrt
; HARD: vsqrt.f64 d0, d0
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}

declare double     @llvm.powi.f64(double %Val, i32 %power)
; powi always lowers to the compiler-rt __powidf2 libcall, even with HW FP.
define double @powi_d(double %a, i32 %b) {
; CHECK-LABEL: powi_d:
; SOFT: {{(bl|b)}} __powidf2
; HARD: b __powidf2
  %1 = call double @llvm.powi.f64(double %a, i32 %b)
  ret double %1
}

declare double     @llvm.sin.f64(double %Val)
; sin always lowers to the libm call; hard-float targets tail-call it.
define double @sin_d(double %a) {
; CHECK-LABEL: sin_d:
; SOFT: {{(bl|b)}} sin
; HARD: b sin
  %1 = call double @llvm.sin.f64(double %a)
  ret double %1
}

declare double     @llvm.cos.f64(double %Val)
; cos always lowers to the libm call; hard-float targets tail-call it.
define double @cos_d(double %a) {
; CHECK-LABEL: cos_d:
; SOFT: {{(bl|b)}} cos
; HARD: b cos
  %1 = call double @llvm.cos.f64(double %a)
  ret double %1
}

declare double     @llvm.pow.f64(double  %Val, double %power)
; pow always lowers to the libm call; hard-float targets tail-call it.
define double @pow_d(double %a, double %b) {
; CHECK-LABEL: pow_d:
; SOFT: {{(bl|b)}} pow
; HARD: b pow
  %1 = call double @llvm.pow.f64(double %a, double %b)
  ret double %1
}

declare double     @llvm.exp.f64(double %Val)
; exp always lowers to the libm call; hard-float targets tail-call it.
define double @exp_d(double %a) {
; CHECK-LABEL: exp_d:
; SOFT: {{(bl|b)}} exp
; HARD: b exp
  %1 = call double @llvm.exp.f64(double %a)
  ret double %1
}

declare double     @llvm.exp2.f64(double %Val)
; exp2 always lowers to the libm call; hard-float targets tail-call it.
define double @exp2_d(double %a) {
; CHECK-LABEL: exp2_d:
; SOFT: {{(bl|b)}} exp2
; HARD: b exp2
  %1 = call double @llvm.exp2.f64(double %a)
  ret double %1
}

declare double     @llvm.log.f64(double %Val)
; log always lowers to the libm call; hard-float targets tail-call it.
define double @log_d(double %a) {
; CHECK-LABEL: log_d:
; SOFT: {{(bl|b)}} log
; HARD: b log
  %1 = call double @llvm.log.f64(double %a)
  ret double %1
}

declare double     @llvm.log10.f64(double %Val)
; log10 always lowers to the libm call; hard-float targets tail-call it.
define double @log10_d(double %a) {
; CHECK-LABEL: log10_d:
; SOFT: {{(bl|b)}} log10
; HARD: b log10
  %1 = call double @llvm.log10.f64(double %a)
  ret double %1
}

declare double     @llvm.log2.f64(double %Val)
; log2 always lowers to the libm call; hard-float targets tail-call it.
define double @log2_d(double %a) {
; CHECK-LABEL: log2_d:
; SOFT: {{(bl|b)}} log2
; HARD: b log2
  %1 = call double @llvm.log2.f64(double %a)
  ret double %1
}

declare double     @llvm.fma.f64(double %a, double %b, double %c)
; fma lowers to the fma libcall without double HW FP, and to vfma.f64 with it.
define double @fma_d(double %a, double %b, double %c) {
; CHECK-LABEL: fma_d:
; SOFT: {{(bl|b)}} fma
; HARD: vfma.f64
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}

; FIXME: the FPv4-SP version is less efficient than the no-FPU version
declare double     @llvm.fabs.f64(double %Val)
; fabs clears the sign bit: plain bic with no FPU, a bfi sequence through core
; registers for single-precision-only FPUs, and vabs.f64 with double HW FP.
define double @abs_d(double %a) {
; CHECK-LABEL: abs_d:
; NONE: bic r1, r1, #-2147483648
; SP: vldr d1, .LCPI{{.*}}
; SP: vmov r0, r1, d0
; SP: vmov r2, r3, d1
; SP: lsrs r2, r3, #31
; SP: bfi r1, r2, #31, #1
; SP: vmov d0, r0, r1
; DP: vabs.f64 d0, d0
  %1 = call double @llvm.fabs.f64(double %a)
  ret double %1
}

declare double     @llvm.copysign.f64(double  %Mag, double  %Sgn)
; copysign transplants the sign bit: lsrs/bfi on core registers without NEON,
; or a vector sign-mask select (vmov.i32/vshl.i64/vbsl) with NEON.
define double @copysign_d(double %a, double %b) {
; CHECK-LABEL: copysign_d:
; SOFT: lsrs [[REG:r[0-9]+]], r3, #31
; SOFT: bfi r1, [[REG]], #31, #1
; VFP: lsrs [[REG:r[0-9]+]], r3, #31
; VFP: bfi r1, [[REG]], #31, #1
; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
; NEON: vshl.i64 [[REG]], [[REG]], #32
; NEON: vbsl [[REG]], d
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}

declare double     @llvm.floor.f64(double %Val)
; floor needs FP-ARMv8 rounding instructions (vrintm); older FPUs call libm.
define double @floor_d(double %a) {
; CHECK-LABEL: floor_d:
; SOFT: {{(bl|b)}} floor
; VFP4: b floor
; FP-ARMv8: vrintm.f64
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}

declare double     @llvm.ceil.f64(double %Val)
; ceil needs FP-ARMv8 rounding instructions (vrintp); older FPUs call libm.
define double @ceil_d(double %a) {
; CHECK-LABEL: ceil_d:
; SOFT: {{(bl|b)}} ceil
; VFP4: b ceil
; FP-ARMv8: vrintp.f64
  %1 = call double @llvm.ceil.f64(double %a)
  ret double %1
}

declare double     @llvm.trunc.f64(double %Val)
; trunc needs FP-ARMv8 rounding instructions (vrintz); older FPUs call libm.
; Fixed check-prefix typo: was "FFP4", which is not a prefix defined by any RUN
; line, so the VFP4 expectation was silently never checked.
define double @trunc_d(double %a) {
; CHECK-LABEL: trunc_d:
; SOFT: {{(bl|b)}} trunc
; VFP4: b trunc
; FP-ARMv8: vrintz.f64
  %1 = call double @llvm.trunc.f64(double %a)
  ret double %1
}

declare double     @llvm.rint.f64(double %Val)
; rint needs FP-ARMv8 rounding instructions (vrintx); older FPUs call libm.
define double @rint_d(double %a) {
; CHECK-LABEL: rint_d:
; SOFT: {{(bl|b)}} rint
; VFP4: b rint
; FP-ARMv8: vrintx.f64
  %1 = call double @llvm.rint.f64(double %a)
  ret double %1
}

declare double     @llvm.nearbyint.f64(double %Val)
; nearbyint needs FP-ARMv8 rounding instructions (vrintr); older FPUs call libm.
define double @nearbyint_d(double %a) {
; CHECK-LABEL: nearbyint_d:
; SOFT: {{(bl|b)}} nearbyint
; VFP4: b nearbyint
; FP-ARMv8: vrintr.f64
  %1 = call double @llvm.nearbyint.f64(double %a)
  ret double %1
}

declare double     @llvm.round.f64(double %Val)
; round needs FP-ARMv8 rounding instructions (vrinta); older FPUs call libm.
define double @round_d(double %a) {
; CHECK-LABEL: round_d:
; SOFT: {{(bl|b)}} round
; VFP4: b round
; FP-ARMv8: vrinta.f64
  %1 = call double @llvm.round.f64(double %a)
  ret double %1
}

declare double     @llvm.fmuladd.f64(double %a, double %b, double %c)
; fmuladd may be split into mul+add (libcalls or vmul/vadd) or fused into
; vmla.f64 where the target considers fusing profitable.
define double @fmuladd_d(double %a, double %b, double %c) {
; CHECK-LABEL: fmuladd_d:
; SOFT: bl __aeabi_dmul
; SOFT: bl __aeabi_dadd
; VFP4: vmul.f64
; VFP4: vadd.f64
; FP-ARMv8: vmla.f64
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}

declare i16 @llvm.convert.to.fp16.f64(double %a)
; double->half uses the __aeabi_d2h helper unless the FPU has direct f64->f16
; conversion (FP-ARMv8 vcvtb/vcvtt).
define i16 @d_to_h(double %a) {
; CHECK-LABEL: d_to_h:
; SOFT: bl __aeabi_d2h
; VFP4: bl __aeabi_d2h
; FP-ARMv8: vcvt{{[bt]}}.f16.f64
  %1 = call i16 @llvm.convert.to.fp16.f64(double %a)
  ret i16 %1
}

declare double @llvm.convert.from.fp16.f64(i16 %a)
; half->double: two libcalls with no FPU, half->float HW conversion plus a
; libcall for single-precision-only FPUs, two HW conversions on VFP4, and a
; single direct conversion on FP-ARMv8.
; Fixed check-prefix typo: was "VFPv4", which is not a prefix defined by any
; RUN line (they define "VFP4"), so these two expectations were dead.
define double @h_to_d(i16 %a) {
; CHECK-LABEL: h_to_d:
; NONE: bl __aeabi_h2f
; NONE: bl __aeabi_f2d
; SP: vcvt{{[bt]}}.f32.f16
; SP: bl __aeabi_f2d
; VFP4: vcvt{{[bt]}}.f32.f16
; VFP4: vcvt.f64.f32
; FP-ARMv8: vcvt{{[bt]}}.f64.f16
  %1 = call double @llvm.convert.from.fp16.f64(i16 %a)
  ret double %1
}