
Searched full:float (Results 1 – 25 of 8183) sorted by relevance


/external/webrtc/webrtc/modules/audio_processing/ns/
windows_private.h
15 static const float kHanning64w128[128] = {
64 static const float kBlocks80w128[128] = {
65 (float)0.00000000, (float)0.03271908, (float)0.06540313, (float)0.09801714, (float)0.13052619,
66 (float)0.16289547, (float)0.19509032, (float)0.22707626, (float)0.25881905, (float)0.29028468,
67 (float)0.32143947, (float)0.35225005, (float)0.38268343, (float)0.41270703, (float)0.44228869,
68 (float)0.47139674, (float)0.50000000, (float)0.52806785, (float)0.55557023, (float)0.58247770,
69 (float)0.60876143, (float)0.63439328, (float)0.65934582, (float)0.68359230, (float)0.70710678,
70 (float)0.72986407, (float)0.75183981, (float)0.77301045, (float)0.79335334, (float)0.81284668,
71 (float)0.83146961, (float)0.84920218, (float)0.86602540, (float)0.88192126, (float)0.89687274,
72 (float)0.91086382, (float)0.92387953, (float)0.93590593, (float)0.94693013, (float)0.95694034,
[all …]
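Note: the visible kBlocks80w128 entries above follow sin(pi * k / 96). A minimal C sketch that reproduces just those leading values, assuming that formula (an illustration only, not WebRTC's actual table-generation step):

#include <math.h>
#include <stdio.h>

/* Hypothetical generator: reprints the leading kBlocks80w128 entries shown
 * above as sin(pi * k / 96).  The rest of the 128-entry table is not in the
 * excerpt, so no rule is assumed for it here. */
int main(void) {
  const double kPi = 3.14159265358979323846;
  for (int k = 0; k < 40; ++k)
    printf("(float)%.8f,%c", sin(kPi * k / 96.0), (k % 5 == 4) ? '\n' : ' ');
  return 0;
}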
/external/llvm/test/CodeGen/X86/
large-gep-chain.ll
4 %0 = type { i32, float* }
24 %tmp = getelementptr inbounds float, float* null, i64 1
25 %tmp3 = getelementptr inbounds float, float* %tmp, i64 1
26 %tmp4 = getelementptr inbounds float, float* %tmp3, i64 1
27 %tmp5 = getelementptr inbounds float, float* %tmp4, i64 1
28 %tmp6 = getelementptr inbounds float, float* %tmp5, i64 1
29 %tmp7 = getelementptr inbounds float, float* %tmp6, i64 1
30 %tmp8 = getelementptr inbounds float, float* %tmp7, i64 1
31 %tmp9 = getelementptr inbounds float, float* %tmp8, i64 1
32 %tmp10 = getelementptr inbounds float, float* %tmp9, i64 1
[all …]
2008-07-19-movups-spills.ll
7 @0 = external global <4 x float>, align 1 ; <<4 x float>*>:0 [#uses=2]
8 @1 = external global <4 x float>, align 1 ; <<4 x float>*>:1 [#uses=1]
9 @2 = external global <4 x float>, align 1 ; <<4 x float>*>:2 [#uses=1]
10 @3 = external global <4 x float>, align 1 ; <<4 x float>*>:3 [#uses=1]
11 @4 = external global <4 x float>, align 1 ; <<4 x float>*>:4 [#uses=1]
12 @5 = external global <4 x float>, align 1 ; <<4 x float>*>:5 [#uses=1]
13 @6 = external global <4 x float>, align 1 ; <<4 x float>*>:6 [#uses=1]
14 @7 = external global <4 x float>, align 1 ; <<4 x float>*>:7 [#uses=1]
15 @8 = external global <4 x float>, align 1 ; <<4 x float>*>:8 [#uses=1]
16 @9 = external global <4 x float>, align 1 ; <<4 x float>*>:9 [#uses=1]
[all …]
fmul-combines.ll
5 define float @fmul2_f32(float %x) {
6 %y = fmul float %x, 2.0
7 ret float %y
15 define <4 x float> @fmul2_v4f32(<4 x float> %x) {
16 %y = fmul <4 x float> %x, <float 2.0, float 2.0, float 2.0, float 2.0>
17 ret <4 x float> %y
23 define <4 x float> @constant_fold_fmul_v4f32(<4 x float> %x) {
24 …%y = fmul <4 x float> <float 4.0, float 4.0, float 4.0, float 4.0>, <float 2.0, float 2.0, float 2…
25 ret <4 x float> %y
31 define <4 x float> @fmul0_v4f32(<4 x float> %x) #0 {
[all …]
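Note: fmul2_f32 above multiplies by 2.0, which is exact to rewrite as an addition. A small C analogue of the same pattern (whether a given backend actually emits an add, as the test's CHECK lines expect, is an assumption about that target):

/* x * 2.0f is exactly x + x in IEEE-754, so a backend may lower the
 * multiply as an addition; this mirrors fmul2_f32 / fmul2_v4f32 above. */
float fmul2_f32_c(float x) { return x * 2.0f; }

void fmul2_v4f32_c(float out[4], const float in[4]) {
  for (int i = 0; i < 4; ++i)
    out[i] = in[i] * 2.0f;
}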
/external/llvm/test/Transforms/InstCombine/
minnum.ll
3 declare float @llvm.minnum.f32(float, float) #0
4 declare float @llvm.minnum.v2f32(<2 x float>, <2 x float>) #0
5 declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>) #0
10 declare float @llvm.maxnum.f32(float, float) #0
13 ; CHECK-NEXT: ret float 1.000000e+00
14 define float @constant_fold_minnum_f32() #0 {
15 %x = call float @llvm.minnum.f32(float 1.0, float 2.0) #0
16 ret float %x
20 ; CHECK-NEXT: ret float 1.000000e+00
21 define float @constant_fold_minnum_f32_inv() #0 {
[all …]
x86-sse.ll
5 define float @test_rcp_ss_0(float %a) {
7 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> undef, float %a, i32 0
8 ; CHECK-NEXT: [[TMP2:%.*]] = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> [[TMP1]])
9 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
10 ; CHECK-NEXT: ret float [[TMP3]]
12 %1 = insertelement <4 x float> undef, float %a, i32 0
13 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1
14 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2
15 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3
16 %5 = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %4)
[all …]
maxnum.ll
3 declare float @llvm.maxnum.f32(float, float) #0
4 declare float @llvm.maxnum.v2f32(<2 x float>, <2 x float>) #0
5 declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>) #0
11 ; CHECK-NEXT: ret float 2.000000e+00
12 define float @constant_fold_maxnum_f32() #0 {
13 %x = call float @llvm.maxnum.f32(float 1.0, float 2.0) #0
14 ret float %x
18 ; CHECK-NEXT: ret float 2.000000e+00
19 define float @constant_fold_maxnum_f32_inv() #0 {
20 %x = call float @llvm.maxnum.f32(float 2.0, float 1.0) #0
[all …]
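Note: the minnum.ll and maxnum.ll excerpts above check that llvm.minnum/llvm.maxnum calls with constant arguments fold to a constant. A C analogue using fminf/fmaxf, which follow the same IEEE-754 rule of returning the non-NaN operand (whether the compiler folds these at build time is up to the implementation):

#include <math.h>

/* Mirrors constant_fold_minnum_f32 and constant_fold_maxnum_f32 above:
 * both calls can be evaluated at compile time. */
float constant_fold_minnum_c(void) { return fminf(1.0f, 2.0f); }  /* 1.0f */
float constant_fold_maxnum_c(void) { return fmaxf(1.0f, 2.0f); }  /* 2.0f */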
/external/llvm/test/CodeGen/AMDGPU/
local-stack-slot-bug.ll
16 define amdgpu_ps float @main(i32 %idx) {
18 …float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef,…
19 …float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef,…
20 %r = fadd float %v1, %v2
21 ret float %r
vgpr-spill-emergency-stack-slot-compute.ll
46 …void @spill_vgpr_compute(<4 x float> %arg6, float addrspace(1)* %arg, i32 %arg1, i32 %arg2, float…
49 %tmp7 = extractelement <4 x float> %arg6, i32 0
50 %tmp8 = extractelement <4 x float> %arg6, i32 1
51 %tmp9 = extractelement <4 x float> %arg6, i32 2
52 %tmp10 = extractelement <4 x float> %arg6, i32 3
53 %tmp11 = bitcast float %arg5 to i32
57 %tmp13 = phi float [ 0.000000e+00, %bb ], [ %tmp338, %bb145 ]
58 %tmp14 = phi float [ 0.000000e+00, %bb ], [ %tmp337, %bb145 ]
59 %tmp15 = phi float [ 0.000000e+00, %bb ], [ %tmp336, %bb145 ]
60 %tmp16 = phi float [ 0.000000e+00, %bb ], [ %tmp339, %bb145 ]
[all …]
big_alu.ll
5 …float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2, <4 x float> inreg %reg3, <4 …
7 %tmp = extractelement <4 x float> %reg0, i32 0
8 %tmp1 = extractelement <4 x float> %reg0, i32 1
9 %tmp2 = extractelement <4 x float> %reg0, i32 2
10 %tmp3 = extractelement <4 x float> %reg0, i32 3
11 %tmp4 = extractelement <4 x float> %reg1, i32 0
12 %tmp5 = extractelement <4 x float> %reg9, i32 0
13 %tmp6 = extractelement <4 x float> %reg8, i32 0
14 %tmp7 = fcmp ugt float %tmp6, 0.000000e+00
15 %tmp8 = select i1 %tmp7, float %tmp4, float %tmp5
[all …]
pv.ll
6 …float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2, <4 x float> inreg %reg3, <4 …
8 %0 = extractelement <4 x float> %reg1, i32 0
9 %1 = extractelement <4 x float> %reg1, i32 1
10 %2 = extractelement <4 x float> %reg1, i32 2
11 %3 = extractelement <4 x float> %reg1, i32 3
12 %4 = extractelement <4 x float> %reg2, i32 0
13 %5 = extractelement <4 x float> %reg2, i32 1
14 %6 = extractelement <4 x float> %reg2, i32 2
15 %7 = extractelement <4 x float> %reg2, i32 3
16 %8 = extractelement <4 x float> %reg3, i32 0
[all …]
ret.ll
4 declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
12 define amdgpu_vs {float, float} @vgpr([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, fl…
13 …call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float
14 %x = fadd float %3, 1.0
15 %a = insertvalue {float, float} undef, float %x, 0
16 %b = insertvalue {float, float} %a, float %3, 1
17 ret {float, float} %b
28 define amdgpu_vs {float, float, float, float} @vgpr_literal([9 x <16 x i8>] addrspace(2)* byval, i3…
29 …call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float
30 ret {float, float, float, float} {float 1.0, float 2.0, float 4.0, float -1.0}
[all …]
si-spill-cf.ll
11 %0 = call float @llvm.SI.load.const(<16 x i8> undef, i32 16)
12 %1 = call float @llvm.SI.load.const(<16 x i8> undef, i32 32)
13 %2 = call float @llvm.SI.load.const(<16 x i8> undef, i32 80)
14 %3 = call float @llvm.SI.load.const(<16 x i8> undef, i32 84)
15 %4 = call float @llvm.SI.load.const(<16 x i8> undef, i32 88)
16 %5 = call float @llvm.SI.load.const(<16 x i8> undef, i32 96)
17 %6 = call float @llvm.SI.load.const(<16 x i8> undef, i32 100)
18 %7 = call float @llvm.SI.load.const(<16 x i8> undef, i32 104)
19 %8 = call float @llvm.SI.load.const(<16 x i8> undef, i32 112)
20 %9 = call float @llvm.SI.load.const(<16 x i8> undef, i32 116)
[all …]
vgpr-spill-emergency-stack-slot.ll
33 %tmp12 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 0)
34 %tmp13 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 16)
35 %tmp14 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 32)
39 %tmp18 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp16, i32 0, i32 %tmp17)
40 %tmp19 = extractelement <4 x float> %tmp18, i32 0
41 %tmp20 = extractelement <4 x float> %tmp18, i32 1
42 %tmp21 = extractelement <4 x float> %tmp18, i32 2
43 %tmp22 = extractelement <4 x float> %tmp18, i32 3
44 %tmp23 = bitcast float %tmp14 to i32
48 %tmp25 = phi float [ 0.000000e+00, %bb ], [ %tmp350, %bb157 ]
[all …]
/external/llvm/test/CodeGen/Generic/
2003-05-28-ManyArgs.ll
21 %struct..s_annealing_sched = type { i32, float, float, float, float }
22 %struct..s_chan = type { i32, float, float, float, float }
23 …%struct..s_det_routing_arch = type { i32, float, float, float, i32, i32, i16, i16, i16, float, flo…
24 %struct..s_placer_opts = type { i32, float, i32, i32, i8*, i32, i32 }
25 %struct..s_router_opts = type { float, float, float, float, float, i32, i32, i32, i32 }
26 %struct..s_segment_inf = type { float, i32, i16, i16, float, float, i32, float, float }
27 %struct..s_switch_inf = type { i32, float, float, float, float }
44 …float, float, float, float, float, float, float, float, float, float } ; <{ i32, float, float, f…
50 …2, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, fl…
56 …tr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 1 ; <float*> [#uses=1]
[all …]
/external/llvm/test/Transforms/LoopReroll/
basic32iters.ll
5 ; void goo32(float alpha, float *a, float *b) {
43 define void @goo32(float %alpha, float* %a, float* readonly %b) #0 {
49 %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
50 %0 = load float, float* %arrayidx, align 4
51 %mul = fmul float %0, %alpha
52 %arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
53 %1 = load float, float* %arrayidx2, align 4
54 %add = fadd float %1, %mul
55 store float %add, float* %arrayidx2, align 4
57 %arrayidx5 = getelementptr inbounds float, float* %b, i64 %2
[all …]
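Note: the basic32iters.ll body above loads b[i], multiplies by alpha, adds into a[i], and stores the result back. A plausible C shape of goo32 (the test presumably spells the body out 32 times per iteration so that -loop-reroll can collapse it back into this form; the trip count below is an assumption for illustration):

/* Plausible source shape for goo32 above: a saxpy-style update.
 * The bound of 3200 is an assumed value, not taken from the excerpt. */
void goo32(float alpha, float *a, float *b) {
  for (int i = 0; i < 3200; ++i)
    a[i] += alpha * b[i];
}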
/external/llvm/test/CodeGen/SystemZ/
fp-sqrt-01.ll
5 declare float @llvm.sqrt.f32(float)
6 declare float @sqrtf(float)
9 define float @f1(float %val) {
13 %res = call float @llvm.sqrt.f32(float %val)
14 ret float %res
18 define float @f2(float *%ptr) {
22 %val = load float , float *%ptr
23 %res = call float @llvm.sqrt.f32(float %val)
24 ret float %res
28 define float @f3(float *%base) {
[all …]
frame-02.ll
9 define void @f1(float *%ptr) {
40 %l0 = load volatile float , float *%ptr
41 %l1 = load volatile float , float *%ptr
42 %l2 = load volatile float , float *%ptr
43 %l3 = load volatile float , float *%ptr
44 %l4 = load volatile float , float *%ptr
45 %l5 = load volatile float , float *%ptr
46 %l6 = load volatile float , float *%ptr
47 %l7 = load volatile float , float *%ptr
48 %l8 = load volatile float , float *%ptr
[all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
2008-07-19-movups-spills.ll
8 external global <4 x float>, align 1 ; <<4 x float>*>:0 [#uses=2]
9 external global <4 x float>, align 1 ; <<4 x float>*>:1 [#uses=1]
10 external global <4 x float>, align 1 ; <<4 x float>*>:2 [#uses=1]
11 external global <4 x float>, align 1 ; <<4 x float>*>:3 [#uses=1]
12 external global <4 x float>, align 1 ; <<4 x float>*>:4 [#uses=1]
13 external global <4 x float>, align 1 ; <<4 x float>*>:5 [#uses=1]
14 external global <4 x float>, align 1 ; <<4 x float>*>:6 [#uses=1]
15 external global <4 x float>, align 1 ; <<4 x float>*>:7 [#uses=1]
16 external global <4 x float>, align 1 ; <<4 x float>*>:8 [#uses=1]
17 external global <4 x float>, align 1 ; <<4 x float>*>:9 [#uses=1]
[all …]
/external/llvm/test/CodeGen/AArch64/
callee-save.ll
3 @var = global float 0.0
15 %val1 = load volatile float, float* @var
16 %val2 = load volatile float, float* @var
17 %val3 = load volatile float, float* @var
18 %val4 = load volatile float, float* @var
19 %val5 = load volatile float, float* @var
20 %val6 = load volatile float, float* @var
21 %val7 = load volatile float, float* @var
22 %val8 = load volatile float, float* @var
23 %val9 = load volatile float, float* @var
[all …]
/external/llvm/test/Transforms/Reassociate/
fast-basictest.ll
4 define float @test1(float %arg) {
6 ; CHECK-NEXT: fsub fast float -0.000000e+00, %arg
7 ; CHECK-NEXT: ret float
9 %tmp1 = fsub fast float -1.200000e+01, %arg
10 %tmp2 = fadd fast float %tmp1, 1.200000e+01
11 ret float %tmp2
14 define float @test2(float %reg109, float %reg1111) {
16 ; CHECK-NEXT: fadd float %reg109, -3.000000e+01
17 ; CHECK-NEXT: fadd float %reg115, %reg1111
18 ; CHECK-NEXT: fadd float %reg116, 3.000000e+01
[all …]
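Note: test1 above computes (-12.0 - arg) + 12.0 with fast-math flags, and the CHECK lines expect Reassociate to reduce it to a plain negation. A C view of the same expression (the simplification is only legal when reassociation is permitted, e.g. under -ffast-math, which is what the 'fast' flags grant in the IR):

/* C shape of test1 above; with reassociation allowed this folds to -arg,
 * which is what the CHECK-NEXT "fsub fast float -0.0, %arg" expresses. */
float test1_c(float arg) { return (-12.0f - arg) + 12.0f; }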
/external/llvm/test/CodeGen/PowerPC/
floatPSA.ll
10 …ne float @bar(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, floa…
12 %a.addr = alloca float, align 4
13 %b.addr = alloca float, align 4
14 %c.addr = alloca float, align 4
15 %d.addr = alloca float, align 4
16 %e.addr = alloca float, align 4
17 %f.addr = alloca float, align 4
18 %g.addr = alloca float, align 4
19 %h.addr = alloca float, align 4
20 %i.addr = alloca float, align 4
[all …]
unal-altivec2.ll
6 define void @foo(float* noalias nocapture %x, float* noalias nocapture readonly %y) #0 {
15 %0 = getelementptr inbounds float, float* %y, i64 %index
16 %1 = bitcast float* %0 to <4 x float>*
17 %wide.load = load <4 x float>, <4 x float>* %1, align 4
18 %2 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load)
19 %3 = getelementptr inbounds float, float* %x, i64 %index
20 %4 = bitcast float* %3 to <4 x float>*
21 store <4 x float> %2, <4 x float>* %4, align 4
23 %5 = getelementptr inbounds float, float* %y, i64 %index.next
24 %6 = bitcast float* %5 to <4 x float>*
[all …]
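Note: the unal-altivec2.ll loop above is x[i] = cos(y[i]) already vectorized into <4 x float> chunks. A scalar C sketch of the same computation (the trip-count parameter is an assumption, since the bound is not in the excerpt):

#include <math.h>

/* Scalar shape of @foo above; the IR instead processes <4 x float> vectors
 * per iteration through a vector cosine rather than calling cosf per element. */
void foo_scalar(float *restrict x, const float *restrict y, int n) {
  for (int i = 0; i < n; ++i)
    x[i] = cosf(y[i]);
}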
/external/deqp/data/gles3/shaders/
qualification_order.test
14 precision mediump float;
17 invariant smooth centroid out lowp float x0;
19 flat out mediump float x1;
21 uniform highp float x2;
33 precision mediump float;
36 smooth centroid in lowp float x0;
38 flat in mediump float x1;
40 uniform highp float x2;
44 float result = (x0 + x1 + x2) / 3.0;
55 precision mediump float;
[all …]
/external/llvm/test/Transforms/SLPVectorizer/X86/
fma.ll
13 @srcA32 = common global [16 x float] zeroinitializer, align 64
14 @srcB32 = common global [16 x float] zeroinitializer, align 64
15 @srcC32 = common global [16 x float] zeroinitializer, align 64
17 @dst32 = common global [16 x float] zeroinitializer, align 64
19 declare float @llvm.fma.f32(float, float, float)
226 ; NO-FMA-NEXT: [[A0:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float
227 ; NO-FMA-NEXT: [[A1:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float
228 ; NO-FMA-NEXT: [[A2:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float
229 ; NO-FMA-NEXT: [[A3:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float
230 ; NO-FMA-NEXT: [[B0:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float
[all …]
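Note: fma.ll above declares four 16-element float arrays and llvm.fma.f32, and the test checks how straight-line scalar FMA calls over those arrays are (or are not, under NO-FMA) turned into vector code. A C sketch of the underlying computation, written as a loop for brevity (the test itself uses unrolled scalar code):

#include <math.h>

/* Element-wise fused multiply-add over the arrays named in the excerpt:
 * dst32[i] = srcA32[i] * srcB32[i] + srcC32[i] with a single rounding. */
extern float srcA32[16], srcB32[16], srcC32[16], dst32[16];

void fma16(void) {
  for (int i = 0; i < 16; ++i)
    dst32[i] = fmaf(srcA32[i], srcB32[i], srcC32[i]);
}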
