
Searched refs: float (Results 1 – 25 of 7534) sorted by relevance


/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
scratch-simple.ll
21 define amdgpu_ps float @ps_main(i32 %idx) {
22 float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef,…
23 float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef,…
24 %r = fadd float %v1, %v2
25 ret float %r
33 define amdgpu_vs float @vs_main(i32 %idx) {
34 float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef,…
35 float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef,…
36 %r = fadd float %v1, %v2
37 ret float %r
[all …]
schedule-regpressure-limit.ll
8 define amdgpu_kernel void @load_fma_store(float addrspace(3)* nocapture readonly %arg, float addrsp…
10 %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
11 %tmp2 = load float, float addrspace(3)* %tmp, align 4
12 %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
13 %tmp4 = load float, float addrspace(3)* %tmp3, align 4
14 %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
15 %tmp6 = load float, float addrspace(3)* %tmp5, align 4
16 %tmp7 = tail call float @llvm.fmuladd.f32(float %tmp2, float %tmp4, float %tmp6)
17 %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
18 %tmp9 = load float, float addrspace(3)* %tmp8, align 4
[all …]
schedule-ilp.ll
5 define amdgpu_kernel void @load_fma_store(float addrspace(3)* nocapture readonly %arg, float addrsp…
7 %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
8 %tmp2 = load float, float addrspace(3)* %tmp, align 4
9 %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
10 %tmp4 = load float, float addrspace(3)* %tmp3, align 4
11 %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
12 %tmp6 = load float, float addrspace(3)* %tmp5, align 4
13 %tmp7 = tail call float @llvm.fmuladd.f32(float %tmp2, float %tmp4, float %tmp6)
14 %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
15 %tmp9 = load float, float addrspace(3)* %tmp8, align 4
[all …]
schedule-regpressure-limit3.ll
7 define amdgpu_kernel void @load_fma_store(float addrspace(3)* nocapture readonly %arg, float addrsp…
9 %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
10 %tmp2 = load float, float addrspace(3)* %tmp, align 4
11 %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
12 %tmp4 = load float, float addrspace(3)* %tmp3, align 4
13 %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
14 %tmp6 = load float, float addrspace(3)* %tmp5, align 4
15 %tmp7 = tail call float @llvm.fmuladd.f32(float %tmp2, float %tmp4, float %tmp6)
16 %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
17 %tmp9 = load float, float addrspace(3)* %tmp8, align 4
[all …]
schedule-regpressure-limit2.ll
16 define amdgpu_kernel void @load_fma_store(float addrspace(3)* nocapture readonly %in_arg, float add…
18 %adr.a.0 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 20004
19 %adr.b.0 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 20252
20 %adr.c.0 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 20508
21 %adr.a.1 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 20772
22 %adr.b.1 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 21020
23 %adr.c.1 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 21276
24 %adr.a.2 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 21540
25 %adr.b.2 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 21788
26 %adr.c.2 = getelementptr inbounds float, float addrspace(3)* %in_arg, i32 22044
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
2008-07-19-movups-spills.ll
7 @0 = external global <4 x float>, align 1 ; <<4 x float>*>:0 [#uses=2]
8 @1 = external global <4 x float>, align 1 ; <<4 x float>*>:1 [#uses=1]
9 @2 = external global <4 x float>, align 1 ; <<4 x float>*>:2 [#uses=1]
10 @3 = external global <4 x float>, align 1 ; <<4 x float>*>:3 [#uses=1]
11 @4 = external global <4 x float>, align 1 ; <<4 x float>*>:4 [#uses=1]
12 @5 = external global <4 x float>, align 1 ; <<4 x float>*>:5 [#uses=1]
13 @6 = external global <4 x float>, align 1 ; <<4 x float>*>:6 [#uses=1]
14 @7 = external global <4 x float>, align 1 ; <<4 x float>*>:7 [#uses=1]
15 @8 = external global <4 x float>, align 1 ; <<4 x float>*>:8 [#uses=1]
16 @9 = external global <4 x float>, align 1 ; <<4 x float>*>:9 [#uses=1]
[all …]
large-gep-chain.ll
4 %0 = type { i32, float* }
24 %tmp = getelementptr inbounds float, float* null, i64 1
25 %tmp3 = getelementptr inbounds float, float* %tmp, i64 1
26 %tmp4 = getelementptr inbounds float, float* %tmp3, i64 1
27 %tmp5 = getelementptr inbounds float, float* %tmp4, i64 1
28 %tmp6 = getelementptr inbounds float, float* %tmp5, i64 1
29 %tmp7 = getelementptr inbounds float, float* %tmp6, i64 1
30 %tmp8 = getelementptr inbounds float, float* %tmp7, i64 1
31 %tmp9 = getelementptr inbounds float, float* %tmp8, i64 1
32 %tmp10 = getelementptr inbounds float, float* %tmp9, i64 1
[all …]
/external/llvm/test/CodeGen/X86/
2008-07-19-movups-spills.ll
7 @0 = external global <4 x float>, align 1 ; <<4 x float>*>:0 [#uses=2]
8 @1 = external global <4 x float>, align 1 ; <<4 x float>*>:1 [#uses=1]
9 @2 = external global <4 x float>, align 1 ; <<4 x float>*>:2 [#uses=1]
10 @3 = external global <4 x float>, align 1 ; <<4 x float>*>:3 [#uses=1]
11 @4 = external global <4 x float>, align 1 ; <<4 x float>*>:4 [#uses=1]
12 @5 = external global <4 x float>, align 1 ; <<4 x float>*>:5 [#uses=1]
13 @6 = external global <4 x float>, align 1 ; <<4 x float>*>:6 [#uses=1]
14 @7 = external global <4 x float>, align 1 ; <<4 x float>*>:7 [#uses=1]
15 @8 = external global <4 x float>, align 1 ; <<4 x float>*>:8 [#uses=1]
16 @9 = external global <4 x float>, align 1 ; <<4 x float>*>:9 [#uses=1]
[all …]
large-gep-chain.ll
4 %0 = type { i32, float* }
24 %tmp = getelementptr inbounds float, float* null, i64 1
25 %tmp3 = getelementptr inbounds float, float* %tmp, i64 1
26 %tmp4 = getelementptr inbounds float, float* %tmp3, i64 1
27 %tmp5 = getelementptr inbounds float, float* %tmp4, i64 1
28 %tmp6 = getelementptr inbounds float, float* %tmp5, i64 1
29 %tmp7 = getelementptr inbounds float, float* %tmp6, i64 1
30 %tmp8 = getelementptr inbounds float, float* %tmp7, i64 1
31 %tmp9 = getelementptr inbounds float, float* %tmp8, i64 1
32 %tmp10 = getelementptr inbounds float, float* %tmp9, i64 1
[all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
2008-07-19-movups-spills.ll
8 external global <4 x float>, align 1 ; <<4 x float>*>:0 [#uses=2]
9 external global <4 x float>, align 1 ; <<4 x float>*>:1 [#uses=1]
10 external global <4 x float>, align 1 ; <<4 x float>*>:2 [#uses=1]
11 external global <4 x float>, align 1 ; <<4 x float>*>:3 [#uses=1]
12 external global <4 x float>, align 1 ; <<4 x float>*>:4 [#uses=1]
13 external global <4 x float>, align 1 ; <<4 x float>*>:5 [#uses=1]
14 external global <4 x float>, align 1 ; <<4 x float>*>:6 [#uses=1]
15 external global <4 x float>, align 1 ; <<4 x float>*>:7 [#uses=1]
16 external global <4 x float>, align 1 ; <<4 x float>*>:8 [#uses=1]
17 external global <4 x float>, align 1 ; <<4 x float>*>:9 [#uses=1]
[all …]
/external/llvm/test/CodeGen/AMDGPU/
pv.ll
6 float> inreg %reg0, <4 x float> inreg %reg1, <4 x float> inreg %reg2, <4 x float> inreg %reg3, <4 …
8 %0 = extractelement <4 x float> %reg1, i32 0
9 %1 = extractelement <4 x float> %reg1, i32 1
10 %2 = extractelement <4 x float> %reg1, i32 2
11 %3 = extractelement <4 x float> %reg1, i32 3
12 %4 = extractelement <4 x float> %reg2, i32 0
13 %5 = extractelement <4 x float> %reg2, i32 1
14 %6 = extractelement <4 x float> %reg2, i32 2
15 %7 = extractelement <4 x float> %reg2, i32 3
16 %8 = extractelement <4 x float> %reg3, i32 0
[all …]
local-stack-slot-bug.ll
16 define amdgpu_ps float @main(i32 %idx) {
18 float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef,…
19 float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef,…
20 %r = fadd float %v1, %v2
21 ret float %r
ret.ll
4 declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
12 define amdgpu_vs {float, float} @vgpr([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, fl…
13 …call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float
14 %x = fadd float %3, 1.0
15 %a = insertvalue {float, float} undef, float %x, 0
16 %b = insertvalue {float, float} %a, float %3, 1
17 ret {float, float} %b
28 define amdgpu_vs {float, float, float, float} @vgpr_literal([9 x <16 x i8>] addrspace(2)* byval, i3…
29 …call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float
30 ret {float, float, float, float} {float 1.0, float 2.0, float 4.0, float -1.0}
[all …]
/external/llvm/test/Transforms/InstCombine/
x86-sse.ll
5 define float @test_rcp_ss_0(float %a) {
7 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> undef, float %a, i32 0
8 ; CHECK-NEXT: [[TMP2:%.*]] = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> [[TMP1]])
9 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
10 ; CHECK-NEXT: ret float [[TMP3]]
12 %1 = insertelement <4 x float> undef, float %a, i32 0
13 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1
14 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2
15 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3
16 %5 = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %4)
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
fma.ll
4 declare float @llvm.fma.f32(float, float, float) #1
5 declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) #1
6 declare float @llvm.fmuladd.f32(float, float, float) #1
7 declare float @llvm.fabs.f32(float) #1
11 define float @fma_fneg_x_fneg_y(float %x, float %y, float %z) {
13 ; CHECK-NEXT: [[FMA:%.*]] = call float @llvm.fma.f32(float [[X:%.*]], float [[Y:%.*]], float [[Z…
14 ; CHECK-NEXT: ret float [[FMA]]
16 %x.fneg = fsub float -0.0, %x
17 %y.fneg = fsub float -0.0, %y
18 %fma = call float @llvm.fma.f32(float %x.fneg, float %y.fneg, float %z)
[all …]
maxnum.ll
4 declare float @llvm.maxnum.f32(float, float)
5 declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>)
6 declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
11 define float @constant_fold_maxnum_f32() {
13 ; CHECK-NEXT: ret float 2.000000e+00
15 %x = call float @llvm.maxnum.f32(float 1.0, float 2.0)
16 ret float %x
19 define float @constant_fold_maxnum_f32_inv() {
21 ; CHECK-NEXT: ret float 2.000000e+00
23 %x = call float @llvm.maxnum.f32(float 2.0, float 1.0)
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/X86/
x86-sse.ll
5 define float @test_rcp_ss_0(float %a) {
7 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> undef, float %a, i32 0
8 ; CHECK-NEXT: [[TMP2:%.*]] = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> [[TMP1]])
9 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
10 ; CHECK-NEXT: ret float [[TMP3]]
12 %1 = insertelement <4 x float> undef, float %a, i32 0
13 %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1
14 %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2
15 %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3
16 %5 = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %4)
[all …]
/external/llvm/test/CodeGen/Generic/
2003-05-28-ManyArgs.ll
21 %struct..s_annealing_sched = type { i32, float, float, float, float }
22 %struct..s_chan = type { i32, float, float, float, float }
23 …%struct..s_det_routing_arch = type { i32, float, float, float, i32, i32, i16, i16, i16, float, flo…
24 %struct..s_placer_opts = type { i32, float, i32, i32, i8*, i32, i32 }
25 %struct..s_router_opts = type { float, float, float, float, float, i32, i32, i32, i32 }
26 %struct..s_segment_inf = type { float, i32, i16, i16, float, float, i32, float, float }
27 %struct..s_switch_inf = type { i32, float, float, float, float }
44 float, float, float, float, float, float, float, float, float, float } ; <{ i32, float, float, f…
50 …2, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, fl…
56 …tr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 1 ; <float*> [#uses=1]
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Generic/
2003-05-28-ManyArgs.ll
21 %struct..s_annealing_sched = type { i32, float, float, float, float }
22 %struct..s_chan = type { i32, float, float, float, float }
23 …%struct..s_det_routing_arch = type { i32, float, float, float, i32, i32, i16, i16, i16, float, flo…
24 %struct..s_placer_opts = type { i32, float, i32, i32, i8*, i32, i32 }
25 %struct..s_router_opts = type { float, float, float, float, float, i32, i32, i32, i32 }
26 %struct..s_segment_inf = type { float, i32, i16, i16, float, float, i32, float, float }
27 %struct..s_switch_inf = type { i32, float, float, float, float }
44 float, float, float, float, float, float, float, float, float, float } ; <{ i32, float, float, f…
50 …2, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, fl…
56 …tr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 1 ; <float*> [#uses=1]
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
2009-11-13-ScavengerAssert2.ll
4 %bar = type { %quad, float, float, [3 x %quuz*], [3 x %bar*], [2 x %bar*], [3 x i8], i8 }
6 %foo = type { i8, %quux, %quad, float, [64 x %quuz], [128 x %bar], i32, %baz, %baz }
7 %quad = type { [4 x float] }
8 %quux = type { [4 x %quuz*], [4 x float], i32 }
22 %0 = getelementptr inbounds %quuz, %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=0]
23 %1 = fsub float 0.000000e+00, undef ; <float> [#uses=1]
24 %2 = getelementptr inbounds %quuz, %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
25 %3 = load float, float* %2, align 4 ; <float> [#uses=1]
26 %4 = getelementptr inbounds %quuz, %quuz* %a, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
27 %5 = fsub float %3, undef ; <float> [#uses=2]
[all …]
/external/llvm/test/CodeGen/ARM/
2009-11-13-ScavengerAssert2.ll
4 %bar = type { %quad, float, float, [3 x %quuz*], [3 x %bar*], [2 x %bar*], [3 x i8], i8 }
6 %foo = type { i8, %quux, %quad, float, [64 x %quuz], [128 x %bar], i32, %baz, %baz }
7 %quad = type { [4 x float] }
8 %quux = type { [4 x %quuz*], [4 x float], i32 }
22 %0 = getelementptr inbounds %quuz, %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=0]
23 %1 = fsub float 0.000000e+00, undef ; <float> [#uses=1]
24 %2 = getelementptr inbounds %quuz, %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
25 %3 = load float, float* %2, align 4 ; <float> [#uses=1]
26 %4 = getelementptr inbounds %quuz, %quuz* %a, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
27 %5 = fsub float %3, undef ; <float> [#uses=2]
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/
large-number-of-preds.ll
6 @g0 = external global void (float*, i32, i32, float*, float*)**
9 define void @f0(float* nocapture %a0, float* nocapture %a1, float* %a2) #0 {
11 %v0 = alloca [64 x float], align 16
12 %v1 = alloca [8 x float], align 8
13 %v2 = bitcast [64 x float]* %v0 to i8*
15 %v3 = load float, float* %a0, align 4, !tbaa !0
16 %v4 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 35
17 store float %v3, float* %v4, align 4, !tbaa !0
18 %v5 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 0
19 store float %v3, float* %v5, align 16, !tbaa !0
[all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
2009-11-13-ScavengerAssert2.ll
4 %bar = type { %quad, float, float, [3 x %quuz*], [3 x %bar*], [2 x %bar*], [3 x i8], i8 }
6 %foo = type { i8, %quux, %quad, float, [64 x %quuz], [128 x %bar], i32, %baz, %baz }
7 %quad = type { [4 x float] }
8 %quux = type { [4 x %quuz*], [4 x float], i32 }
22 %0 = getelementptr inbounds %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=0]
23 %1 = fsub float 0.000000e+00, undef ; <float> [#uses=1]
24 %2 = getelementptr inbounds %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
25 %3 = load float* %2, align 4 ; <float> [#uses=1]
26 %4 = getelementptr inbounds %quuz* %a, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
27 %5 = fsub float %3, undef ; <float> [#uses=2]
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopReroll/
basic32iters.ll
5 ; void goo32(float alpha, float *a, float *b) {
43 define void @goo32(float %alpha, float* %a, float* readonly %b) #0 {
49 %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
50 %0 = load float, float* %arrayidx, align 4
51 %mul = fmul float %0, %alpha
52 %arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
53 %1 = load float, float* %arrayidx2, align 4
54 %add = fadd float %1, %mul
55 store float %add, float* %arrayidx2, align 4
57 %arrayidx5 = getelementptr inbounds float, float* %b, i64 %2
[all …]
/external/llvm/test/Transforms/LoopReroll/
basic32iters.ll
5 ; void goo32(float alpha, float *a, float *b) {
43 define void @goo32(float %alpha, float* %a, float* readonly %b) #0 {
49 %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
50 %0 = load float, float* %arrayidx, align 4
51 %mul = fmul float %0, %alpha
52 %arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
53 %1 = load float, float* %arrayidx2, align 4
54 %add = fadd float %1, %mul
55 store float %add, float* %arrayidx2, align 4
57 %arrayidx5 = getelementptr inbounds float, float* %b, i64 %2
[all …]
