/external/llvm/test/CodeGen/Thumb2/ |
D | large-call.ll | 22 %d = alloca double, align 8 23 store double 1.000000e+00, double* %d, align 8 24 %0 = load double, double* %d, align 8 25 …double 1.234800e+03, double 2.363450e+03, double %0, i32 1, double 1.234560e+03, double 2.345670e+…
|
/external/llvm/test/CodeGen/ARM/ |
D | 2009-02-27-SpillerBug.ll | 4 @a = external global double ; <double*> [#uses=1] 5 @N = external global double ; <double*> [#uses=1] 7 declare double @llvm.exp.f64(double) nounwind readonly 9 define fastcc void @findratio(double* nocapture %res1, double* nocapture %res2) nounwind { 14 %0 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1] 20 …%k.4 = phi double [ %0, %bb32 ], [ 0.000000e+00, %bb53 ], [ 0.000000e+00, %bb.thread ] ; <double>… 31 %4 = load double, double* @a, align 4 ; <double> [#uses=10] 32 %5 = fadd double %4, 0.000000e+00 ; <double> [#uses=16] 33 %6 = fcmp ogt double %k.4, 0.000000e+00 ; <i1> [#uses=1] 34 %.pn404 = fmul double %4, %4 ; <double> [#uses=4] [all …]
|
D | 2009-07-29-VFP3Registers.ll | 3 @a = external global double ; <double*> [#uses=1] 5 declare double @llvm.exp.f64(double) nounwind readonly 7 define void @findratio(double* nocapture %res1, double* nocapture %res2) nounwind { 15 %0 = load double, double* @a, align 4 ; <double> [#uses=2] 16 %1 = fadd double %0, undef ; <double> [#uses=2] 20 %2 = fsub double -0.000000e+00, undef ; <double> [#uses=2] 24 %3 = tail call double @llvm.exp.f64(double undef) nounwind ; <double> [#uses=1] 25 %4 = fsub double -0.000000e+00, %3 ; <double> [#uses=2] 26 %5 = fsub double -0.000000e+00, undef ; <double> [#uses=1] 27 %6 = fsub double -0.000000e+00, undef ; <double> [#uses=1] [all …]
|
D | stack-alignment.ll | 34 define i8* @f_alignedDPRCS2Spills(double* %d) #0 { 43 %0 = load double, double* %d, align 4 44 %arrayidx1 = getelementptr inbounds double, double* %d, i32 1 45 %1 = load double, double* %arrayidx1, align 4 46 %arrayidx2 = getelementptr inbounds double, double* %d, i32 2 47 %2 = load double, double* %arrayidx2, align 4 48 %arrayidx3 = getelementptr inbounds double, double* %d, i32 3 49 %3 = load double, double* %arrayidx3, align 4 50 %arrayidx4 = getelementptr inbounds double, double* %d, i32 4 51 %4 = load double, double* %arrayidx4, align 4 [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
D | 2009-02-27-SpillerBug.ll | 4 @a = external global double ; <double*> [#uses=1] 5 @N = external global double ; <double*> [#uses=1] 7 declare double @llvm.exp.f64(double) nounwind readonly 9 define fastcc void @findratio(double* nocapture %res1, double* nocapture %res2) nounwind { 14 %0 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1] 20 …%k.4 = phi double [ %0, %bb32 ], [ 0.000000e+00, %bb53 ], [ 0.000000e+00, %bb.thread ] ; <double>… 31 %4 = load double* @a, align 4 ; <double> [#uses=10] 32 %5 = fadd double %4, 0.000000e+00 ; <double> [#uses=16] 33 %6 = fcmp ogt double %k.4, 0.000000e+00 ; <i1> [#uses=1] 34 %.pn404 = fmul double %4, %4 ; <double> [#uses=4] [all …]
|
D | 2009-07-29-VFP3Registers.ll | 3 @a = external global double ; <double*> [#uses=1] 5 declare double @llvm.exp.f64(double) nounwind readonly 7 define void @findratio(double* nocapture %res1, double* nocapture %res2) nounwind { 15 %0 = load double* @a, align 4 ; <double> [#uses=2] 16 %1 = fadd double %0, undef ; <double> [#uses=2] 20 %2 = fsub double -0.000000e+00, undef ; <double> [#uses=2] 24 %3 = tail call double @llvm.exp.f64(double undef) nounwind ; <double> [#uses=1] 25 %4 = fsub double -0.000000e+00, %3 ; <double> [#uses=2] 26 %5 = fsub double -0.000000e+00, undef ; <double> [#uses=1] 27 %6 = fsub double -0.000000e+00, undef ; <double> [#uses=1] [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | remat-scalar-zero.ll | 11 define void @foo(double* nocapture %x, double* nocapture %y) nounwind { 13 %tmp1 = load double, double* %x ; <double> [#uses=1] 14 %arrayidx4 = getelementptr inbounds double, double* %x, i64 1 ; <double*> [#uses=1] 15 %tmp5 = load double, double* %arrayidx4 ; <double> [#uses=1] 16 %arrayidx8 = getelementptr inbounds double, double* %x, i64 2 ; <double*> [#uses=1] 17 %tmp9 = load double, double* %arrayidx8 ; <double> [#uses=1] 18 %arrayidx12 = getelementptr inbounds double, double* %x, i64 3 ; <double*> [#uses=1] 19 %tmp13 = load double, double* %arrayidx12 ; <double> [#uses=1] 20 %arrayidx16 = getelementptr inbounds double, double* %x, i64 4 ; <double*> [#uses=1] 21 %tmp17 = load double, double* %arrayidx16 ; <double> [#uses=1] [all …]
|
D | 2006-11-12-CSRetCC.ll | 21 %tmp = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4] 22 %tmp1 = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4] 23 %tmp2 = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=3] 24 %pi = alloca double, align 8 ; <double*> [#uses=2] 25 %z = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4] 27 store double 0x400921FB54442D18, double* %pi 28 %tmp.upgrd.1 = load double, double* %pi ; <double> [#uses=1] 29 …%real = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 0 ; <dou… 30 store double 0.000000e+00, double* %real 31 …%real3 = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 1 ; <dou… [all …]
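This test builds complex values as { double, double } aggregates on the stack, with getelementptr index i32 0 selecting the real field and i32 1 the imaginary field. A minimal sketch of that addressing pattern, not taken verbatim from the file above (function and value names are illustrative):

define void @make_pi_i({ double, double }* %out) {
entry:
  ; field 0 = real part, field 1 = imaginary part (illustrative layout)
  %re = getelementptr { double, double }, { double, double }* %out, i64 0, i32 0
  store double 0.000000e+00, double* %re
  %im = getelementptr { double, double }, { double, double }* %out, i64 0, i32 1
  ; pi written in hexadecimal FP notation, as in the listing above
  store double 0x400921FB54442D18, double* %im
  ret void
}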
|
D | misched-matmul.ll | 15 define void @wrap_mul4(double* nocapture %Out, [4 x double]* nocapture %A, [4 x double]* nocapture … 17 %arrayidx1.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 0 18 %0 = load double, double* %arrayidx1.i, align 8 19 %arrayidx3.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 0 20 %1 = load double, double* %arrayidx3.i, align 8 21 %mul.i = fmul double %0, %1 22 %arrayidx5.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 1 23 %2 = load double, double* %arrayidx5.i, align 8 24 %arrayidx7.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 0 25 %3 = load double, double* %arrayidx7.i, align 8 [all …]
|
D | masked-iv-unsafe.ll | 9 define void @count_up(double* %d, i64 %n) nounwind { 16 %t0 = getelementptr double, double* %d, i64 %indvar.i8 17 %t1 = load double, double* %t0 18 %t2 = fmul double %t1, 0.1 19 store double %t2, double* %t0 21 %t3 = getelementptr double, double* %d, i64 %indvar.i24 22 %t4 = load double, double* %t3 23 %t5 = fmul double %t4, 2.3 24 store double %t5, double* %t3 25 %t6 = getelementptr double, double* %d, i64 %indvar [all …]
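The shape being tested here is a counted loop that rewrites array elements in place through several differently masked induction variables. A reduced, self-contained sketch of the basic pattern, assuming a single induction variable (the 0.5 scale factor and all names are illustrative, not from the test):

define void @scale_in_place(double* %d, i64 %n) nounwind {
entry:
  %empty = icmp eq i64 %n, 0
  br i1 %empty, label %exit, label %loop

loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %p = getelementptr double, double* %d, i64 %i
  %v = load double, double* %p
  %v.scaled = fmul double %v, 5.000000e-01
  store double %v.scaled, double* %p
  %i.next = add i64 %i, 1
  %done = icmp eq i64 %i.next, %n
  br i1 %done, label %exit, label %loop

exit:
  ret void
}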
|
/external/llvm/test/CodeGen/SystemZ/ |
D | frame-20.ll | 10 define void @f1(double *%ptr) { 48 %l0 = load volatile double, double *%ptr 49 %l1 = load volatile double, double *%ptr 50 %l2 = load volatile double, double *%ptr 51 %l3 = load volatile double, double *%ptr 52 %l4 = load volatile double, double *%ptr 53 %l5 = load volatile double, double *%ptr 54 %l6 = load volatile double, double *%ptr 55 %l7 = load volatile double, double *%ptr 56 %l8 = load volatile double, double *%ptr [all …]
|
D | fp-sqrt-02.ll | 7 declare double @llvm.sqrt.f64(double %f) 8 declare double @sqrt(double) 11 define double @f1(double %val) { 15 %res = call double @llvm.sqrt.f64(double %val) 16 ret double %res 20 define double @f2(double *%ptr) { 24 %val = load double , double *%ptr 25 %res = call double @llvm.sqrt.f64(double %val) 26 ret double %res 30 define double @f3(double *%base) { [all …]
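These SystemZ tests exercise f64 square root both on a register value and on a loaded value (the latter checking that the memory form of the square-root instruction can be used). A minimal standalone version of the load-then-sqrt case, with an illustrative function name:

declare double @llvm.sqrt.f64(double)

define double @sqrt_of_load(double* %ptr) {
  %val = load double, double* %ptr
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}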
|
D | frame-03.ll | 11 define void @f1(double *%ptr) { 42 %l0 = load volatile double , double *%ptr 43 %l1 = load volatile double , double *%ptr 44 %l2 = load volatile double , double *%ptr 45 %l3 = load volatile double , double *%ptr 46 %l4 = load volatile double , double *%ptr 47 %l5 = load volatile double , double *%ptr 48 %l6 = load volatile double , double *%ptr 49 %l7 = load volatile double , double *%ptr 50 %l8 = load volatile double , double *%ptr [all …]
|
/external/llvm/test/Analysis/BasicAA/ |
D | args-rets-allocas-loads.ll | 3 declare void @callee(double* %callee_arg) 4 declare void @nocap_callee(double* nocapture %nocap_callee_arg) 6 declare double* @normal_returner() 7 declare noalias double* @noalias_returner() 9 define void @caller_a(double* %arg_a0, 10 double* %arg_a1, 11 double* noalias %noalias_arg_a0, 12 double* noalias %noalias_arg_a1, 13 double** %indirect_a0, 14 double** %indirect_a1) { [all …]
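The BasicAA test above leans on the noalias and nocapture attributes to establish which pointers may alias. A small hedged sketch of the kind of conclusion a noalias return value enables (declaration and names are illustrative, not from the test):

declare noalias double* @fresh_buffer()

define double @keeps_first_store(double* %p) {
  ; the noalias return value cannot alias %p here, so the store through
  ; %q does not clobber the value stored through %p
  %q = call noalias double* @fresh_buffer()
  store double 1.000000e+00, double* %p
  store double 2.000000e+00, double* %q
  %v = load double, double* %p
  ret double %v
}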
|
/external/swiftshader/third_party/LLVM/test/Analysis/BasicAA/ |
D | args-rets-allocas-loads.ll | 3 declare void @callee(double* %callee_arg) 4 declare void @nocap_callee(double* nocapture %nocap_callee_arg) 6 declare double* @normal_returner() 7 declare noalias double* @noalias_returner() 9 define void @caller_a(double* %arg_a0, 10 double* %arg_a1, 11 double* noalias %noalias_arg_a0, 12 double* noalias %noalias_arg_a1, 13 double** %indirect_a0, 14 double** %indirect_a1) { [all …]
|
/external/llvm/test/Transforms/InstCombine/ |
D | x86-sse2.ll | 5 define double @test_sqrt_sd_0(double %a) { 7 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> undef, double %a, i32 0 8 ; CHECK-NEXT: [[TMP2:%.*]] = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> [[TMP1]]) 9 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[TMP2]], i32 0 10 ; CHECK-NEXT: ret double [[TMP3]] 12 %1 = insertelement <2 x double> undef, double %a, i32 0 13 %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1 14 %3 = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %2) 15 %4 = extractelement <2 x double> %3, i32 0 16 ret double %4 [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | remat-scalar-zero.ll | 10 define void @foo(double* nocapture %x, double* nocapture %y) nounwind { 12 %tmp1 = load double* %x ; <double> [#uses=1] 13 %arrayidx4 = getelementptr inbounds double* %x, i64 1 ; <double*> [#uses=1] 14 %tmp5 = load double* %arrayidx4 ; <double> [#uses=1] 15 %arrayidx8 = getelementptr inbounds double* %x, i64 2 ; <double*> [#uses=1] 16 %tmp9 = load double* %arrayidx8 ; <double> [#uses=1] 17 %arrayidx12 = getelementptr inbounds double* %x, i64 3 ; <double*> [#uses=1] 18 %tmp13 = load double* %arrayidx12 ; <double> [#uses=1] 19 %arrayidx16 = getelementptr inbounds double* %x, i64 4 ; <double*> [#uses=1] 20 %tmp17 = load double* %arrayidx16 ; <double> [#uses=1] [all …]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | qpx-unal-cons-lds.ll | 6 define void @foo(double* noalias nocapture %a, double* noalias nocapture readonly %b) #0 { 50 %1 = getelementptr inbounds double, double* %b, i64 %0 51 %2 = bitcast double* %1 to <8 x double>* 52 %wide.vec = load <8 x double>, <8 x double>* %2, align 8 53 …%strided.vec = shufflevector <8 x double> %wide.vec, <8 x double> undef, <4 x i32> <i32 0, i32 2, … 54 …%3 = fadd <4 x double> %strided.vec, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+0… 55 %4 = getelementptr inbounds double, double* %a, i64 %index 56 %5 = bitcast double* %4 to <4 x double>* 57 store <4 x double> %3, <4 x double>* %5, align 8 60 %7 = getelementptr inbounds double, double* %b, i64 %6 [all …]
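The loop body shown here loads eight contiguous doubles as one wide vector and then deinterleaves a strided subset with shufflevector (the mask in the listing is cut off by the search tool). A self-contained sketch of that idiom, with types and mask chosen for illustration rather than copied from the file:

define <4 x double> @even_lanes(<8 x double>* %p) {
  %wide = load <8 x double>, <8 x double>* %p, align 8
  ; keep lanes 0, 2, 4 and 6 of the wide load
  %even = shufflevector <8 x double> %wide, <8 x double> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  ret <4 x double> %even
}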
|
D | vsx_shuffle_le.ll | 3 define <2 x double> @test00(<2 x double>* %p1, <2 x double>* %p2) { 4 %v1 = load <2 x double>, <2 x double>* %p1 5 %v2 = load <2 x double>, <2 x double>* %p2 6 %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 0> 7 ret <2 x double> %v3 14 define <2 x double> @test01(<2 x double>* %p1, <2 x double>* %p2) { 15 %v1 = load <2 x double>, <2 x double>* %p1 16 %v2 = load <2 x double>, <2 x double>* %p2 17 %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 1> 18 ret <2 x double> %v3 [all …]
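In these VSX shuffle tests the mask indices 0-1 select lanes of the first input vector and 2-3 select lanes of the second. An illustrative function in the same style that merges the low lane of each input (the function name is made up):

define <2 x double> @merge_low_lanes(<2 x double>* %p1, <2 x double>* %p2) {
  %v1 = load <2 x double>, <2 x double>* %p1
  %v2 = load <2 x double>, <2 x double>* %p2
  ; mask element 0 is lane 0 of %v1, mask element 2 is lane 0 of %v2
  %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> <i32 0, i32 2>
  ret <2 x double> %v3
}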
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | cse.ll | 6 ;int test(double *G) { 14 ;CHECK: load <2 x double> 15 ;CHECK: fadd <4 x double> 16 ;CHECK: store <4 x double> 19 define i32 @test(double* nocapture %G) { 21 %arrayidx = getelementptr inbounds double, double* %G, i64 5 22 %0 = load double, double* %arrayidx, align 8 23 %mul = fmul double %0, 4.000000e+00 24 %add = fadd double %mul, 1.000000e+00 25 store double %add, double* %G, align 8 [all …]
|
D | call.ll | 6 declare double @sin(double) 7 declare double @cos(double) 8 declare double @pow(double, double) 9 declare double @exp2(double) 10 declare double @sqrt(double) 15 ; CHECK: call <2 x double> @llvm.sin.v2f64 17 define void @sin_libm(double* %a, double* %b, double* %c) { 19 %i0 = load double, double* %a, align 8 20 %i1 = load double, double* %b, align 8 21 %mul = fmul double %i0, %i1 [all …]
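This SLP vectorizer test checks that adjacent scalar math calls are merged into a single vector intrinsic call such as llvm.sin.v2f64 (per the CHECK line above). A reduced sketch of the scalar input shape it starts from, using the llvm.sin.f64 intrinsic directly rather than the libm declarations listed above; all names are illustrative:

declare double @llvm.sin.f64(double)

define void @sin_pair(double* %a, double* %b) {
  ; two loads from consecutive elements, two independent sin calls,
  ; two stores to consecutive elements: a candidate for <2 x double> SLP
  %a0 = load double, double* %a, align 8
  %a1.addr = getelementptr inbounds double, double* %a, i64 1
  %a1 = load double, double* %a1.addr, align 8
  %s0 = call double @llvm.sin.f64(double %a0)
  %s1 = call double @llvm.sin.f64(double %a1)
  store double %s0, double* %b, align 8
  %b1.addr = getelementptr inbounds double, double* %b, i64 1
  store double %s1, double* %b1.addr, align 8
  ret void
}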
|
/external/llvm/test/Transforms/InstSimplify/ |
D | fold-builtin-fma.ll | 7 declare double @llvm.fma.f64(double, double, double) 10 define double @PR20832() { 11 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0.0) 12 ret double %1 15 ; CHECK: ret double 5.600000e+01 18 define double @test_all_finite() { 19 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 5.0) 20 ret double %1 23 ; CHECK: ret double 6.100000e+01 26 define double @test_NaN_addend() { [all …]
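These InstSimplify tests fold llvm.fma.f64 calls whose operands are all constants; for instance 7.0 * 8.0 + 5.0 = 61.0, matching the "ret double 6.100000e+01" CHECK line in the listing. A minimal sketch of the same folding opportunity (the function name is illustrative):

declare double @llvm.fma.f64(double, double, double)

; 7.0 * 8.0 + 5.0 = 61.0, so the call can be replaced by a constant
define double @fma_all_constant() {
  %r = call double @llvm.fma.f64(double 7.0, double 8.0, double 5.0)
  ret double %r
}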
|
/external/llvm/test/Transforms/Inline/ |
D | inline-byval-bonus.ll | 11 %struct.sphere = type { %struct.vec3, double, %struct.material, %struct.sphere* } 12 %struct.vec3 = type { double, double, double } 13 %struct.material = type { %struct.vec3, double, double } 15 %struct.spoint = type { %struct.vec3, %struct.vec3, %struct.vec3, double } 33 %2 = load double, double* %1, align 8 34 %3 = fmul double %2, %2 36 %5 = load double, double* %4, align 8 37 %6 = fmul double %5, %5 38 %7 = fadd double %3, %6 40 %9 = load double, double* %8, align 8 [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | PBQP-chain.ll | 23 define void @fir(double* nocapture %rx, double* nocapture %ry, double* nocapture %c, double* nocapt… 25 %0 = load double, double* %c, align 8 26 %1 = load double, double* %x, align 8 27 %mul = fmul fast double %1, %0 28 %2 = load double, double* %y, align 8 29 %mul7 = fmul fast double %2, %0 30 %arrayidx.1 = getelementptr inbounds double, double* %c, i64 1 31 %3 = load double, double* %arrayidx.1, align 8 32 %arrayidx2.1 = getelementptr inbounds double, double* %x, i64 1 33 %4 = load double, double* %arrayidx2.1, align 8 [all …]
|
/external/llvm/test/Transforms/BBVectorize/X86/ |
D | simple-int.ll | 4 declare double @llvm.fma.f64(double, double, double) 5 declare double @llvm.fmuladd.f64(double, double, double) 6 declare double @llvm.cos.f64(double) 7 declare double @llvm.powi.f64(double, i32) 10 define double @test1(double %A1, double %A2, double %B1, double %B2, double %C1, double %C2) { 11 %X1 = fsub double %A1, %B1 12 %X2 = fsub double %A2, %B2 13 %Y1 = call double @llvm.fma.f64(double %X1, double %A1, double %C1) 14 %Y2 = call double @llvm.fma.f64(double %X2, double %A2, double %C2) 15 %Z1 = fadd double %Y1, %B1 [all …]
|